# Annotate coverage report with lines modified by patch
#!/usr/bin/env python
import os
from collections import defaultdict
import patch
import re
import coverage
import django
from optparse import OptionParser
import webbrowser
import sys
"""
requires http://python-patch.googlecode.com/svn/trunk/patch.py
"""
# TODO don't include files in report when lines are only removed
# Root of the Django checkout, derived from the installed django package
# (two levels up from django/__init__.py).
django_path = os.path.abspath(os.path.dirname(os.path.dirname(django.__file__)))
# Directory where coverage.py writes its HTML report for the test suite.
coverage_html_dir = os.path.join(django_path, 'tests/coverage_html')
# Regex fragment matching any line-ending flavour (LF, CRLF, or bare CR).
line_end = '(?:\n|\r\n?)'
# pattern to use to insert new stylesheet
# this is currently pretty brittle - but lighterweight than doing something with
# lxml and/or pyquery
# The stylesheet <link> tag coverage.py emits in each report page; the
# per-file highlight stylesheet is inserted immediately before it.
current_style = "<link rel='stylesheet' href='style.css' type='text/css'>"
def parse_patch(patch_file):
    """
    Parse a patch file and map each patched file to the positions of the
    lines the patch added or changed.

    Parameters:
        patch_file: path to a unified diff readable by python-patch's
            ``patch.fromfile``.

    Returns:
        defaultdict of {relative target path: [positions of '+' lines in
        the patched file]}.  Targets whose path contains 'test' or 'docs',
        or that do not exist on disk, are skipped.
    """
    patch_set = patch.fromfile(patch_file)
    target_files = set()
    # NOTE(review): lstrip('/ab') strips any run of leading '/', 'a', 'b'
    # characters, not only the "a/" / "b/" diff prefixes -- a target whose
    # first path component starts with 'a' or 'b' would be mangled.
    target_files.update([os.path.join(django_path, p.target.lstrip('/ab')) for p in patch_set.items])
    # Drop tests, docs, and anything the patch targets that is not actually
    # present in this checkout.
    target_files = [p for p in target_files if 'test' not in p]
    target_files = [p for p in target_files if 'docs' not in p]
    target_files = [p for p in target_files if os.path.exists(p)]
    target_lines = defaultdict(list)
    for p in patch_set.items:
        source_file = os.path.join(django_path, p.target)
        if source_file not in target_files:
            continue
        with open(source_file, 'r') as f:
            source_lines = f.readlines()
        last_hunk_offset = 0
        for hunk in p.hunks:
            patched_lines = []
            # The hunk as it appears in the *patched* file: context (' ')
            # and added ('+') lines, diff marker and line ending stripped.
            hunkreplace = [x[1:].rstrip("\r\n") for x in hunk.text if x[0] in " +"]
            hunk_offset = last_hunk_offset
            in_candidate_match = False
            hunk_pos = 0
            # Scan the source for a contiguous run of lines matching
            # hunkreplace to locate where this hunk landed (the source is
            # assumed to already have the patch applied).
            for i, line in enumerate(source_lines):
                # I tried unsuccessfully to get multiline re to work,
                # fell back to this
                if i < last_hunk_offset:
                    continue
                if hunk_pos > len(hunkreplace) - 1:
                    break
                # Bug fix: source_lines keep their trailing newline while
                # hunkreplace entries had theirs stripped, so the two could
                # never compare equal; normalise the source line too.
                if hunkreplace[hunk_pos] == line.rstrip("\r\n"):
                    if not in_candidate_match:
                        in_candidate_match = True
                        hunk_offset = i
                    hunk_pos += 1
                    continue
                else:
                    # Partial match failed; restart the search.
                    in_candidate_match = False
                    hunk_offset = 0
                    hunk_pos = 0
                    continue
            line_offset = hunk_offset
            for hline in hunk.text:
                if hline.startswith('-'):
                    # Removed lines do not exist in the patched file.
                    continue
                if hline.startswith('+'):
                    # Bug fix: the original appended hunk_offset here, so
                    # every added line in a hunk was recorded as the hunk's
                    # first line; record the walking offset instead.
                    # NOTE(review): these offsets are 0-based enumerate
                    # indexes while coverage's HTML anchors (#n<lineno>)
                    # are 1-based -- possible pre-existing off-by-one;
                    # confirm against a generated report.
                    patched_lines.append(line_offset)
                line_offset += 1
            target_lines[p.target].extend(patched_lines)
            last_hunk_offset = hunk_offset
    return target_lines
def generate_css(target_lines):
    """
    Write one CSS file per patched target that highlights its patched lines
    in the coverage HTML report, and link that stylesheet into the matching
    report page.

    Parameters:
        target_lines: mapping of {relative target path: [line positions]}
            as produced by parse_patch().

    Raises:
        ValueError: if zero or multiple report pages match a target, or if
            the expected stylesheet <link> tag is not found in a page.
    """
    coverage_files = os.listdir(coverage_html_dir)
    for target in target_lines:
        # coverage.py names report files by flattening the path with '_'.
        target_name = target.replace('/', '_')
        fname = target_name.replace(".py", ".css")
        html_name = target_name.replace(".py", ".html")
        # One selector per patched line; the report gives each source line
        # an element id of the form n<lineno>.
        css = ','.join(["#n%s" % l for l in target_lines[target]])
        css += " {background: lightgreen;}"
        css_file = os.path.join(coverage_html_dir, fname)
        with open(css_file, 'w') as f:
            f.write(css)
        html_pattern = re.compile(html_name)
        html_file = [p for p in coverage_files if html_pattern.search(p)]
        if len(html_file) != 1:
            raise ValueError("Found wrong number of matching html files")
        html_file = os.path.join(coverage_html_dir, html_file[0])
        with open(html_file, 'r') as f:
            html_source = f.read()
        style_start = html_source.find(current_style)
        # Bug fix: previously a missing <link> tag made find() return -1
        # and the slices below silently inserted the new stylesheet before
        # the page's last character; fail loudly instead.
        if style_start == -1:
            raise ValueError("Stylesheet link not found in %s" % html_file)
        # Insert our per-file stylesheet just before coverage.py's own.
        new_html = html_source[:style_start]
        new_html += "<link rel='stylesheet' href='%s' type='text/css'>\n" % fname
        new_html += html_source[style_start:]
        os.unlink(html_file)
        with open(html_file, 'w') as f:
            f.write(new_html)
if __name__ == "__main__":
    # No options defined yet; OptionParser is used only to collect the
    # positional patch-file argument (and give --help for free).
    opt = OptionParser()
    (options, args) = opt.parse_args()
    if not args:
        print "No patch file provided"
        sys.exit(1)
    patchfile = args[0]
    # {relative target path: [patched line positions]}
    target_lines = parse_patch(patchfile)
    # generate coverage reports
    # Reuse the .coverage data file produced by a previous test-suite run.
    cov = coverage.coverage(data_file = os.path.join(django_path, 'tests', '.coverage'))
    cov.load()
    targets = [os.path.join(django_path, x) for x in target_lines.keys()]
    print targets
    cov.html_report(morfs=targets, directory=coverage_html_dir)
    # Overlay the patched-line highlighting onto the fresh report, then
    # open it in the default browser.
    generate_css(target_lines)
    webbrowser.open(os.path.join(coverage_html_dir, 'index.html'))