Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
#!/usr/bin/env python
# vim:se sts=4 sw=4 et fenc=utf-8 ft=python:
import json
import re
import sys
class Test(object):
    """Aggregated log records for one test parsed from a structured log."""

    def __init__(self, name, is_debug):
        """Create an empty record for *name*; *is_debug* marks a debug build."""
        self.name = name
        self.errors = []       # 'JavaScript error' process_output records
        self.failures = []     # unexpected test_status records
        self.crashes = []      # crash records seen while this test ran
        self.logs = []         # every record seen while this test ran
        self.is_debug = is_debug
        self.timed_out = False

    @property
    def failed(self):
        """True when the test recorded at least one failure or crash."""
        return bool(self.failures or self.crashes)
# Parser state shared with the main loop below.
tests = []           # every Test object, in the order test_start was seen
crashes = []         # all crash records, including those outside any test
current_test = None  # Test currently running, or None between tests

# Pattern applied to 'JavaScript error' output lines; tests producing a
# matching error are tallied in `matches`.  NOTE(review): the original
# rebound `pat` five times in a row (PageStyleChild, LinkHandlerChild,
# ContentSessionStore, LoginManagerContent, getCachedMessages -- each with
# '.*cross-origin'); only the final binding ever took effect, so the four
# dead reassignments are dropped and the effective pattern is kept.
pat = re.compile('getCachedMessages.*cross-origin')
matches = set()      # names of tests whose errors matched `pat`
# Walk every structured-log file named on the command line and bucket each
# JSON record into the Test that was running when it was emitted.
# NOTE(review): the pasted source lost all indentation; the nesting below is
# reconstructed -- the redundant `if current_test` guard inside the 'crash'
# branch pins that branch outside the main `if current_test:` body.  Confirm
# against the original file.
for fn in sys.argv[1:]:
    with open(fn) as fh:
        # A log is assumed to come from an opt build until a '++DOCSHELL'
        # line (debug-build docshell logging) proves otherwise.
        is_debug = False
        for line in fh:
            # Fix: lines read from a file keep their trailing '\n', so the
            # original `if not line:` guard never fired and json.loads()
            # would raise on a blank line.  Strip before testing.
            if not line.strip():
                continue
            log = json.loads(line)
            if log['action'] == 'test_start':
                current_test = Test(log['test'], is_debug)
                tests.append(current_test)
            elif log['action'] == 'test_end':
                current_test = None
            else:
                # First '++DOCSHELL' marks this log (and the running test,
                # if any) as coming from a debug build.
                if not is_debug and log['action'] == 'process_output':
                    if log['data'].startswith('++DOCSHELL'):
                        is_debug = True
                        if current_test:
                            current_test.is_debug = True
                if current_test:
                    current_test.logs.append(log)
                    if log['action'] == 'process_output':
                        if 'JavaScript error' in log['data']:
                            if pat.search(log['data']):
                                matches.add(current_test.name)
                            current_test.errors.append(log)
                    elif log['action'] == 'test_status':
                        # A status differing from the expected one (PASS by
                        # default) counts as a failure.
                        failed = log['status'] != log.get('expected', 'PASS')
                        if failed:
                            current_test.failures.append(log)
                            if log['subtest'] == 'Test timed out':
                                current_test.timed_out = True
                # Crashes are recorded globally even when no test is active.
                if log['action'] == 'crash':
                    if current_test:
                        current_test.crashes.append(log)
                    crashes.append(log)
# ---- Summary reporting -------------------------------------------------
# NOTE(review): indentation was lost in this paste; nesting reconstructed.
tests_passed = 0
tests_failed = 0
tests_crashed = 0

# Split crashing test names by build type so a crash can later be labelled
# as debug-only, opt-only, or both.
debug_crashes = set()
opt_crashes = set()
for t in tests:
    if t.crashes:
        (debug_crashes if t.is_debug else opt_crashes).add(t.name)

for t in tests:
    # A crash trumps a plain failure in the totals.
    if t.crashes:
        tests_crashed += 1
    elif t.failures:
        tests_failed += 1
    else:
        tests_passed += 1

    if not t.timed_out and t.failures:
        print('T:', t.name)
    elif False and t.crashes:
        # Deliberately disabled (`False and ...`): per-crash signature
        # reporting, kept around for easy re-enabling.
        if t.is_debug:
            if t.name in opt_crashes:
                print('C:', t.name, t.crashes[0]['signature'])
            else:
                print('D:', t.name, t.crashes[0]['signature'])
        elif t.name not in debug_crashes:
            print('C:', t.name, t.crashes[0]['signature'])

    # Dump the captured JavaScript errors for failing tests.
    if t.failed and t.errors:
        print('Test: %s' % t.name)
        for err in t.errors:
            print(' ', err['data'])
        print('')

print('Total: %d' % len(tests))
print('Passed: %d' % tests_passed)
print('Failed: %d' % tests_failed)
print('Crashed: %d' % tests_crashed)
print('')
print('Matching tests: %d' % len(matches))
#!/usr/bin/env python
# vim:se sts=4 sw=4 et fenc=utf-8 ft=python:
import os
import re
import sys
import urllib.parse
from urllib.parse import parse_qs, urlparse
import requests
# Command line: <treeherder push URL> <directory to write fetched logs into>
push_url = sys.argv[1]
output_dir = sys.argv[2]
# Browser-like User-Agent sent with every treeherder API request.
headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0',
}
def get(path):
    """GET *path* under the treeherder API root and return the response."""
    api_root = 'https://treeherder.mozilla.org/api/'
    return requests.get(api_root + path, headers=headers)
def fetch_log(url, filename):
    """Download *url* into output_dir/*filename*, echoing the target path."""
    destination = os.path.join(output_dir, filename)
    print(destination)
    response = requests.get(url)
    with open(destination, 'wb') as out:
        out.write(response.content)
# Splits ".../name.ext" into basename/ext for rebuilding log filenames.
path_re = re.compile(r'.*/(?P<basename>.*)\.(?P<ext>.*?)$')

# Extract the try revision from the push URL's query string (the '/#/'
# fragment form is normalized first so urlparse sees a real query).
query = parse_qs(urlparse(push_url.replace('/#/', '/')).query)
commit_id, = query['revision']

# Resolve revision -> push id.
push_url = 'project/try/push/?revision=%s' % commit_id
push_data = get(push_url).json()
push_id = push_data['results'][0]['id']

# List the push's jobs and keep completed linux64 'M-fis' test failures.
jobs_url = 'project/try/jobs/?push_id=%s&count=2000&return_type=list' % push_id
job_data = get(jobs_url).json()
jobs = []
for result in job_data['results']:
    # Result rows come back as bare lists; zip them with the property names
    # to build a dict (replaces the original index-by-index loop).
    job = dict(zip(job_data['job_property_names'], result))
    if (job['state'] == 'completed' and job['result'] == 'testfailed' and
            job['job_group_symbol'] == 'M-fis' and
            job['platform'] == 'linux64'):
        jobs.append(job)

# Download every uploaded *_raw.log artifact for the selected jobs.
# (The original's unused `raw_logs = []` accumulator was dropped.)
for job in jobs:
    detail_url = 'jobdetail/?job_guid=%s' % urllib.parse.quote(job['job_guid'])
    # job_guid can contain '/'; keep only the leading part so the guid is
    # safe to embed in a filename.
    safe_guid = re.sub(r'/.*', '', job['job_guid'])
    for result in get(detail_url).json()['results']:
        if (result['title'] == 'artifact uploaded' and
                result['value'].endswith('_raw.log')):
            # NOTE(review): assumes the artifact URL path always matches
            # path_re; a non-matching path would make `m` None and raise
            # TypeError below -- confirm against real artifact URLs.
            m = path_re.match(urlparse(result['url']).path)
            filename = '%s_%s-%s_%s.%s' % (m['basename'],
                                           job['platform'],
                                           job['platform_option'],
                                           safe_guid,
                                           m['ext'])
            fetch_log(result['url'], filename)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.