#!/usr/bin/python
#
# Scrape the OpenWrt snapshot faillogs and generate an HTML report of
# which whitelisted packages currently fail to build on which
# architectures.

from __future__ import print_function

import datetime
import locale
import re
import sys

import requests
from tabulate import tabulate

# The directory listings use English month and weekday names, so parse
# their dates with the C locale regardless of the environment.
locale.setlocale(locale.LC_TIME, "C")

baseurl = "https://downloads.openwrt.org/snapshots/faillogs"
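# Only packages on this whitelist show up in the report; all other
# failures found in the faillogs are ignored.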
package_whitelist = [
    'libmodbus',
    'libugpio',
    'mmc-utils',
    'libxml2',
    'postgresql',
    'icu',
    'openldap',
    'mariadb',
    'php7',
    'php7-pecl-dio',
    'php7-pecl-http',
    'php7-pecl-libevent',
    'php7-pecl-propro',
    'php7-pecl-raphf',
    'php7-pecl-mcrypt',
    'php7-pecl-krb5',
    'php7-pecl-redis',
    'ser2net',
    'u2pnpd',
    'libcanfestival',
    'libiio',
    'uw-imap',
    'libgpiod',
    'libfmt',
    'knxd',
    'libupnp',
    'apache',
    'modemmanager',
    'libmbim',
    'libqmi',
    'libzip',
]
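# parse_page() scrapes one auto-generated directory index: every row of
# interest carries a link (href) plus the entry's modification time in a
# <td class="d"> cell, e.g. "Mon Dec 16 20:55:33 2019".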
def parse_page(page):
    # Map each directory entry to its modification time.
    lines = [line.strip() for line in page.split("\n")]
    links = {}
    for line in lines:
        mo = re.search(r"href=\"(.+)\".*>.*</a>.*class=\"d\">(\S+ \S+ +\S+ \S+ \S+)</td>", line)
        if mo:
            key = mo.group(1).strip("/")
            value = datetime.datetime.strptime(mo.group(2), "%a %b %d %H:%M:%S %Y")
            # Skip the parent directory entry.
            if key != "..":
                links[key] = value
    return links
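# The faillogs tree is laid out as <arch>/<feed>/<package>[/<variant>],
# each level being another directory index that parse_page() can read.
# Walk it level by level and record which packages fail on which archs.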
packages_total = {}

r = requests.get(baseurl)
if r.status_code != 200:
    sys.exit(1)

archs = parse_page(r.text)

for arch in archs:
    # An index untouched for more than three days most likely belongs to
    # an architecture the buildbots no longer rebuild, so skip it.
    if (datetime.datetime.utcnow() - archs[arch]).days > 3:
        print("Ignoring outdated arch: " + arch, file=sys.stderr)
        continue
    print("Analysing arch: " + arch, file=sys.stderr)

    r = requests.get(baseurl + "/" + arch)
    if r.status_code != 200:
        print("Warning: accessing faillogs for '%s' failed: %d" % (arch, r.status_code), file=sys.stderr)
        continue

    feeds = parse_page(r.text)
    for feed in feeds:
        r = requests.get(baseurl + "/" + arch + "/" + feed)
        if r.status_code != 200:
            print("Warning: accessing faillogs for '%s/%s' failed: %d" % (arch, feed, r.status_code), file=sys.stderr)
            continue

        packages = parse_page(r.text)

        # Remember the feed each package lives in and every architecture
        # it currently fails on.
        for package in packages:
            if package not in packages_total:
                packages_total[package] = {}
            packages_total[package]['feed'] = feed
            if 'failing_archs' not in packages_total[package]:
                packages_total[package]['failing_archs'] = []
            packages_total[package]['failing_archs'].append(arch)
table = []

# Count failures per architecture among the whitelisted packages, then
# drop architectures without any such failure so they do not produce
# empty columns.
arch_has_fails = {}
for package in sorted(packages_total):
    if package in package_whitelist:
        for arch in sorted(archs):
            if arch in packages_total[package]['failing_archs']:
                try:
                    arch_has_fails[arch] += 1
                except KeyError:
                    arch_has_fails[arch] = 1

for arch in archs.copy():
    if arch not in arch_has_fails:
        del archs[arch]
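# Build one table row per whitelisted package: an empty cell where the
# package builds fine, link(s) to the compile log(s) where it fails.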
for package in sorted(packages_total):
    if package in package_whitelist:
        fails = []
        for arch in sorted(archs):
            if arch in packages_total[package]['failing_archs']:
                feed = packages_total[package]['feed']
                url = baseurl + "/" + arch + "/" + feed + "/" + package
                r = requests.get(url)
                if r.status_code != 200:
                    print("Warning: accessing faillogs for '%s' failed: %d" % (arch + "/" + package, r.status_code), file=sys.stderr)
                    fails.append("<a href=\"" + url + "/compile.txt\">X</a>")
                else:
                    variants = parse_page(r.text)
                    if "compile.txt" in variants:
                        # Single build: link the compile log directly.
                        fails.append("<a href=\"" + url + "/compile.txt\">X</a>")
                    else:
                        # One subdirectory per build variant: link each
                        # variant's compile log.
                        cell = ""
                        for variant in variants:
                            cell += "<a href=\"" + url + "/" + variant + "/compile.txt\">" + variant + "</a><br>"
                        fails.append(cell)
            else:
                fails.append("")
        table.append([package] + fails)

headers = ["Package"] + sorted(archs.keys())
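# Emit a self-contained HTML page on stdout; the column headers (the
# architecture names) are rotated 270 degrees to keep the table narrow.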
print("""<html>
<head>
<title>Failing packages at LEDE buildbots</title>
<style type="text/css">
table {
    border-collapse: collapse;
}
table, th, td {
    border: 1px solid black;
}
th {
    transform: rotate(270deg);
    height: 250px;
}
tr:nth-child(even) { background-color: #f2f2f2 }
</style>
</head>
<body><h1>Failing packages</h1>\n""")

print(tabulate(table, headers=headers, tablefmt="html"))
print("</body></html>\r\n")