Last active
January 25, 2024 17:31
-
-
Save gabrielfeo/73160a396481883a9b82a0a0d1237386 to your computer and use it in GitHub Desktop.
Compare separate gradle-profiler benchmark results (last tested with v0.20.0) by merging them into a single HTML file
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3 | |
from collections import namedtuple | |
import os | |
import shutil | |
import re | |
import json | |
import argparse | |
# Build the CLI. The description must be passed as the `description=` keyword:
# passing it positionally makes argparse treat it as the program name (`prog`)
# and mangles the usage line of --help. RawDescriptionHelpFormatter preserves
# the line breaks of the directory-tree diagram below.
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description="""
This utility can merge multiple gradle-profiler result HTMLs into a single
one.

Optionally, to better distinguish results when merging benchmarks that were
of the same scenarios, it can suffix the scenario names in each result
before merging:

- results-i5-machine (results directory)
  - configuration (scenario)
  - build
- results-i7-machine
  - configuration
  - build

merge-results ./results-i5-machine=-i5 ./results-i7-machine=-i7

 |
 V

- merged (results directory)
  - configuration-i5 (scenario)
  - configuration-i7
  - build-i5
  - build-i7
""",
)
parser.add_argument("--output-dir", dest="output_dir", default="./merge")
parser.add_argument(
    "benchmark_dirs",
    nargs='+',
    help="""
    Benchmark results to concatenate. Each can have an optional suffix for scenario names in the
    resulting HTML using '=': ./results-1=-1 ./results-2=-2.
    """,
)
args = parser.parse_args()

# A results directory plus the (possibly empty) suffix to append to each of
# its scenario names in the merged HTML.
DirSpec = namedtuple('DirSpec', ['path', 'scenario_suffix'], defaults=[""])
# Split on the FIRST '=' only, so a spec whose suffix itself contains '='
# (e.g. "./results=a=b") still unpacks into exactly two fields.
dir_specs = tuple(DirSpec(*spec.split('=', 1)) for spec in args.benchmark_dirs)
output_dir = args.output_dir


def html_path_of_dir(results_dir):
    """Path of the gradle-profiler HTML report inside a results directory."""
    return f"{results_dir}/benchmark.html"


html_output_file = html_path_of_dir(output_dir)
def read_between(start_pattern, end_pattern, path) -> str:
    """Return the lines strictly between the first line matching
    start_pattern and the next line matching end_pattern in the file at path.

    Matching uses re.match, i.e. patterns are anchored at the start of each
    line. The boundary lines themselves are excluded. Returns "" if
    start_pattern never matches.
    """
    # Collect lines and join once at the end instead of repeated string
    # concatenation, which is quadratic in the worst case.
    collected = []
    with open(path, "r") as file:
        inside = False
        for line in file:
            if re.match(start_pattern, line) is not None:
                inside = True
            elif inside and re.match(end_pattern, line) is not None:
                break
            elif inside:
                collected.append(line)
    return "".join(collected)
def get_benchmark_result(benchmark_dir) -> dict:
    """Parse the benchmarkResult JS constant embedded in a results dir's HTML.

    The report embeds its data as `const benchmarkResult = {...};`; the JSON
    object sits between the declaration line and a lone `;` line. Returns the
    decoded JSON (a dict — callers index "scenarios", "date", etc.).
    """
    # Raw strings: "\s" in a plain string literal is an invalid escape
    # (SyntaxWarning on modern Python) and only worked by accident.
    results_str: str = read_between(
        r"const benchmarkResult =", r"\s?;\s?", html_path_of_dir(benchmark_dir))
    return json.loads(results_str)
def get_scenario_results(benchmark_dir) -> list:
    """List of per-scenario result objects from a results dir's HTML report."""
    return get_benchmark_result(benchmark_dir)["scenarios"]
def create_output_html():
    """Seed the merged report by copying the first result's HTML into output_dir.

    The copy serves as a template; its embedded results are overwritten later.
    """
    template_html = html_path_of_dir(dir_specs[0].path)
    os.makedirs(output_dir, exist_ok=True)
    shutil.copyfile(template_html, html_output_file)
def concat_scenario_results() -> list:
    """Collect scenario results from every input dir into one flat list.

    Each scenario's name and title get that dir's suffix appended, so
    same-named scenarios from different runs stay distinguishable in the
    merged report. Mutates the result dicts in place.

    Note: the return annotation was previously `-> dict`, but this function
    has always returned a list.
    """
    scenario_results = []
    for spec in dir_specs:  # renamed from `dir`, which shadowed the builtin
        for result in get_scenario_results(spec.path):
            suffixed_name = result["definition"]["name"] + spec.scenario_suffix
            result["definition"]["name"] = suffixed_name
            result["definition"]["title"] = suffixed_name
            scenario_results.append(result)
    return scenario_results
def strip_date_and_environment(benchmark_result):
    """Blank out run-specific metadata (in place) that is meaningless once
    results from several runs are merged into one report."""
    for key in ("date", "environment"):
        benchmark_result[key] = ""
def overwrite_scenario_results_in_output_html(new_scenario_results):
    """Rewrite the merged HTML's embedded benchmarkResult JS constant.

    Reads the copied template, replaces its `const benchmarkResult = ...;`
    block with one containing the concatenated scenarios, a combined title,
    and blanked date/environment, then writes the HTML back.
    """
    with open(html_output_file, 'r') as file:
        html = file.read()
    merged_result = get_benchmark_result(output_dir)
    strip_date_and_environment(merged_result)
    dir_paths = (spec.path for spec in dir_specs)
    merged_result["title"] = f"Benchmark Results: {' + '.join(dir_paths)}"
    merged_result["scenarios"] = new_scenario_results
    replacement = f"const benchmarkResult = {json.dumps(merged_result, indent=2)};"
    # Pass a callable as the replacement: a plain string would have its
    # backslashes interpreted as backreferences/escapes by re.sub, which
    # breaks (or corrupts) output whenever the JSON contains a backslash.
    html = re.sub(r"const benchmarkResult =.*}\s+]\s+}\s+;",
                  lambda _match: replacement, html, flags=re.S)
    with open(html_output_file, 'w') as file:
        file.write(html)
if __name__ == '__main__':
    # Copy the first result's HTML as a template, then splice in the
    # suffixed, concatenated scenario results from every input directory.
    create_output_html()
    overwrite_scenario_results_in_output_html(concat_scenario_results())
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
This is useful when benchmarks need to be separate. Some use cases so far: