Skip to content

Instantly share code, notes, and snippets.

@siv2r
Created December 15, 2025 18:36
Show Gist options
  • Select an option

  • Save siv2r/fe3f07062505a08da92610582d46a13a to your computer and use it in GitHub Desktop.

Select an option

Save siv2r/fe3f07062505a08da92610582d46a13a to your computer and use it in GitHub Desktop.
Python script for benchmarking PR 1782
#!/usr/bin/env python3
import subprocess
import os
import sys
import time
from openpyxl import Workbook
from openpyxl.styles import Font, Alignment
def run_command(cmd, log_file=None):
    """Execute *cmd* through the shell, raising on a non-zero exit status.

    When *log_file* is given, stdout and stderr are appended to that file
    (stderr is folded into stdout so the log preserves message ordering).

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
    """
    if not log_file:
        subprocess.run(cmd, shell=True, check=True)
        return
    with open(log_file, 'a') as sink:
        subprocess.run(cmd, shell=True, check=True,
                       stdout=sink, stderr=subprocess.STDOUT)
def build_bench_at_commit(commit, suffix):
    """Check out *commit*, do a clean cmake build of bench_ecmult, and
    stash the binary as ``pr1782_bench_<suffix>`` in the working directory.

    cmake output is appended to ``<suffix>.log``.
    """
    binary_name = f"pr1782_bench_{suffix}"
    build_log = f"{suffix}.log"
    print(f"Building secp #1782 '{suffix}' (commit {commit})...")
    run_command(f"git checkout -q {commit}")
    # Wipe any previous build tree so both commits configure from scratch.
    run_command("rm -rf build")
    run_command("cmake -B build", build_log)
    run_command("cmake --build build --target bench_ecmult --parallel 4", build_log)
    run_command(f"cp -f ./build/bin/bench_ecmult {binary_name}")
def run_bench(suffix):
    """Run ``pr1782_bench_<suffix>`` and capture its stdout in
    ``pr1782_bench_<suffix>.txt``.

    The iteration count is pinned via SECP256K1_BENCH_ITERS so both the
    master and PR binaries do an identical amount of work.
    """
    binary = f"pr1782_bench_{suffix}"
    print(f"Running benchmark binary {binary}...")
    bench_env = dict(os.environ, SECP256K1_BENCH_ITERS='100000')
    time.sleep(2)  # brief settle time before the timing-sensitive run
    with open(f"{binary}.txt", 'w') as out:
        subprocess.run(f"./{binary} pippenger_wnaf", shell=True,
                       check=True, stdout=out, env=bench_env)
def parse_benchmark_file(filename):
    """Parse bench_ecmult CSV output into ``{benchmark_name: avg_time}``.

    The first two lines (header and separator) are skipped.  Each remaining
    non-empty line is expected to be a 4-field comma-separated row of the
    form ``name, min, avg, max``; rows with the wrong field count or a
    non-numeric average are skipped instead of aborting the whole
    comparison (previously a malformed row raised ValueError).

    Args:
        filename: Path to the benchmark output file.

    Returns:
        dict mapping benchmark name to average time (assumed microseconds
        per the report headers — TODO confirm against bench_ecmult output).
    """
    results = {}
    with open(filename, 'r') as f:
        # Stream line-by-line instead of materializing readlines().
        for line_no, line in enumerate(f):
            if line_no < 2:  # skip the two header lines
                continue
            parts = [p.strip() for p in line.strip().split(',')]
            if len(parts) != 4:
                continue  # blank or non-CSV line
            try:
                results[parts[0]] = float(parts[2])  # avg is the 3rd column
            except ValueError:
                continue  # malformed numeric field; skip this row
    return results
def compare_benchmarks():
    """Compare master vs pr1782 benchmark results and write an .xlsx report.

    Reads ``pr1782_bench_master.txt`` and ``pr1782_bench_pr1782.txt``,
    computes the per-benchmark percentage delta (negative = pr1782 is
    faster), and saves a colour-coded spreadsheet to
    ``pr1782_bench_cmp.xlsx``.
    """
    print("\nParsing benchmark results...")
    master = parse_benchmark_file("pr1782_bench_master.txt")
    pr1782 = parse_benchmark_file("pr1782_bench_pr1782.txt")
    output_file = "pr1782_bench_cmp.xlsx"
    print(f"Generating comparison report: {output_file}")
    wb = Workbook()
    ws = wb.active
    ws.title = "Benchmark Comparison"
    red_font = Font(color="FF0000")  # red marks an improvement (negative %)
    wrap_alignment = Alignment(wrap_text=True)
    ws.column_dimensions['A'].width = 22
    for col in ['B', 'C', 'D']:
        ws.column_dimensions[col].width = 10
    headers = ['Benchmark', 'master Avg (us)', 'pr1782 Avg (us)', 'pr1782 vs master (%)']
    ws.append(headers)
    for cell in ws[1]:
        cell.alignment = wrap_alignment
    pr1782_pcts = []
    for benchmark_name, pr1782_time in pr1782.items():
        # Guard: a benchmark present only in the PR run previously raised
        # KeyError here and aborted the whole report; a zero baseline would
        # divide by zero.  Skip such rows with a notice instead.
        master_time = master.get(benchmark_name)
        if master_time is None or master_time == 0:
            print(f"  Skipping '{benchmark_name}': no usable master baseline")
            continue
        pr1782_pct = ((pr1782_time - master_time) / master_time) * 100
        pr1782_pcts.append(pr1782_pct)
        row = [benchmark_name, f"{master_time:.2f}", f"{pr1782_time:.2f}", f"{pr1782_pct:+.2f}%"]
        ws.append(row)
        if pr1782_pct < 0:
            ws.cell(row=ws.max_row, column=4).font = red_font
    # Guard: an empty result set previously crashed on division by zero.
    if pr1782_pcts:
        avg_pct = sum(pr1782_pcts) / len(pr1782_pcts)
        ws.append(['', '', '', ''])
        ws.append(['Average', '', '', f"{avg_pct:+.2f}%"])
        if avg_pct < 0:
            ws.cell(row=ws.max_row, column=4).font = red_font
    wb.save(output_file)
    print(f"Comparison report saved to {output_file}")
    print("\nSummary:")
    print(" - Negative % = Performance improvement (faster)")
    print(" - Positive % = Performance degradation (slower)")
    print(" - Near 0% = No significant change")
def main():
    """Build both bench binaries, run both benchmarks, emit the report."""
    targets = [
        ("e7f7083b530a55c83ce9089a7244d2d9d67ac8b2", "master"),
        ("d95e48b089778de9ca992cc5dfbabd6674d8c2d4", "pr1782"),
    ]
    for commit, label in targets:
        build_bench_at_commit(commit, label)
    time.sleep(10)  # let the machine quiesce after the builds
    for _, label in targets:
        run_bench(label)
    compare_benchmarks()
if __name__ == "__main__":
    # Exit with status 1 on the two failure modes the build/benchmark
    # pipeline is expected to produce; anything else surfaces as a traceback.
    try:
        main()
    except subprocess.CalledProcessError as exc:
        print(f"Error: Command failed with exit code {exc.returncode}")
        sys.exit(1)
    except FileNotFoundError as exc:
        print(f"Error: File not found - {exc}")
        sys.exit(1)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment