@m-shibata
Last active July 18, 2022 14:14
Ubuntu Weekly Recipe #724: Easily collecting per-CPU-core benchmarks with Python
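This gist holds two scripts. The first pins a single-threaded 7-Zip benchmark (7z b -mmt1) to chosen sets of CPU cores while glances (serving on localhost:61208) and a monitoring process, both pinned to CPU 1 to keep them off the measured cores, sample per-CPU usage, frequency, and sensor values once per second; it prints everything as one JSON document on stdout. The second script turns that JSON into a multi-panel matplotlib figure.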
#!/usr/bin/env python3
import datetime
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import threading
import time
import typing
import requests
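# Note: requests is the only third-party dependency of this script
# (python3-requests on Ubuntu); everything else is standard library.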
def monitoring(
    queue: "multiprocessing.Queue[list[typing.Any]]",
    event: threading.Event,
    cpunum: int,
    epoch: datetime.datetime,
):
    data = []
    while not event.is_set():
        datum: dict[str, typing.Any] = {"time": 0, "cpu": {}, "sensor": {}}

        # Retrieve from glances
        datum["time"] = (datetime.datetime.now() - epoch).seconds
        percpu = requests.get("http://localhost:61208/api/3/percpu")
        sensors = requests.get("http://localhost:61208/api/3/sensors")

        # Retrieve from sysfs
        freq = {}
        if os.access("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", os.R_OK):
            for i in range(cpunum):
                with open(
                    "/sys/devices/system/cpu/cpu{0}/cpufreq/scaling_cur_freq".format(i)
                ) as f:
                    freq[i] = int(f.readline())

        # Retrieve from procfs
        hz = {}
        with open("/proc/cpuinfo") as f:
            cpuinfo = f.read().split("\n\n")
        for cpu, info in enumerate(cpuinfo):
            for line in info.split("\n"):
                if "cpu MHz" in line:
                    hz[cpu] = float(line.split(":")[1].strip())

        # Organize retrieved data; fall back to 0 where cpufreq or
        # /proc/cpuinfo exposes no frequency for this CPU
        for stat in percpu.json():
            cpu = int(stat["cpu_number"])
            datum["cpu"][cpu] = {
                "usage": int(stat["total"]),
                "freq": freq.get(cpu, 0),
                "hz": hz.get(cpu, 0.0),
            }
        for stat in sensors.json():
            datum["sensor"][stat["label"]] = stat["value"]
        data.append(datum)
        time.sleep(1)
    queue.put(data)
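# Each element appended to data has roughly this shape (values illustrative):
#   {"time": 42,
#    "cpu": {0: {"usage": 97, "freq": 4200000, "hz": 4190.123}, ...},
#    "sensor": {"Package id 0": 75, ...}}
# where freq is kHz from sysfs and hz is MHz from /proc/cpuinfo.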
def parse_smp_cores():
    pattern = re.compile(
        r"processor\s+:\s+(?P<logi>\d+)|physical id\s+:\s+(?P<phys>\d+)|core id\s+:\s+(?P<core>\d+)"
    )
    cores = {}
    with open("/proc/cpuinfo") as f:
        cpuinfo = f.read().split("\n\n")
    for block in cpuinfo:
        if len(block) == 0:
            continue
        coreinfo = {}
        for m in re.finditer(pattern, block):
            coreinfo.update({k: int(v) for k, v in m.groupdict().items() if v})
        # Assumes the number of cores per CPU package is less than 2^16.
        key = str(coreinfo["phys"] << 16 | coreinfo["core"])
        if key not in cores:
            cores[key] = []
        cores[key].append(coreinfo["logi"])
    return cores
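# Illustrative return value on a 2-core/4-thread package (physical id 0),
# assuming the common enumeration where thread siblings are i and i + 2:
#   {"0": [0, 2], "1": [1, 3]}
# The key packs (physical id << 16 | core id) as a string, so cores from
# different packages never collide as long as core ids stay below 2^16.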
def bench_7z(
    queue: "multiprocessing.Queue[dict[str, typing.Any]]",
    cpu: int,
    epoch: datetime.datetime,
):
    comm = shutil.which("7z") or sys.exit("needs 7z command")
    # Pin this process to the target CPU, then run a single-threaded benchmark
    os.sched_setaffinity(0, {cpu})
    result = subprocess.run([comm, "b", "-mmt1"], stdout=subprocess.PIPE)
    if result.returncode != 0:
        sys.exit("failed to {0} on cpu {1}".format(comm, cpu))
    end = (datetime.datetime.now() - epoch).seconds
    data = []
    for line in result.stdout.decode("utf-8").splitlines():
        if line.startswith("Tot:"):
            data = line.split()
            break
    queue.put(
        {
            "end": end,
            "result": (int(data[2]) + int(data[3])) / 2,
        }
    )
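# The score comes from the final "Tot:" line of 7z benchmark output, e.g.
# (illustrative):
#   Tot:             100   3620   3616
# i.e. CPU usage (%), R/U MIPS, and Rating MIPS; the reported result is the
# mean of the last two columns.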
if __name__ == "__main__":
    # Start glances as daemon
    glances = shutil.which("glances") or sys.exit("needs glances command")
    daemon = subprocess.Popen(
        [glances, "-w", "--disable-webui"],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        bufsize=0,
        pipesize=0,
    )
    os.sched_setaffinity(daemon.pid, {1})
    # Wait for the server to start
    time.sleep(3)

    # Get CPU info
    result = requests.get("http://localhost:61208/api/3/quicklook")
    cpuname = result.json()["cpu_name"]
    cpunum = len(os.sched_getaffinity(0))
    data = {"cpunum": cpunum, "name": cpuname, "system": " ".join(os.uname())}

    # Prepare time epoch
    os.sched_setaffinity(0, {1})
    epoch = datetime.datetime.now()

    # Start monitor process
    queue = multiprocessing.Queue()
    event = multiprocessing.Event()
    monitor = multiprocessing.Process(
        target=monitoring,
        args=(
            queue,
            event,
            cpunum,
            epoch,
        ),
        daemon=True,
    )
    monitor.start()
    if not monitor.pid:
        sys.exit("failed to start monitor process")
    os.sched_setaffinity(monitor.pid, {1})

    # Start benchmark processes
    do_bench = bench_7z
    data["benchmark"] = []
    start = (datetime.datetime.now() - epoch).seconds
    reset = {
        "time": (datetime.datetime.now() - epoch).seconds,
        "cpu": dict.fromkeys(range(cpunum), {"end": 0, "result": 0}),
    }
    data["benchmark"].append(reset)
    time.sleep(3)
    patterns = [(x,) for x in range(cpunum)]  # single core
    patterns.extend([tuple(v) for _, v in parse_smp_cores().items()])  # with SMT
    patterns.append(tuple(range(0, cpunum, 2)))  # without SMT, even cores
    patterns.append(tuple(range(1, cpunum, 2)))  # without SMT, odd cores
    patterns.append(tuple(range(cpunum)))  # all cores
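    # Illustrative patterns for cpunum == 8 (4 cores x 2 threads, assuming
    # thread siblings are enumerated as i and i + 4):
    #   (0,) ... (7,)                    single core
    #   (0, 4) (1, 5) (2, 6) (3, 7)      with SMT
    #   (0, 2, 4, 6) and (1, 3, 5, 7)    without SMT
    #   (0, 1, 2, 3, 4, 5, 6, 7)         all cores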
    for pattern in patterns:
        print("Start benchmark on CPU", end=" ", file=sys.stderr)
        start = (datetime.datetime.now() - epoch).seconds
        benchmark_result: list[dict[str, typing.Any]] = [{"time": start, "cpu": {}}]
        bench = {}
        for i in pattern:
            print("{}".format(i), end=" ", file=sys.stderr)
            bench[i] = {}
            bench[i]["queue"] = multiprocessing.Queue()
            bench[i]["proc"] = multiprocessing.Process(
                target=do_bench,
                args=(
                    bench[i]["queue"],
                    i,
                    epoch,
                ),
            )
            bench[i]["proc"].start()
            if not bench[i]["proc"].pid:
                sys.exit("failed to start benchmark process")
        print(file=sys.stderr)
        for i in pattern:
            bench[i]["proc"].join()
            if bench[i]["proc"].exitcode != 0:
                sys.exit("failed benchmark process")
            result = bench[i]["queue"].get()
            benchmark_result[0]["cpu"][i] = result
            end = {"time": result["end"], "cpu": {}}
            end["cpu"][i] = result
            benchmark_result.append(end)
        data["benchmark"].extend(benchmark_result)
        reset = {
            "time": (datetime.datetime.now() - epoch).seconds,
            "cpu": dict.fromkeys(range(cpunum), {"end": 0, "result": 0}),
        }
        data["benchmark"].append(reset.copy())
        # Cool down between patterns, then record a second zeroed sample
        time.sleep(30)
        reset["time"] = (datetime.datetime.now() - epoch).seconds
        data["benchmark"].append(reset)

    # Output retrieved data
    event.set()
    data["monitoring"] = queue.get()
    print(json.dumps(data))

    # Finalize monitor process and glances daemon
    monitor.join()
    daemon.terminate()
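The benchmark script above writes its JSON to stdout and its progress to stderr, so a typical invocation (script name assumed) is ./bench.py > result.json. The second script below plots that file with matplotlib and pandas.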
#!/usr/bin/env python3
import argparse
import json
import os
import re
import sys
import matplotlib.pyplot as plt
import pandas
def rename_legend(s: str):
    s = re.sub(r"^cpu\.([0-9]+)\.", r"CPU\1 ", s)
    return re.sub(r"^sensor\.(.*)$", r"\1", s)
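# e.g. rename_legend("cpu.0.usage") -> "CPU0 usage"
#      rename_legend("sensor.Package id 0") -> "Package id 0"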
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("json")
    parser.add_argument("output")
    args = parser.parse_args()

    data = []
    if not os.access(args.json, os.R_OK):
        sys.exit("could not read json {}".format(args.json))
    with open(args.json) as f:
        data = json.load(f)
    cpunum = data["cpunum"]
    system = "\n".join((data["name"], data["system"]))

    plots = [
        {"title": "CPU Usage (%)", "filter": "(time|.usage$)", "style": "plain"},
        {
            "title": "Even CPU Freq via sysfs (Hz)",
            "filter": r"(time|\d*[02468].freq$)",
            "style": "plain",
        },
        {
            "title": "Odd CPU Freq via sysfs (Hz)",
            "filter": r"(time|\d*[13579].freq$)",
            "style": "plain",
        },
        {
            "title": "Even CPU Freq via procfs (MHz)",
            "filter": r"(time|\d*[02468].hz$)",
            "style": "plain",
        },
        {
            "title": "Odd CPU Freq via procfs (MHz)",
            "filter": r"(time|\d*[13579].hz$)",
            "style": "plain",
        },
        {"title": "Sensors (C)", "filter": "(time|^sensor.)", "style": "plain"},
    ]

    fig, axes = plt.subplots(
        ncols=1, nrows=len(plots) + 1, sharex=False, figsize=(20, 40)
    )
    fig.suptitle(system, fontsize=20, y=0.9)

    df_mon = pandas.json_normalize(data["monitoring"])
    for i, v in enumerate(plots):
        df_mon.filter(regex=v["filter"]).rename(columns=rename_legend).plot(
            x="time", ax=axes[i]
        ).legend(loc="upper left", bbox_to_anchor=(1, 1), fontsize="small")
        axes[i].set_title(v["title"])
        axes[i].ticklabel_format(style=v["style"])
        axes[i].grid()

    df_bench = pandas.json_normalize(data["benchmark"])
    # fillna(method="ffill") is deprecated in recent pandas; ffill() is equivalent
    df_bench.filter(regex="(time|result)").ffill().rename(
        columns=rename_legend
    ).plot.area(
        x="time", ax=axes[len(plots)], stacked=False
    ).legend(loc="upper left", bbox_to_anchor=(1, 1), fontsize="small")
    axes[len(plots)].set_title("Benchmark result")
    axes[len(plots)].grid()

    fig.savefig(args.output, bbox_inches="tight")
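Usage sketch for the plotting script (file names assumed): ./plot.py result.json result.png. It renders seven stacked panels: per-CPU usage, even/odd core frequencies from both sysfs and procfs, sensor readings, and the benchmark scores as a non-stacked area chart.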