Wrapper around sysbench for running multiple tests with convenient output
"""
Try running:
    python3 run_sysbench.py -t cpu -r 2 -n 10000 --threads 12 --time 0 --events 1000
or:
    sysbench fileio --file-total-size=6G prepare
    python3 run_sysbench.py -t fileio -r 3 --threads 1 --time 10 --file_total_size 6G --file_test_mode rndrw
    sysbench fileio --file-total-size=6G cleanup
"""
import argparse
import subprocess
def run_cpu_benchmark(max_prime, number_runs, number_threads, total_time, max_events):
    """Run the sysbench CPU test number_runs times; collect each run's total time and average latency."""
    total_time_results = []
    total_avg_latency = []
    for _ in range(number_runs):
        output = subprocess.check_output(['sysbench', 'cpu',
                                          '--cpu-max-prime={}'.format(max_prime),
                                          '--threads={}'.format(number_threads),
                                          '--time={}'.format(total_time),
                                          '--events={}'.format(max_events),
                                          'run'])
        output = output.decode('utf-8')
        for line in output.splitlines():
            # e.g. "total time:    10.0021s" -> keep the number, drop the unit
            if 'total time' in line:
                total_time_results.append(line.split()[2].replace('s', ''))
            # e.g. "avg:    0.35" in the "Latency (ms)" block
            if 'avg:' in line:
                total_avg_latency.append(line.split()[1])
    return total_time_results, total_avg_latency
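
# A minimal sketch, not part of the original gist: since each run is reported
# separately, averaging the collected samples makes runs easier to compare.
# `summarize_runs` is a hypothetical helper name; it assumes the lists hold
# stringified numbers, which is what the parsers in this script produce.
import statistics

def summarize_runs(samples):
    """Return (mean, stdev) for a list of stringified sysbench measurements."""
    values = [float(s) for s in samples]
    stdev = statistics.stdev(values) if len(values) > 1 else 0.0
    return statistics.mean(values), stdev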
def run_fileio_benchmark(runs, threads, file_total_size, file_test_mode, time):
    """Run the sysbench fileio test `runs` times; collect read and write throughput."""
    read_throughput = []
    write_throughput = []
    for _ in range(runs):
        output = subprocess.check_output(['sysbench', 'fileio',
                                          '--file-total-size={}'.format(file_total_size),
                                          '--file-test-mode={}'.format(file_test_mode),
                                          '--time={}'.format(time),
                                          '--threads={}'.format(threads),
                                          'run'])
        output = output.decode('utf-8')
        lines = output.splitlines()
        for i, line in enumerate(lines):
            # The "Throughput:" header is followed by the read line and then
            # the write line, e.g. "read, MiB/s: 1.55" / "written, MiB/s: 1.04"
            if 'Throughput:' in line:
                read_throughput.append(lines[i + 1].split()[2])
                write_throughput.append(lines[i + 2].split()[2])
    return read_throughput, write_throughput
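
# A minimal sketch, not part of the original gist: the fileio test needs its
# test files prepared on disk first (the docstring above does this by hand).
# `prepare_fileio` and `cleanup_fileio` are hypothetical helper names wrapping
# the documented `sysbench fileio ... prepare|cleanup` commands.
def prepare_fileio(file_total_size):
    subprocess.check_call(['sysbench', 'fileio',
                           '--file-total-size={}'.format(file_total_size),
                           'prepare'])

def cleanup_fileio(file_total_size):
    subprocess.check_call(['sysbench', 'fileio',
                           '--file-total-size={}'.format(file_total_size),
                           'cleanup'])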
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convenient wrapper around sysbench')
    parser.add_argument('-t', '--test', help='Benchmark test to run', choices=['cpu', 'fileio'], required=True)
    parser.add_argument('-r', '--runs', help='Number of runs', type=int, required=False)
    parser.add_argument('-n', '--size', help='--cpu-max-prime=N', type=int, required=False)
    parser.add_argument('--threads', help='Number of threads', type=int, required=False)
    parser.add_argument('--time', help='Total time to run in seconds (0 = no limit)', type=int, required=False)
    parser.add_argument('--events', help='Total events to perform', type=int, required=False)
    parser.add_argument('--file_total_size', help='Total size of test files, e.g. 6G', required=False)
    parser.add_argument('--file_test_mode', help='File test mode, e.g. rndrw', required=False)
    args = parser.parse_args()

    # Run the CPU test `runs` times with cpu-max-prime=`size` and print the
    # total time and average latency of each run.
    if args.test == 'cpu':
        time_results, latency_results = run_cpu_benchmark(args.size, args.runs, args.threads, args.time, args.events)
        print('CPU benchmark results after {} runs (cpu-max-prime={}):'.format(args.runs, args.size))
        print('Total time (s)')
        for result in time_results:
            print(result)
        print('Avg latency (ms)')
        for result in latency_results:
            print(result)
    elif args.test == 'fileio':
        read_throughput, write_throughput = run_fileio_benchmark(args.runs, args.threads, args.file_total_size, args.file_test_mode, args.time)
        print('Read throughput (MiB/s)')
        for result in read_throughput:
            print(result)
        print('Write throughput (MiB/s)')
        for result in write_throughput:
            print(result)