Skip to content

Instantly share code, notes, and snippets.

@smarie
Last active March 21, 2020 12:53
Show Gist options
  • Save smarie/25d1ba3e4db4c00d216c85ca619428e9 to your computer and use it in GitHub Desktop.
Save smarie/25d1ba3e4db4c00d216c85ca619428e9 to your computer and use it in GitHub Desktop.
An experiment comparing the various timers available on the python platform.
"""
An experiment comparing the various timers available on the python platform.
For each available timer (see `TIMER_METHODS` below), the following experiment is run:
- spawn `nb_processes` parallel processes
- each process performs a `for` loop with `nb_occ` iterations.
- each iteration uses the timer to measure the execution time around a `time.sleep(sleep_duration)` call
All experiments are run in sequence, one after the other.
The results are displayed as boxplots using matplotlib.
On windows 10 + conda python 3.7 what I could observe is that
- there is no stability across runs: sometimes `time` is more stable, sometimes `process_time` is
- `process_time` and `process_time_ns` always return zero as they do not count the `sleep()` time
"""
import gc
import multiprocessing
from collections import OrderedDict
from copy import deepcopy
from os import getpid
from random import shuffle
from time import perf_counter, perf_counter_ns, sleep, time, process_time, process_time_ns, get_clock_info
from timeit import default_timer

import numpy as np
# Candidate timers to benchmark. The list is shuffled so that no method
# systematically benefits from running first or last in the sequence.
_candidates = [
    ('time.time', time),
    ('time.perf_counter', perf_counter),
    ('time.perf_counter_ns', perf_counter_ns),
    # the process_time variants do not count time spent in sleep(),
    # so they will always measure ~zero in this experiment
    ('time.process_time', process_time),
    ('time.process_time_ns', process_time_ns),
    # documented to be the same clock as time.perf_counter
    ('timeit.default_timer', default_timer),
]
shuffle(_candidates)
TIMER_METHODS = OrderedDict(_candidates)
# number of parallel processes spawned for each experiment
nb_processes = 10
# number of timed sleep() calls performed by each process
nb_occ = 10000
# delay (in seconds) passed to time.sleep() at each iteration
sleep_duration = 0.005
# when True, the garbage collector is turned off around each experiment
disable_gc = True
def run_sleep_count(return_dict, timer=time, sleep_duration=sleep_duration, nb_occurences=nb_occ):
    """Measure `nb_occurences` calls to `sleep(sleep_duration)` with `timer`.

    The measured durations (converted to seconds) are stored as a numpy array
    in `return_dict`, keyed by this process' pid. `return_dict` is meant to be
    a `multiprocessing.Manager().dict()` shared with the parent process.
    """
    pid = getpid()
    measured = []
    print("[%s] started" % pid)
    # nanosecond-resolution timers need their deltas converted to seconds;
    # the timer does not change, so the check is hoisted out of the loop
    returns_nanos = timer in (process_time_ns, perf_counter_ns)
    for _ in range(nb_occurences):
        begin = timer()
        sleep(sleep_duration)
        elapsed = timer() - begin
        if returns_nanos:
            elapsed *= 1e-9
        measured.append(elapsed)
    print("[%s] completed" % pid)
    return_dict[pid] = np.array(measured)
if __name__ == '__main__':
    # Display the platform-specific characteristics of each available clock.
    for name in ['monotonic', 'perf_counter', 'process_time', 'thread_time', 'time']:
        ci = get_clock_info(name)
        print("Clock '%s': adjustable: %s, monotonic: %s, resolution: %s, impl: %s"
              % (name, ci.adjustable, ci.monotonic, ci.resolution, ci.implementation))

    all_results = []
    for timer_method_name, timer_method in TIMER_METHODS.items():
        print("starting experiments for timer method: %s" % timer_method_name)

        # Optionally disable the garbage collector so that collection pauses do
        # not pollute the measured durations; remember its previous state.
        if disable_gc:
            gcold = gc.isenabled()
            gc.disable()
        else:
            gcold = False

        # Shared dict in which each child process stores its durations array.
        manager = multiprocessing.Manager()
        return_dict = manager.dict()

        # Spawn the worker processes for this timer method.
        processes = [multiprocessing.Process(target=run_sleep_count, args=(return_dict, timer_method))
                     for _ in range(nb_processes)]
        try:
            for process in processes:
                process.start()
            for process in processes:
                process.join()
        except KeyboardInterrupt:
            # Best-effort cleanup: only terminate processes that actually
            # started (terminate() on a never-started Process raises).
            for process in processes:
                if process.is_alive():
                    process.terminate()
                    process.join()
        finally:
            # Re-enable the GC only if it was enabled before this experiment,
            # even if the experiment was interrupted.
            if gcold:
                gc.enable()

        assert len(return_dict) == nb_processes
        # deep copy just in case the shared memory variable has some side-effect
        all_results.append(deepcopy(dict(return_dict)))

    assert len(all_results) == len(TIMER_METHODS)

    # Finally plot the results: one boxplot panel per timer method,
    # one box per worker process (labelled with its pid).
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(1, len(TIMER_METHODS), sharey='all')
    for i, (timer_method_name, return_dict) in enumerate(zip(TIMER_METHODS, all_results)):
        ax = axes[i]
        ax.boxplot(return_dict.values(), labels=list(return_dict.keys()))
        ax.set_title("Method: %s" % timer_method_name)

    # Try to maximize the window. The API is backend-specific, and some
    # backends (e.g. 'agg') expose neither `window` nor `frame`, so this is
    # strictly best-effort: never let it prevent the plot from showing.
    figManager = plt.get_current_fig_manager()
    backend = plt.get_backend()
    print("Matplotlib backend: %s" % backend)
    try:
        if backend == 'TkAgg':
            figManager.window.state('zoomed')
        elif backend.startswith('Qt'):
            figManager.window.showMaximized()
        elif backend == 'wxAgg':
            figManager.frame.Maximize(True)
    except AttributeError:
        pass
    plt.show()
@smarie
Copy link
Author

smarie commented Mar 20, 2020

Example output:

image

@smarie
Copy link
Author

smarie commented Mar 20, 2020

The results are not very consistent across runs:

image

image

@smarie
Copy link
Author

smarie commented Mar 21, 2020

Update: I added a flag to disable garbage collector. Results do not seem to significantly change, we still see random "long" runs:

image

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment