@bwasti
Created August 29, 2023 19:52
import time
import multiprocessing


def test_lock(lock, iterations, shared_value):
    for _ in range(iterations):
        with lock:
            shared_value.value += 1


def benchmark(lock_type, num_processes, iterations_per_process):
    shared_value = multiprocessing.Value('i', 0)
    lock = lock_type()
    start_time = time.time()
    processes = []
    for _ in range(num_processes):
        process = multiprocessing.Process(target=test_lock, args=(lock, iterations_per_process, shared_value))
        processes.append(process)
        process.start()
    for process in processes:
        process.join()
    end_time = time.time()
    execution_time = end_time - start_time
    total_iterations = num_processes * iterations_per_process
    locks_per_second = total_iterations / execution_time

    print(f"{lock_type.__name__} Benchmark Results:")
    print(f"Number of Processes: {num_processes}")
    print(f"Iterations per Process: {iterations_per_process}")
    print(f"Total Iterations: {total_iterations}")
    print(f"Shared Value: {shared_value.value}")
    print(f"Execution Time: {execution_time:.6f} seconds")
    print(f"Locks per Second: {locks_per_second:.2f}")


if __name__ == "__main__":
    num_processes = 4
    iterations_per_process = 100000
    benchmark(multiprocessing.Lock, num_processes, iterations_per_process)
    benchmark(multiprocessing.RLock, num_processes, iterations_per_process)

// g++ -std=c++11 -o mp_lock_bench mp_lock_bench.cc -lpthread
// ./mp_lock_bench
#include <iostream>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <fcntl.h>
#include <chrono>
#include <cstdlib>
void test_lock(int* shared_value, int iterations, int lock_fd) {
    for (int i = 0; i < iterations; ++i) {
        flock(lock_fd, LOCK_EX);
        ++(*shared_value);
        flock(lock_fd, LOCK_UN);
    }
}

void benchmark(int num_processes, int iterations_per_process) {
    // Counter lives in anonymous shared memory so all forked children see it.
    int* shared_value = static_cast<int*>(mmap(nullptr, sizeof(int), PROT_READ | PROT_WRITE,
                                                MAP_SHARED | MAP_ANONYMOUS, -1, 0));
    *shared_value = 0;
    // Create the lock file up front; each child re-opens it below.
    int lock_fd = open("/tmp/lockfile", O_CREAT | O_RDWR, 0666);
    auto start_time = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < num_processes; ++i) {
        pid_t pid = fork();
        if (pid == 0) {
            // flock() locks belong to the open file description. A child that
            // reused the fd inherited across fork() would share the parent's
            // description, so the children's LOCK_EX calls would not exclude
            // each other. Each child therefore opens the file itself to get
            // its own file description.
            int child_fd = open("/tmp/lockfile", O_RDWR);
            test_lock(shared_value, iterations_per_process, child_fd);
            close(child_fd);
            exit(0);
        }
    }
    for (int i = 0; i < num_processes; ++i) {
        wait(nullptr);
    }
    auto end_time = std::chrono::high_resolution_clock::now();
    auto execution_time = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time).count();
    double locks_per_second = (num_processes * iterations_per_process) / (execution_time / 1e6);
    std::cout << "Fork-based Benchmark Results:" << std::endl;
    std::cout << "Number of Processes: " << num_processes << std::endl;
    std::cout << "Iterations per Process: " << iterations_per_process << std::endl;
    std::cout << "Total Iterations: " << (num_processes * iterations_per_process) << std::endl;
    std::cout << "Shared Value: " << *shared_value << std::endl;
    std::cout << "Execution Time: " << execution_time << " microseconds" << std::endl;
    std::cout << "Locks per Second: " << locks_per_second << std::endl;
    close(lock_fd);
    munmap(shared_value, sizeof(int));
}

int main() {
    int num_processes = 4;
    int iterations_per_process = 100000;
    benchmark(num_processes, iterations_per_process);
    return 0;
}
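
For comparison, here is a minimal sketch (not part of the original gist) of the same fork-based benchmark built around a process-shared pthread_mutex placed directly in the MAP_SHARED region instead of an flock() file lock; the file name pshared_mutex_bench.cc is only a placeholder.

// g++ -std=c++11 -o pshared_mutex_bench pshared_mutex_bench.cc -lpthread
// ./pshared_mutex_bench
#include <iostream>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <chrono>

// Mutex and counter live together in anonymous shared memory so that all
// forked children operate on the same lock and the same value.
struct Shared {
    pthread_mutex_t mutex;
    int value;
};

int main() {
    int num_processes = 4;
    int iterations_per_process = 100000;
    Shared* shared = static_cast<Shared*>(mmap(nullptr, sizeof(Shared), PROT_READ | PROT_WRITE,
                                               MAP_SHARED | MAP_ANONYMOUS, -1, 0));
    shared->value = 0;
    // Mark the mutex as process-shared so it is valid across fork().
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
    pthread_mutex_init(&shared->mutex, &attr);
    auto start_time = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < num_processes; ++i) {
        if (fork() == 0) {
            for (int j = 0; j < iterations_per_process; ++j) {
                pthread_mutex_lock(&shared->mutex);
                ++shared->value;
                pthread_mutex_unlock(&shared->mutex);
            }
            _exit(0);
        }
    }
    for (int i = 0; i < num_processes; ++i) {
        wait(nullptr);
    }
    auto end_time = std::chrono::high_resolution_clock::now();
    auto execution_time = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time).count();
    double locks_per_second = (num_processes * iterations_per_process) / (execution_time / 1e6);
    std::cout << "pthread_mutex (process-shared) Benchmark Results:" << std::endl;
    std::cout << "Shared Value: " << shared->value << std::endl;
    std::cout << "Execution Time: " << execution_time << " microseconds" << std::endl;
    std::cout << "Locks per Second: " << locks_per_second << std::endl;
    munmap(shared, sizeof(Shared));
    return 0;
}
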
// g++ -std=c++11 -o thread_lock_bench thread_lock_bench.cc -lpthread
// ./thread_lock_bench
#include <iostream>
#include <thread>
#include <mutex>
#include <chrono>
#include <vector>
#include <string>

std::mutex mtx;

void test_lock(std::mutex& lock, int iterations, int& shared_value) {
    for (int i = 0; i < iterations; ++i) {
        std::lock_guard<std::mutex> guard(lock);
        ++shared_value;
    }
}

void benchmark(std::string lock_type, int num_threads, int iterations_per_thread) {
    int shared_value = 0;
    auto start_time = std::chrono::high_resolution_clock::now();
    std::vector<std::thread> threads;
    for (int i = 0; i < num_threads; ++i) {
        threads.emplace_back(test_lock, std::ref(mtx), iterations_per_thread, std::ref(shared_value));
    }
    for (auto& thread : threads) {
        thread.join();
    }
    auto end_time = std::chrono::high_resolution_clock::now();
    auto execution_time = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time).count();
    double locks_per_second = (num_threads * iterations_per_thread) / (execution_time / 1e6);

    std::cout << lock_type << " Benchmark Results:" << std::endl;
    std::cout << "Number of Threads: " << num_threads << std::endl;
    std::cout << "Iterations per Thread: " << iterations_per_thread << std::endl;
    std::cout << "Total Iterations: " << (num_threads * iterations_per_thread) << std::endl;
    std::cout << "Shared Value: " << shared_value << std::endl;
    std::cout << "Execution Time: " << execution_time << " microseconds" << std::endl;
    std::cout << "Locks per Second: " << locks_per_second << std::endl;
}

int main() {
    int num_threads = 4;
    int iterations_per_thread = 100000;
    benchmark("std::mutex", num_threads, iterations_per_thread);
    return 0;
}
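
The Python file above compares Lock with RLock, but the threaded C++ file only measures std::mutex. A minimal sketch (not in the original gist) that also times std::recursive_mutex, the rough C++ analog of multiprocessing.RLock, could look like the following; the templated run() helper and the file name recursive_lock_bench.cc are placeholders.

// g++ -std=c++11 -o recursive_lock_bench recursive_lock_bench.cc -lpthread
// ./recursive_lock_bench
#include <iostream>
#include <thread>
#include <mutex>
#include <chrono>
#include <vector>

// Run the same increment loop under any mutex type so std::mutex and
// std::recursive_mutex can be compared with identical code.
template <typename MutexT>
void run(const char* name, int num_threads, int iterations_per_thread) {
    MutexT mtx;
    int shared_value = 0;
    auto start_time = std::chrono::high_resolution_clock::now();
    std::vector<std::thread> threads;
    for (int i = 0; i < num_threads; ++i) {
        threads.emplace_back([&] {
            for (int j = 0; j < iterations_per_thread; ++j) {
                std::lock_guard<MutexT> guard(mtx);
                ++shared_value;
            }
        });
    }
    for (auto& thread : threads) {
        thread.join();
    }
    auto end_time = std::chrono::high_resolution_clock::now();
    auto execution_time = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time).count();
    double locks_per_second = (num_threads * iterations_per_thread) / (execution_time / 1e6);
    std::cout << name << ": shared value " << shared_value
              << ", " << locks_per_second << " locks per second" << std::endl;
}

int main() {
    run<std::mutex>("std::mutex", 4, 100000);
    run<std::recursive_mutex>("std::recursive_mutex", 4, 100000);
    return 0;
}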