Go:
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	v := uint64(0)
	wg := sync.WaitGroup{}
	for i := 0; i < 30000; i++ {
		wg.Add(1)
		go func() {
			for i := 0; i < 1000; i++ {
				atomic.AddUint64(&v, 1)
			}
			wg.Done()
		}()
	}
	wg.Wait()
	fmt.Printf("v is now %d\n", v)
}
Rust:
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

fn main() {
    let v = Arc::new(AtomicUsize::new(0));
    let mut join_handles = vec![];
    for _ in 0..30_000 {
        let v = v.clone();
        let join_handle = thread::spawn(move || {
            for _ in 0..1000 {
                v.fetch_add(1, Ordering::Relaxed);
            }
        });
        join_handles.push(join_handle);
    }
    for join_handle in join_handles.into_iter() {
        join_handle.join().unwrap();
    }
    println!("v is now {}", v.load(Ordering::Acquire));
}
Go should definitely be faster at spinning up 30k concurrency primitives and getting some work done, because goroutines, right?
λ go build atomic.go
λ time ./atomic
v is now 30000000
./atomic 2.71s user 0.01s system 391% cpu 0.695 total
3.91 cores utilized * 0.695 seconds = 2.7 CPU seconds
not bad, but:
λ cargo build --release
λ time target/release/atomic
v is now 30000000
target/release/atomic 0.71s user 0.80s system 236% cpu 0.637 total
2.36 cores utilized * 0.637 seconds = 1.5 CPU seconds
Wait, but aren't threads bad? Hmm... maybe it's not so simple.
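If you want to poke at just the "spinning up 30k concurrency primitives" part, here's a minimal sketch (not from the benchmark above) that spawns the same 30,000 OS threads with empty bodies and times only spawn/join overhead, leaving out the contended atomic increments:

Rust:
use std::thread;
use std::time::Instant;

fn main() {
    let start = Instant::now();
    let mut join_handles = vec![];
    for _ in 0..30_000 {
        // Each thread does no work, so the measured time is dominated by
        // spawn/join overhead rather than the fetch_add loop.
        join_handles.push(thread::spawn(|| {}));
    }
    for join_handle in join_handles {
        join_handle.join().unwrap();
    }
    println!("spawned and joined 30k threads in {:?}", start.elapsed());
}

Timing an empty-goroutine version the same way would show how much of each total above is scheduling overhead versus the atomic traffic itself.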