//! Crust of Rust: Atomics and Memory Ordering // My Notes
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicBool, Ordering};
const LOCKED: bool = true;
const UNLOCKED: bool = false;
pub struct Mutex<T> {
    locked: AtomicBool,
    v: UnsafeCell<T>,
}
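// Safety: the lock guarantees at most one thread accesses `v` at a time, so
// sharing `Mutex<T>` across threads only requires that `T` itself can be
// sent between threads.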
unsafe impl<T> Sync for Mutex<T> where T: Send {}
impl<T> Mutex<T> {
    pub fn new(t: T) -> Self {
        Self {
            locked: AtomicBool::new(UNLOCKED),
            v: UnsafeCell::new(t),
        }
    }
    /// pesky thread interleavings
    ///
    /// This version is broken because there is a race between the `load` and
    /// the `store`: multiple threads can all observe `UNLOCKED` before any of
    /// them stores `LOCKED`, so several threads "win" at once and enter the
    /// critical section together (see `demo_broken_lock` at the end of this
    /// file).
    //pub fn with_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
    //    while self.locked.load(Ordering::Relaxed) != UNLOCKED {}
    //    // maybe another thread runs here
    //    std::thread::yield_now();
    //    self.locked.store(LOCKED, Ordering::Relaxed);
    //    // Safety: we hold the lock, therefore we can create a mutable reference.
    //    let ret = f(unsafe { &mut *self.v.get() });
    //    self.locked.store(UNLOCKED, Ordering::Relaxed);
    //    ret
    //}
    /// compare_exchange
    ///
    /// There is no gap between loading and storing; the test-and-set is a
    /// single atomic operation. However, calling it in a tight loop can be
    /// expensive: on a multi-core CPU every failed compare_exchange still
    /// forces the core to take exclusive ownership of the cache line holding
    /// the lock, so the cores contend with one another.
    //pub fn with_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
    //    while self
    //        .locked
    //        .compare_exchange(UNLOCKED, LOCKED, Ordering::Relaxed, Ordering::Relaxed)
    //        .is_err()
    //    {
    //        // MESI protocol: spin on plain loads while the lock is held, so
    //        // the cache line stays in the shared state instead of bouncing
    //        // exclusive ownership between cores.
    //        while self.locked.load(Ordering::Relaxed) == LOCKED {}
    //    }
    //    // Safety: we hold the lock, therefore we can create a mutable reference.
    //    let ret = f(unsafe { &mut *self.v.get() });
    //    self.locked.store(UNLOCKED, Ordering::Relaxed);
    //    ret
    //}
    /// Ordering: Acquire/Release
    ///
    /// The `Acquire` on a successful compare_exchange pairs with the `Release`
    /// store that unlocks: everything the previous holder wrote before
    /// releasing the lock happens-before, and is therefore visible to, the
    /// next acquirer.
    pub fn with_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        while self
            .locked
            .compare_exchange(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            // MESI protocol: spin on plain loads while the lock is held
            // (reads keep the cache line shared rather than repeatedly
            // taking exclusive access).
            while self.locked.load(Ordering::Relaxed) == LOCKED {
                std::thread::yield_now();
            }
            std::thread::yield_now();
        }
        // Safety: we hold the lock, therefore we can create a mutable reference.
        let ret = f(unsafe { &mut *self.v.get() });
        self.locked.store(UNLOCKED, Ordering::Release);
        ret
    }
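    /// Sketch (my addition, not from the original notes): the same lock using
    /// `compare_exchange_weak`, which is allowed to fail spuriously. In a
    /// retry loop that is harmless, and on LL/SC architectures such as ARM
    /// the weak variant can compile to a single load-linked/store-conditional
    /// pair instead of a nested loop, so it is the usual choice here.
    pub fn with_lock_weak<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        while self
            .locked
            .compare_exchange_weak(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            while self.locked.load(Ordering::Relaxed) == LOCKED {
                std::thread::yield_now();
            }
        }
        // Safety: we hold the lock, therefore we can create a mutable reference.
        let ret = f(unsafe { &mut *self.v.get() });
        self.locked.store(UNLOCKED, Ordering::Release);
        ret
    }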
    // Ordering: SeqCst // See the chapter in the video: https://youtu.be/rMGWeSjctlY
}
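// Sketch (my addition): the classic store-buffering example showing where
// Acquire/Release is not enough. If the four operations below used Release
// stores and Acquire loads, both threads could observe `false`, because the
// threads need not agree on a single order of the two stores. SeqCst imposes
// one total order on all SeqCst operations, which forbids that outcome.
#[allow(dead_code)]
fn demo_seq_cst() {
    static X: AtomicBool = AtomicBool::new(false);
    static Y: AtomicBool = AtomicBool::new(false);
    let t1 = std::thread::spawn(|| {
        X.store(true, Ordering::SeqCst);
        Y.load(Ordering::SeqCst)
    });
    let t2 = std::thread::spawn(|| {
        Y.store(true, Ordering::SeqCst);
        X.load(Ordering::SeqCst)
    });
    let (r1, r2) = (t1.join().unwrap(), t2.join().unwrap());
    // With SeqCst, at least one thread must observe the other's store.
    assert!(r1 || r2);
}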
use std::thread::spawn;
fn main() {
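    // Leak the Box to get a `&'static Mutex<_>`: `thread::spawn` requires
    // captured references to be `'static`, since the spawned threads may
    // outlive `main`'s stack frame.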
    let l: &'static _ = Box::leak(Box::new(Mutex::new(0)));
    let handles: Vec<_> = (0..10)
        .map(|_| {
            spawn(move || {
                for _ in 0..100 {
                    l.with_lock(|v| {
                        *v += 1;
                    });
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().expect("join all handles");
    }
    assert_eq!(l.with_lock(|v| *v), 10 * 100);
}
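// Sketch (my addition, referenced from the first `with_lock` version above):
// exhibits the load/store race directly. Two threads can both read `UNLOCKED`
// before either stores `LOCKED`, so both enter the "critical section" and the
// non-atomic read-modify-write loses updates; COUNT usually ends up < 2000.
#[allow(dead_code)]
fn demo_broken_lock() {
    use std::sync::atomic::AtomicUsize;
    static LOCK: AtomicBool = AtomicBool::new(UNLOCKED);
    static COUNT: AtomicUsize = AtomicUsize::new(0);
    let contender = || {
        spawn(|| {
            for _ in 0..1_000 {
                while LOCK.load(Ordering::Relaxed) != UNLOCKED {}
                std::thread::yield_now(); // widen the race window
                LOCK.store(LOCKED, Ordering::Relaxed);
                // Non-atomic read-modify-write of the "protected" counter.
                let c = COUNT.load(Ordering::Relaxed);
                COUNT.store(c + 1, Ordering::Relaxed);
                LOCK.store(UNLOCKED, Ordering::Relaxed);
            }
        })
    };
    let (t1, t2) = (contender(), contender());
    t1.join().unwrap();
    t2.join().unwrap();
    println!("COUNT = {} (expected 2000)", COUNT.load(Ordering::Relaxed));
}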