LazyArc
[dependencies]
parking_lot_core = "0.1"
// Copyright 2017 Simon Sapin
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option.
//! Similar to https://crates.io/crates/lazy_static but:
//!
//! * The static value can be “deinitialized” (dropped).
//! `Arc` is used to do so safely without invalidating existing references.
//! * Initialization can return an error (for example if it involves parsing).
//!
//! # Example
//!
//! ```rust
//! static FOO: LazyArc<Foo> = LazyArc::INIT;
//!
//! let foo = FOO.get_or_create(|| Ok(Arc::new(include_str!("something").parse()?)))?;
//! ```
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::marker::PhantomData;
use std::mem::{self, ManuallyDrop};
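// `RawMutex` is a `Mutex<()>` with a `const` initializer; it is defined in raw_mutex.rs, below.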
use self::raw_mutex::RawMutex;
mod raw_mutex;
pub struct LazyArc<T: Send + Sync> {
    poltergeist: PhantomData<Arc<T>>,
    mutex: RawMutex,
    ptr: AtomicUsize,
}

impl<T: Send + Sync> LazyArc<T> {
    pub const INIT: Self = LazyArc {
        poltergeist: PhantomData,
        mutex: RawMutex::INIT,
        ptr: ATOMIC_USIZE_INIT,
    };
    // FIXME: figure out minimal Ordering for atomic operations

    /// Return a new `Arc` reference to the singleton `T` object.
    ///
    /// If this singleton was not already initialized,
    /// call the given closure now (it may return an error) to initialize it.
    ///
    /// Calling this repeatedly will only initialize once (until `.drop()` is called).
    pub fn get_or_create<F, E>(&self, create: F) -> Result<Arc<T>, E>
        where F: FnOnce() -> Result<Arc<T>, E>
    {
        macro_rules! try_load {
            () => {
                let ptr = self.ptr.load(Ordering::SeqCst);
                if ptr != 0 {
                    // Already initialized.
                    // We want to create a new strong reference (with `clone()`)
                    // but not drop the existing one.
                    // `Arc::from_raw` normally takes ownership of a strong reference,
                    // so use `ManuallyDrop` to skip running that destructor.
                    let ptr = ptr as *const T;
                    let careful_dont_drop_it = ManuallyDrop::new(unsafe { Arc::from_raw(ptr) });
                    return Ok(Arc::clone(&*careful_dont_drop_it))
                }
            }
        }

        // First try to obtain an Arc from the atomic pointer without taking the mutex
        try_load!();

        // Synchronize initialization
        struct RawMutexGuard<'a>(&'a RawMutex);
        impl<'a> Drop for RawMutexGuard<'a> {
            fn drop(&mut self) {
                self.0.unlock()
            }
        }

        self.mutex.lock();
        let _guard = RawMutexGuard(&self.mutex);

        // Try again in case some other thread raced us while we were taking the mutex
        try_load!();

        // Now we’ve observed the atomic pointer uninitialized after taking the mutex:
        // we’re definitely first
        let data = create()?;
        let new_ptr = Arc::into_raw(data.clone()) as usize;
        self.ptr.store(new_ptr, Ordering::SeqCst);
        Ok(data)
    }

    /// Deinitialize this singleton, dropping the internal `Arc` reference.
    ///
    /// Calling `.get_or_create()` again afterwards will create a new `T` object.
    ///
    /// The previous `T` object may continue to live as long
    /// as other `Arc` references to it exist.
    pub fn drop(&self) {
        let ptr = self.ptr.swap(0, Ordering::SeqCst);
        if ptr != 0 {
            unsafe {
                mem::drop(Arc::from_raw(ptr as *const T))
            }
        }
    }
}
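// A minimal usage sketch, not part of the original gist: it exercises
// `get_or_create` (including the error path) and `drop`. The `Config` type,
// `parse_config`, and the input strings are made up for illustration.
#[cfg(test)]
mod tests {
    use super::LazyArc;
    use std::num::ParseIntError;
    use std::sync::Arc;

    struct Config {
        threads: usize,
    }

    static CONFIG: LazyArc<Config> = LazyArc::INIT;

    fn parse_config(input: &str) -> Result<Arc<Config>, ParseIntError> {
        Ok(Arc::new(Config { threads: input.trim().parse()? }))
    }

    #[test]
    fn create_and_drop() {
        // A failed closure leaves the singleton uninitialized; the error is returned.
        assert!(CONFIG.get_or_create(|| parse_config("not a number")).is_err());

        // The first successful call initializes; later calls reuse the same `Arc`.
        let first = CONFIG.get_or_create(|| parse_config("4")).unwrap();
        let again = CONFIG.get_or_create(|| parse_config("8")).unwrap();
        assert_eq!(first.threads, 4);
        assert!(Arc::ptr_eq(&first, &again));

        // After `drop`, existing `Arc`s remain valid and the next call re-initializes.
        CONFIG.drop();
        let fresh = CONFIG.get_or_create(|| parse_config("8")).unwrap();
        assert_eq!(first.threads, 4);
        assert_eq!(fresh.threads, 8);
    }
}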
raw_mutex.rs

// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! `Mutex<()>` with a `const` initializer.
//!
//! Imported from an older (simpler) version of https://crates.io/crates/parking_lot
//! https://github.com/Amanieu/parking_lot/blob/79af298dd8f3b1b848baffce992c340ce8ac73de/src/raw_mutex.rs
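// The upstream implementation uses `u8`-sized atomics; `AtomicU8` was not yet
// stable at the time, so `AtomicUsize` is aliased to the same names here.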
use std::sync::atomic::{AtomicUsize as AtomicU8, ATOMIC_USIZE_INIT as ATOMIC_U8_INIT, Ordering};
use parking_lot_core::{self, UnparkResult, SpinWait, UnparkToken, DEFAULT_PARK_TOKEN};
type U8 = usize;
// UnparkToken used to indicate that the target thread should attempt to
// lock the mutex again as soon as it is unparked.
pub const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
// UnparkToken used to indicate that the mutex is being handed off to the target
// thread directly without unlocking it.
pub const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
const LOCKED_BIT: U8 = 1;
const PARKED_BIT: U8 = 2;
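// `state` acts as a small state machine:
//   0                       -> unlocked, nobody waiting
//   LOCKED_BIT              -> locked, no parked threads
//   PARKED_BIT              -> unlocked, but parked threads may still be queued
//   LOCKED_BIT | PARKED_BIT -> locked, with parked threads waiting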
pub struct RawMutex {
    state: AtomicU8,
}

impl RawMutex {
    pub const INIT: Self = RawMutex {
        state: ATOMIC_U8_INIT,
    };
    #[inline]
    pub fn lock(&self) {
        if self.state
            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .is_ok() {
            return;
        }
        self.lock_slow();
    }

    #[inline]
    pub fn unlock(&self) {
        if self.state
            .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
            .is_ok() {
            return;
        }
        self.unlock_slow(false);
    }
    #[cold]
    #[inline(never)]
    fn lock_slow(&self) {
        let mut spinwait = SpinWait::new();
        let mut state = self.state.load(Ordering::Relaxed);
        loop {
            // Grab the lock if it isn't locked, even if there is a queue on it
            if state & LOCKED_BIT == 0 {
                match self.state
                    .compare_exchange_weak(state,
                                           state | LOCKED_BIT,
                                           Ordering::Acquire,
                                           Ordering::Relaxed) {
                    Ok(_) => return,
                    Err(x) => state = x,
                }
                continue;
            }

            // If there is no queue, try spinning a few times
            if state & PARKED_BIT == 0 && spinwait.spin() {
                state = self.state.load(Ordering::Relaxed);
                continue;
            }

            // Set the parked bit
            if state & PARKED_BIT == 0 {
                if let Err(x) = self.state.compare_exchange_weak(state,
                                                                 state | PARKED_BIT,
                                                                 Ordering::Relaxed,
                                                                 Ordering::Relaxed) {
                    state = x;
                    continue;
                }
            }

            // Park our thread until we are woken up by an unlock
            unsafe {
                let addr = self as *const _ as usize;
                let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
                let before_sleep = || {};
                let timed_out = |_, _| unreachable!();
                if parking_lot_core::park(addr,
                                          validate,
                                          before_sleep,
                                          timed_out,
                                          DEFAULT_PARK_TOKEN,
                                          None) == Some(TOKEN_HANDOFF) {
                    // The thread that unparked us passed the lock on to us
                    // directly without unlocking it.
                    return;
                }
            }

            // Loop back and try locking again
            spinwait.reset();
            state = self.state.load(Ordering::Relaxed);
        }
    }
    #[cold]
    #[inline(never)]
    fn unlock_slow(&self, force_fair: bool) {
        // Unlock directly if there are no parked threads
        if self.state
            .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
            .is_ok() {
            return;
        }

        // Unpark one thread and leave the parked bit set if there might
        // still be parked threads on this address.
        unsafe {
            let addr = self as *const _ as usize;
            let callback = |result: UnparkResult| {
                // If we are using a fair unlock then we should keep the
                // mutex locked and hand it off to the unparked thread.
                if result.unparked_threads != 0 && (force_fair || result.be_fair) {
                    // Clear the parked bit if there are no more parked
                    // threads.
                    if !result.have_more_threads {
                        self.state.store(LOCKED_BIT, Ordering::Relaxed);
                    }
                    return TOKEN_HANDOFF;
                }

                // Clear the locked bit, and the parked bit as well if there
                // are no more parked threads.
                if result.have_more_threads {
                    self.state.store(PARKED_BIT, Ordering::Release);
                } else {
                    self.state.store(0, Ordering::Release);
                }
                TOKEN_NORMAL
            };
            parking_lot_core::unpark_one(addr, callback);
        }
    }
}
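// A minimal sketch, not part of the original gist: `RawMutex` protecting a
// non-atomic counter across threads. The thread and iteration counts are
// arbitrary test parameters.
#[cfg(test)]
mod tests {
    use super::RawMutex;
    use std::thread;

    #[test]
    fn counter_is_consistent() {
        static MUTEX: RawMutex = RawMutex::INIT;
        static mut COUNTER: usize = 0;

        let handles: Vec<_> = (0..8)
            .map(|_| {
                thread::spawn(|| {
                    for _ in 0..1000 {
                        MUTEX.lock();
                        // The lock makes this non-atomic read-modify-write data-race free.
                        unsafe { COUNTER += 1 };
                        MUTEX.unlock();
                    }
                })
            })
            .collect();
        for handle in handles {
            handle.join().unwrap();
        }
        // All threads have been joined, so reading without the lock is fine here.
        assert_eq!(unsafe { COUNTER }, 8 * 1000);
    }
}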