// Gist by @pythonesque (last active August 29, 2015).
// Implements http://rosettacode.org/wiki/Checkpoint_synchronization
//
// We implement this task using Rust's Barriers. A barrier is simply a thread
// synchronization point: when a task waits at a barrier, it cannot continue
// until as many tasks as the barrier was initialized for are also waiting
// there, at which point all of them are released at once. This lets threads
// do asynchronous work while still guaranteeing properties at checkpoints.
// A standalone sketch of the rendezvous by itself follows the imports below.
use std::sync::atomic::AtomicBool;
use std::sync::atomic as atomics;
use std::sync::{Arc, Barrier};
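// A minimal sketch of the barrier rendezvous in isolation (illustration
// only, using the same pre-1.0 APIs as the task below): no task gets past
// wait() until all N tasks have reached it.
#[test]
fn test_barrier_rendezvous() {
    static N: uint = 4;
    let barrier = Arc::new(Barrier::new(N));
    let (tx, rx) = channel();
    for _ in range(0, N) {
        let barrier = barrier.clone();
        let tx = tx.clone();
        spawn(proc() {
            // Blocks until all N tasks arrive here.
            barrier.wait();
            tx.send(());
        });
    }
    drop(tx);
    // Every send happens after the rendezvous, so all N messages arrive.
    for _ in range(0, N) {
        rx.recv();
    }
}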
pub fn checkpoint() {
static NUM_TASKS: uint = 10;
static NUM_ITERATIONS: u8 = 10;
let barrier = Barrier::new(NUM_TASKS);
let mut events: [AtomicBool, ..NUM_TASKS];
unsafe {
// Unsafe because fixed-size arrays of a non-Clone type cannot be
// initialized directly. Write each element in place: plain assignment
// would run the destructor of the uninitialized garbage it overwrites.
events = ::std::mem::uninitialized();
for e in events.iter_mut() {
// Events are initially off
::std::ptr::write(e as *mut _, AtomicBool::new(false));
}
}
// Arc for sharing between tasks
let arc = Arc::new((barrier, events));
// Channel for communicating when tasks are done
let (tx, rx) = channel();
for i in range(0, NUM_TASKS) {
let arc = arc.clone();
let tx = tx.clone();
// Spawn a new worker
spawn(proc() {
let (ref barrier, ref events) = *arc;
// Assign an event to this task
let ref event = events[i];
// Start processing events
for _ in range(0, NUM_ITERATIONS) {
// Between checkpoints 4 and 1, turn this task's event on.
event.store(true, atomics::Release);
// Checkpoint 1
barrier.wait();
// Between checkpoints 1 and 2, all events are on.
assert!(events.iter().all( |e| e.load(atomics::Acquire) ));
// Checkpoint 2
barrier.wait();
// Between checkpoints 2 and 3, turn this task's event off.
event.store(false, atomics::Release);
// Checkpoint 3
barrier.wait();
// Between checkpoints 3 and 4, all events are off.
assert!(events.iter().all( |e| !e.load(atomics::Acquire) ));
// Checkpoint 4
barrier.wait();
}
// Finish processing events.
tx.send(());
println!("{}", i);
})
}
drop(tx);
// The main thread will not exit until all tasks have exited.
for _ in range(0, NUM_TASKS) {
rx.recv();
}
}
#[cfg(not(test))]
fn main() {
checkpoint();
}
#[test]
fn test_checkpoint() {
checkpoint();
}
/*
// Rust has a perfectly good Semaphore type already. It lacks count(), though,
// so we can't use it directly.
//
// Instead of just copying its implementation, which would be boring, we present
// a very silly, but correct, variant.
#![feature(unsafe_destructor)]
extern crate sync;
use std::io::timer;
use std::ptr;
use std::sync::Arc;
use std::sync::atomic::AtomicUint;
use std::sync::atomic as atomics;
use std::time::duration::Duration;
use sync::mutex::{Guard, Mutex};
static MAX_COUNT: uint = 4;
pub struct CountingSemaphore {
locks: [Mutex, .. MAX_COUNT],
contending: AtomicUint,
remaining: AtomicUint,
}
pub struct CountingSemaphoreGuard<'a> {
_guard: Guard<'a>,
sem: &'a CountingSemaphore,
}
impl CountingSemaphore {
pub fn new() -> CountingSemaphore {
CountingSemaphore {
locks: unsafe {
let mut locks: [Mutex, .. MAX_COUNT] = ::std::mem::uninitialized();
for l in locks.iter_mut() {
ptr::write(l as *mut _, Mutex::new());
}
locks
},
contending: AtomicUint::new(0),
remaining: AtomicUint::new(MAX_COUNT),
}
}
// Acquire the resource, blocking if the count hits zero, and return the
// resource count before it was acquired as well as a Guard that, when
// dropped, will release the resource.
pub fn acquire(&self) -> (CountingSemaphoreGuard, uint) {
let contending = self.contending.fetch_add(1, atomics::SeqCst);
let guard = self.locks[contending % MAX_COUNT].lock();
let count = self.remaining.fetch_sub(1, atomics::SeqCst);
(CountingSemaphoreGuard { _guard: guard, sem: self }, count)
}
}
impl<'a> CountingSemaphoreGuard<'a> {
// Release the resource, returning the resource count after it was released.
pub fn release(self) -> uint {
self.sem.remaining.fetch_add(1, atomics::SeqCst) + 1
}
}
#[unsafe_destructor]
impl<'a> Drop for CountingSemaphoreGuard<'a> {
fn drop(&mut self) {
self.sem.contending.fetch_sub(1, atomics::SeqCst);
}
}
fn main() {
static NUM_THREADS: u8 = 10;
let sem = Arc::new(CountingSemaphore::new());
let duration = Duration::seconds(1) / 10;
let (tx, rx) = channel();
for i in range(0, NUM_THREADS) {
let sem = sem.clone();
let tx = tx.clone();
spawn(proc() {
let (guard, count) = sem.acquire();
println!("Worker {} before acquire: count = {}", i, count);
timer::sleep(duration);
let count = guard.release();
println!("Worker {} after release: count = {}", i, count);
tx.send(());
})
}
drop(tx);
for _ in range(0, NUM_THREADS) {
rx.recv();
}
}
*/
// Rust has a perfectly good Semaphore type already. It lacks count(), though,
// so we can't use it directly.
//
// Instead of just copying its implementation, which would be boring, we present
// a very silly, but correct, variant.
#![feature(unsafe_destructor)]
extern crate sync;
use std::io::timer;
use std::ptr;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUint};
use std::sync::atomic as atomics;
use std::time::duration::Duration;
use sync::mutex::{Guard, Mutex};
static MAX_COUNT: uint = 4;
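// For contrast, a minimal sketch of the stock Semaphore mentioned above
// (hedged: this assumes the std::sync::Semaphore API of the same era). It
// gates concurrency just fine; it simply cannot report a count, which is
// what the variant below adds.
#[allow(dead_code)]
fn stock_semaphore_sketch() {
    let sem = ::std::sync::Semaphore::new(MAX_COUNT as int);
    // access() blocks until a unit is free and releases it when the guard
    // drops; there is no way to ask how many units remain.
    let _guard = sem.access();
}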
pub struct CountingSemaphore {
locks: [(Mutex, AtomicBool), .. MAX_COUNT],
count: AtomicUint,
}
pub struct CountingSemaphoreGuard<'a> {
_guard: Guard<'a>,
sem: &'a CountingSemaphore,
locked: &'a AtomicBool,
}
impl CountingSemaphore {
pub fn new() -> CountingSemaphore {
CountingSemaphore {
locks: unsafe {
let mut locks: [(Mutex, AtomicBool), .. MAX_COUNT] = ::std::mem::uninitialized();
for l in locks.iter_mut() {
ptr::write(l as *mut _, (Mutex::new(), AtomicBool::new(false)));
}
locks
},
count: AtomicUint::new(0),
}
}
// Acquire the resource, blocking if the count hits zero, and return a Guard
// that, when dropped, will release the resource.
pub fn acquire(&self) -> CountingSemaphoreGuard {
let count = self.count.fetch_add(1, atomics::SeqCst);
let (ref lock, ref locked) = self.locks[count % MAX_COUNT];
//let mut guard = lock.lock();
let guard = lock.lock();
//while locked.compare_and_swap(false, true, atomics::SeqCst) {
// guard = lock.lock();
//}
locked.store(true, atomics::SeqCst);
CountingSemaphoreGuard {
_guard: guard,
sem: self,
locked: locked,
}
}
// Count the resources currently held. Since resources may not be released
// in order, this may return an inconsistent count.
pub fn count(&self) -> uint {
self.locks.iter().filter( |&&(_, ref locked)| locked.load(atomics::SeqCst) ).count()
}
}
impl<'a> CountingSemaphoreGuard<'a> {
// Release the resource, returning the resource count after it was released
pub fn release(self) -> uint {
self.sem.count() - 1
}
}
#[unsafe_destructor]
impl<'a> Drop for CountingSemaphoreGuard<'a> {
// When the guard is dropped, a resource is released back to the pool.
fn drop(&mut self) {
// Decrement the number of acquisition attempts.
self.sem.count.fetch_sub(1, atomics::SeqCst);
// Store false in the lock position to keep the count accurate.
self.locked.store(false, atomics::SeqCst);
}
}
fn main() {
static NUM_THREADS: u8 = 10;
let sem = Arc::new(CountingSemaphore::new());
let duration = Duration::seconds(1) / 10;
let (tx, rx) = channel();
for i in range(0, NUM_THREADS) {
let sem = sem.clone();
let tx = tx.clone();
spawn(proc() {
let guard = sem.acquire();
println!("Worker {} after acquire: count = {}", i, sem.count());
timer::sleep(duration);
let count = guard.release();
println!("Worker {} after release: count = {}", i, count);
tx.send(());
})
}
drop(tx);
for _ in range(0, NUM_THREADS) {
rx.recv();
}
}
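// ---------------------------------------------------------------------------
// Next file in the gist.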
#![feature(if_let)]
use std::io::{Acceptor, BufferedReader, Listener, IoResult};
use std::io::net::tcp::{TcpListener, TcpStream};
// The actual echo server
fn echo_server() -> IoResult<()> {
static HOST: &'static str = "127.0.0.1";
static PORT: u16 = 12321;
// Create a new TCP listener at HOST:PORT.
let mut listener = try!(TcpListener::bind(HOST, PORT));
println!("Starting echo server on {}", listener.socket_name());
let mut acceptor = try!(listener.listen());
println!("Echo server started");
acceptor.set_timeout(Some(30)); // accept timeout, in milliseconds
// Process each new connection to the server
for stream in acceptor.incoming() {
match stream {
Err(e) => println!("Connection failed: {}", e),
Ok(mut stream) => {
println!("New connection: {}", stream.peer_name());
// Launch a new thread to deal with the connection.
spawn(proc() {
if let Err(e) = echo_session(stream.clone()) {
println!("I/O error: {} -- {}", stream.peer_name(), e);
}
println!("Closing connection: {}", stream.peer_name());
drop(stream);
})
}
}
}
Ok(())
// Server closes automatically at end of block
}
// The echo session
fn echo_session(stream: TcpStream) -> IoResult<()> {
let ref mut writer = stream.clone();
let mut reader = BufferedReader::new(stream);
for line in reader.lines() {
let l = try!(line);
println!("Received line from {}: {}", writer.peer_name(), l);
try!(writer.write_line(l.as_slice()));
println!("Wrote line to {}: {}", writer.peer_name(), l);
}
Ok(())
}
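// A matching client session, sketched for illustration (echo_client_once is
// a hypothetical helper, not part of the server above): connect, send one
// line, and read the echo back.
#[allow(dead_code)]
fn echo_client_once() -> IoResult<()> {
    let stream = try!(TcpStream::connect("127.0.0.1", 12321));
    let mut writer = stream.clone();
    let mut reader = BufferedReader::new(stream);
    try!(writer.write_line("hello"));
    // The server echoes the line back, newline included.
    let line = try!(reader.read_line());
    println!("Received echo: {}", line);
    Ok(())
}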
pub fn main() {
//let future = try_future(proc() echo_server() );
//future.unwrap();
echo_server().unwrap();
}
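// ---------------------------------------------------------------------------
// Next file in the gist.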
// I want to represent a type with some weird properties:
// * The type itself is NoSync (T).
// * It produces references of type NoSync + NoSend (&'a mut S).
// * THEY produce references of type Sync (&'a S: Sync).
#![feature(unboxed_closures)]
extern crate arena;
use arena::TypedArena;
use parallel::{fork_join_section, ForkJoinScheduler};
mod parallel {
use std::kinds::marker;
use std::rt::thread::Thread;
struct ForkJoinWorker<'a> {
marker: marker::NoSend,
thread: Thread<()>
}
impl<'a> ForkJoinWorker<'a> {
fn new<F: Fn<(), ()> + Sync + 'a>(job: F) -> ForkJoinWorker<'a> {
let job: proc(): 'a = proc() { job.call(()) };
//let marker: Co
// Stub: the spawned thread does not actually run `job` yet.
let thread = Thread::start(proc() { });
ForkJoinWorker {
marker: marker::NoSend,
thread: thread,
}
}
}
pub struct ForkJoinScheduler<'a> {
workers: Vec<ForkJoinWorker<'a>>,
}
impl<'a> ForkJoinScheduler<'a> {
fn new() -> ForkJoinScheduler<'a> {
ForkJoinScheduler {
workers: Vec::new(),
}
}
pub fn fork<'b, F: Fn<(), ()> + Sync + 'b>(&'b mut self, job: F) {
self.workers.push(ForkJoinWorker::new(job));
}
}
// 1. Workers are not Sendable. This is required to ensure that they
// terminate within the block.
pub fn fork_join_section<'a, F: FnMut<(ForkJoinScheduler<'a>,), ()>>(mut section: F) {
let mut sched = ForkJoinScheduler::new();
section.call_mut((sched,));
}
}
fn main() {
let arenas = TypedArena::new();
let ref arenas = arenas;
fork_join_section(|&mut: mut sched: ForkJoinScheduler| {
for i in range(0u8, 10) {
let arena = arenas.alloc(TypedArena::new());
let test = arena.alloc(());
let closure = |&:| {
let arena = arena;
// let test = arena.alloc(());
};
sched.fork(closure);
}
})
}
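// ---------------------------------------------------------------------------
// Next file in the gist.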
extern crate arena;
use arena::TypedArena;
use std::rt::thread::Thread;
use tree::{TreeNode};
pub mod atomic {
use std::sync::atomic::{AtomicPtr, Ordering};
pub struct AtomicOption<T> {
p: AtomicPtr<T>,
}
impl<T> AtomicOption<T> {
/// Create a new `AtomicOption`
/// Note: if opt is Some(null_mut()), it will be treated like None. The function does
/// not check for this, but it does not cause a memory safety issue so the
/// function is not marked unsafe.
pub fn new(opt: Option<*mut T>) -> AtomicOption<T> {
AtomicOption {
p: AtomicPtr::new(match opt {
Some(p) => p,
None => ::std::ptr::null_mut()
})
}
}
/// Load the value
///
/// # Failure
///
/// Fails if `order` is `Release` or `AcqRel`.
#[inline]
pub fn load(&self, order: Ordering) -> Option<*mut T> {
let p = self.p.load(order);
if p == ::std::ptr::null_mut() {
None
} else {
Some(p)
}
}
/// Store the value
///
/// # Failure
///
/// Fails if `order` is `Acquire` or `AcqRel`.
/// Note: if opt is Some(null_mut()), it will be treated like None. The function does
/// not check for this, but it does not cause a memory safety issue so the
/// function is not marked unsafe.
#[inline]
pub fn store(&self, opt: Option<*mut T>, order: Ordering) {
self.p.store(match opt {
Some(p) => p,
None => ::std::ptr::null_mut()
}, order)
}
/// Store a value, returning the old value
/// Note: if opt is Some(null_mut()), it will be treated like None. The function does
/// not check for this, but it does not cause a memory safety issue so the
/// function is not marked unsafe.
#[inline]
pub fn swap(&self, opt: Option<*mut T>, order: Ordering) -> Option<*mut T> {
let p = self.p.swap(match opt {
Some(p) => p,
None => ::std::ptr::null_mut()
}, order);
if p == ::std::ptr::null_mut() {
None
} else {
Some(p)
}
}
/// If the current value is the same as expected, store a new value
///
/// Compare the current value with `old`; if they are the same then
/// replace the current value with `new`. Return the previous value.
/// If the return value is equal to `old` then the value was updated.
/// Note: if old or new is Some(null_mut()), it will be treated like None. The function does
/// not check for this, but it does not cause a memory safety issue so the
/// function is not marked unsafe.
#[inline]
pub fn compare_and_swap(&self, old: Option<*mut T>, new: Option<*mut T>, order: Ordering) -> Option<*mut T> {
let p = self.p.compare_and_swap(match old {
Some(p) => p,
None => ::std::ptr::null_mut()
}, match new {
Some(p) => p,
None => ::std::ptr::null_mut()
}, order);
if p == ::std::ptr::null_mut() {
None
} else {
Some(p)
}
}
}
}
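// A small sanity sketch of AtomicOption on its own (illustration only): the
// null pointer plays the role of None, so a compare_and_swap from None can
// claim an empty slot exactly once.
#[allow(dead_code)]
fn atomic_option_sketch() {
    use std::sync::atomic::SeqCst;
    let mut slot = 0u;
    let p = &mut slot as *mut uint;
    let opt = atomic::AtomicOption::new(None);
    // The first claim succeeds: the previous value was None (null).
    assert!(opt.compare_and_swap(None, Some(p), SeqCst).is_none());
    // A second claim fails and hands back the pointer already stored.
    assert!(opt.compare_and_swap(None, Some(p), SeqCst) == Some(p));
}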
mod tree {
use atomic::AtomicOption;
use std::fmt;
use std::sync::atomic::{Ordering, Relaxed, SeqCst};
pub struct TreeNode<'a, 'b, T: Sync + 'b> {
data: T,
pub left: TreeBranch<'a, 'b, T>,
parent: Option<&'b TreeNode<'a, 'b, T>>,
}
impl<'a, 'b, T: Sync> TreeNode<'a, 'b, T> {
pub fn new(parent: Option<&'b TreeNode<'a, 'b, T>>, data: T) -> TreeNode<'a, 'b, T> {
TreeNode {left: TreeBranch::new(), parent: parent, data: data }
}
}
impl<'a, 'b, T: fmt::Show + Sync> fmt::Show for TreeNode<'a, 'b, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"TreeNode {{ data: {}, left: {}, root: {} }}",
self.data,
self.left,
self.parent.map_or( "yes", |_| "no"))
}
}
pub struct TreeBranch<'a, 'b, T: Sync + 'b> {
marker: ::std::kinds::marker::InvariantLifetime<'b>,
b: AtomicOption<TreeNode<'a, 'b, T>>
}
impl<'a, 'b, T: Sync> TreeBranch<'a, 'b, T> {
fn new() -> TreeBranch<'a, 'b, T> {
TreeBranch { b: AtomicOption::new(None), marker: ::std::kinds::marker::InvariantLifetime }
}
pub fn get(&self, order: Ordering) -> Option<&TreeNode<'a, 'b, T>> {
self.b.load(order).map( |t| unsafe { &*t } )
}
pub fn set(&'a self, new: &'b TreeNode<'a, 'b, T>) -> Result<(), ()> {
self.b
.compare_and_swap(None, Some(new as *const _ as *mut _), SeqCst)
.map_or(Ok(()), |_| Err(()))
}
}
impl<'a, 'b, T: fmt::Show + Sync> fmt::Show for TreeBranch<'a, 'b, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.get(Relaxed))
}
}
}
pub fn parallel<'a, T: Send, S: 'a>(num_threads: uint, init: |uint| -> &'a S, f: |&'a S|: 'a + Sync -> proc(): 'a -> T) {
let mut threads = Vec::with_capacity(num_threads);
for i in range(0, num_threads) {
let f: proc(): 'a -> T = f(init(i));
// Unsafely pretend the proc is Send; the caller must guarantee that the
// borrowed environment outlives every spawned thread.
let fun: proc(): Send -> T = unsafe { ::std::mem::transmute(f) };
threads.push(Thread::start(fun));
}
}
pub fn main() {
static MAX_THREADS: u16 = 10;
let ref root = TreeNode::new(None, ());
let ref env_arena = TypedArena::new();
parallel(
MAX_THREADS as uint,
|i| env_arena.alloc((i, TypedArena::new())),
|env| proc() -> Result<(),()> {
let (ref i, ref arena) = *env;
println!("{} {}", i, *root);
let left = arena.alloc(TreeNode::new(Some(root), ()));
let _ = root.left.set(left);
Ok(())
}
);
println!("{}", *root);
}
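// ---------------------------------------------------------------------------
// Next file in the gist.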
#![feature(unsafe_destructor)]
#![allow(missing_doc)]
use std::cell::{Cell, RefCell, UnsafeCell};
use std::fmt;
use std::intrinsics;
use std::kinds::marker;
use std::mem;
use std::ptr;
use std::rt::heap::{allocate, deallocate};
use std::sync::{Arc, Mutex};
use std::sync::atomic::AtomicBool;
use std::sync::atomic as atomics;
#[inline]
fn round_up(base: uint, align: uint) -> uint {
// e.g. round_up(13, 8) == (13 + 7) & !7 == 16. `align` must be a power of two.
(base.checked_add(&(align - 1))).unwrap() & !(align - 1)
}
pub struct SyncArena<'a, T> {
contravariant_lifetime: marker::ContravariantLifetime<'a>,
arena: UnsafeCell<*mut TypedArena<T>>,
arenas: Arc<Mutex<TypedArena<TypedArena<T>>>>,
}
impl<'a, T: Send> SyncArena<'a, T> {
pub fn new() -> SyncArena<'a, T> {
let arenas = TypedArena::new();
SyncArena {
arena: UnsafeCell::new(arenas.alloc(TypedArena::new()) as *mut _),
arenas: Arc::new(Mutex::new(arenas)),
contravariant_lifetime: marker::ContravariantLifetime,
}
}
pub fn alloc(&self, object: T) -> &mut T {
unsafe { (&mut **self.arena.get()).alloc(object) }
}
}
impl<'a, T: Send> Clone for SyncArena<'a, T> {
fn clone(&self) -> SyncArena<'a, T> {
let arena = {
let guard = self.arenas.lock();
guard.alloc(TypedArena::new()) as *mut _
};
println!("Drop?");
SyncArena {
arena: UnsafeCell::new(arena as *mut _),
arenas: self.arenas.clone(),
contravariant_lifetime: marker::ContravariantLifetime,
}
}
}
/// A faster arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
ptr: Cell<*const T>,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
end: Cell<*const T>,
/// A pointer to the first arena segment.
first: RefCell<*mut TypedArenaChunk<T>>,
}
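// Typical use, sketched (illustration only): allocations borrow from the
// arena, and everything is freed together when the arena is dropped.
#[allow(dead_code)]
fn typed_arena_sketch() {
    let arena: TypedArena<uint> = TypedArena::new();
    let x = arena.alloc(1u);
    let y = arena.alloc(2u);
    // Both references stay valid until `arena` goes out of scope.
    assert!(*x + *y == 3);
}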
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
next: *mut TypedArenaChunk<T>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
fn calculate_size<T>(capacity: uint) -> uint {
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
size
}
impl<T> TypedArenaChunk<T> {
#[inline]
unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: uint)
-> *mut TypedArenaChunk<T> {
let size = calculate_size::<T>(capacity);
let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>())
as *mut TypedArenaChunk<T>;
(*chunk).next = next;
(*chunk).capacity = capacity;
chunk
}
/// Destroys this arena chunk: runs the destructor on each of the `len`
/// objects it holds (when `T` needs drop), frees the chunk's memory, and
/// then destroys the next chunk in the chain.
#[inline]
unsafe fn destroy(&mut self, len: uint) {
// Destroy all the allocated objects.
if intrinsics::needs_drop::<T>() {
let mut start = self.start();
for _ in range(0, len) {
ptr::read(start as *const T); // run the destructor on the pointer
start = start.offset(mem::size_of::<T>() as int)
}
}
// Destroy the next chunk.
let next = self.next;
let size = calculate_size::<T>(self.capacity);
deallocate(self as *mut TypedArenaChunk<T> as *mut u8, size,
mem::min_align_of::<TypedArenaChunk<T>>());
if next.is_not_null() {
let capacity = (*next).capacity;
(*next).destroy(capacity);
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self) -> *const u8 {
let this: *const TypedArenaChunk<T> = self;
unsafe {
mem::transmute(round_up(this.offset(1) as uint,
mem::min_align_of::<T>()))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self) -> *const u8 {
unsafe {
let size = mem::size_of::<T>().checked_mul(&self.capacity).unwrap();
self.start().offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new `TypedArena` with preallocated space for eight objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new `TypedArena` with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
unsafe {
let chunk = TypedArenaChunk::<T>::new(ptr::null_mut(), capacity);
TypedArena {
ptr: Cell::new((*chunk).start() as *const T),
end: Cell::new((*chunk).end() as *const T),
first: RefCell::new(chunk),
}
}
}
/// Allocates an object in the `TypedArena`, returning a reference to it.
#[inline]
pub fn alloc(&self, object: T) -> &mut T {
if self.ptr == self.end {
self.grow()
}
let ptr = unsafe {
let ptr: &mut T = mem::transmute(self.ptr);
ptr::write(&mut *ptr as *mut T, object);
self.ptr.set(self.ptr.get().offset(1));
ptr
};
ptr
}
/// Grows the arena.
#[inline(never)]
fn grow(&self) {
unsafe {
let chunk = *self.first.borrow_mut();
let new_capacity = (*chunk).capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::<T>::new(chunk, new_capacity);
self.ptr.set((*chunk).start() as *const T);
self.end.set((*chunk).end() as *const T);
*self.first.borrow_mut() = chunk
}
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
unsafe {
// Determine how much was filled.
let start = self.first.borrow().as_ref().unwrap().start() as uint;
let end = self.ptr.get() as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
(**self.first.borrow_mut()).destroy(diff)
}
}
}
#[cfg(test)]
mod tests {
extern crate test;
extern crate arena;
use self::test::Bencher;
use self::arena::Arena;
use super::TypedArena;
#[allow(dead_code)]
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_copy() {
let arena = TypedArena::new();
for _ in range(0u, 100000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_copy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
})
})
}
#[bench]
pub fn bench_copy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Point {
x: 1,
y: 2,
z: 3,
}
})
}
#[bench]
pub fn bench_copy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
})
})
}
#[allow(dead_code)]
struct Noncopy {
string: String,
array: Vec<int>,
}
#[test]
pub fn test_noncopy() {
let arena = TypedArena::new();
for _ in range(0u, 100000) {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
});
}
}
#[bench]
pub fn bench_noncopy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
#[bench]
pub fn bench_noncopy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
}
})
}
#[bench]
pub fn bench_noncopy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
}
struct Foo {
v: Vec<AtomicBool>,
}
impl Drop for Foo {
fn drop(&mut self) {
println!("dropped {}", self.v.as_ptr());
}
}
impl fmt::Show for Foo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.v.iter().map( |b| b.load(atomics::Relaxed) ).collect::<Vec<_>>())
}
}
fn main() {
let foo = SyncArena::new();
//let foo = TypedArena::new();
{
let vec = foo.alloc(Foo { v: Vec::new() });
vec.v.push(AtomicBool::new(false));
let vec = &*vec;
let foo_ = foo.clone();
// Deliberately unsound: transmute the proc to Send so the borrowed `vec`
// can be smuggled into another task.
spawn(unsafe { mem::transmute::<proc(): Sync, proc(): Send>(proc() {
let bar = foo_.alloc(Foo { v: Vec::new() });
drop(foo_);
vec.v.last().iter_mut().map( |x| x.store(true, atomics::Relaxed) ).next();
println!("'kay");
println!("{}", vec);
})});
println!("bad {}", vec);
//*vec = Foo { v: Vec::new() };
//let new_vec = Foo { v: Vec::new() };
//vec.clone_from(&new_vec);
//*vec = new_vec;
//drop(vec);
//let vec = &*vec;
println!("good {}", vec);
}
//drop(foo);
println!("really bad");
}