Skip to content

Instantly share code, notes, and snippets.

@ArtemGr ArtemGr/map_bench.rs
Last active Apr 3, 2017

Embed
What would you like to do?
small strings map benchmark
// [build] cd .. && cargo bench
#![feature(asm, test)]
extern crate inlinable_string;
extern crate ordermap;
extern crate seahash;
extern crate test;
use inlinable_string::{InlinableString, StringExt};
use ordermap::OrderMap;
use std::collections::{BTreeMap, HashMap};
use std::fmt::{self, Write};
use std::hash::BuildHasher;
use test::Bencher;
// NB: As of now `BTreeMap` is using linear page scan and might be slower than hash maps. This ought to change, some day...
const FETCH_MAP_SIZE: usize = 16 * 1024;
/// For seahash maps.
/// Zero-sized `BuildHasher` that hands out fresh `SeaHasher` instances,
/// letting `HashMap`/`OrderMap` be keyed with seahash instead of SipHash.
pub struct SeaRandomState;
impl BuildHasher for SeaRandomState {
    type Hasher = seahash::SeaHasher;
    fn build_hasher(&self) -> Self::Hasher {
        seahash::SeaHasher::new()
    }
}
/// Adapter so `write!` can format directly into a borrowed `InlinableString`
/// via `fmt::Write`, avoiding an intermediate heap `String`.
struct InlString<'a>(&'a mut InlinableString);
impl<'a> fmt::Write for InlString<'a> {
    fn write_str(&mut self, piece: &str) -> fmt::Result {
        self.0.push_str(piece);
        Ok(())
    }
    fn write_char(&mut self, ch: char) -> fmt::Result {
        self.0.push(ch);
        Ok(())
    }
}
/// Time Stamp Counter (number of cycles).
///
/// Used throughout the benchmarks as a very cheap pseudo-random source for
/// generating and picking map keys.
///
/// Replaces the pre-1.0 `asm!("rdtsc" : "={eax}" ...)` syntax, which was
/// removed from the language and no longer compiles, with the stable
/// `_rdtsc` intrinsic (available since Rust 1.27); the signature and the
/// returned value are unchanged.
pub fn rdtsc() -> u64 {
    // SAFETY: `_rdtsc` only reads the CPU's time-stamp counter and has no
    // memory-safety preconditions. This benchmark targets x86_64 only, where
    // the intrinsic is always available.
    unsafe { std::arch::x86_64::_rdtsc() }
}
/// Random-access lookup latency of `BTreeMap` keyed by short decimal strings.
#[bench]
fn btreemap_fetch(bencher: &mut Bencher) {
    // Populate the map with FETCH_MAP_SIZE keys derived from TSC snapshots,
    // remembering each key so the hot loop can pick one pseudo-randomly.
    let mut keys = Vec::with_capacity(FETCH_MAP_SIZE);
    let mut map = BTreeMap::new();
    for _ in 0..FETCH_MAP_SIZE {
        let key = format!("{}", rdtsc() as i32);
        keys.push(key.clone());
        map.insert(key, Box::new(()));
    }
    // Measured section: one lookup per iteration, key chosen by the TSC.
    bencher.iter(|| {
        let key = &keys[rdtsc() as usize % FETCH_MAP_SIZE];
        assert!(map.get(key).is_some());
    })
}
/// Random-access lookup latency of the default (SipHash) `HashMap` with
/// short decimal-string keys.
#[bench]
fn hashmap_fetch(bencher: &mut Bencher) {
    // Fill the map with FETCH_MAP_SIZE TSC-derived keys, keeping a copy of
    // every key for the measured lookups.
    let mut keys = Vec::with_capacity(FETCH_MAP_SIZE);
    let mut map = HashMap::new();
    for _ in 0..FETCH_MAP_SIZE {
        let key = format!("{}", rdtsc() as i32);
        keys.push(key.clone());
        map.insert(key, Box::new(()));
    }
    // Measured section: one lookup per iteration, key chosen by the TSC.
    bencher.iter(|| {
        let key = &keys[rdtsc() as usize % FETCH_MAP_SIZE];
        assert!(map.get(key).is_some());
    })
}
/// Same as `hashmap_fetch` but hashing the string keys with seahash
/// (via `SeaRandomState`) instead of the default SipHash.
#[bench]
fn hashmap_seahash_fetch(bencher: &mut Bencher) {
    // Fill the map with FETCH_MAP_SIZE TSC-derived keys, keeping a copy of
    // every key for the measured lookups.
    let mut keys = Vec::with_capacity(FETCH_MAP_SIZE);
    let mut map = HashMap::with_hasher(SeaRandomState);
    for _ in 0..FETCH_MAP_SIZE {
        let key = format!("{}", rdtsc() as i32);
        keys.push(key.clone());
        map.insert(key, Box::new(()));
    }
    // Measured section: one lookup per iteration, key chosen by the TSC.
    bencher.iter(|| {
        let key = &keys[rdtsc() as usize % FETCH_MAP_SIZE];
        assert!(map.get(key).is_some());
    })
}
/// Same as `hashmap_fetch` but keying the map with `InlinableString`
/// (small-string optimization) instead of heap-allocated `String`.
#[bench]
fn hashmap_inlinable_fetch(bencher: &mut Bencher) {
    // Fill the map with FETCH_MAP_SIZE TSC-derived keys, formatting each one
    // straight into an InlinableString through the InlString adapter.
    let mut keys = Vec::with_capacity(FETCH_MAP_SIZE);
    let mut map = HashMap::new();
    for _ in 0..FETCH_MAP_SIZE {
        let mut key = InlinableString::new();
        {
            let mut sink = InlString(&mut key);
            write!(sink, "{}", rdtsc() as i32).unwrap();
        }
        keys.push(key.clone());
        map.insert(key, Box::new(()));
    }
    // Measured section: one lookup per iteration, key chosen by the TSC.
    bencher.iter(|| {
        let key = &keys[rdtsc() as usize % FETCH_MAP_SIZE];
        assert!(map.get(key).is_some());
    })
}
/// Random-access lookup latency of `OrderMap` with `String` keys hashed by
/// seahash (`SeaRandomState`). Starts with zero capacity and grows on insert.
#[bench]
fn ordermap_sea_fetch(bencher: &mut Bencher) {
    // Fill the map with FETCH_MAP_SIZE TSC-derived keys, keeping a copy of
    // every key for the measured lookups.
    let mut keys = Vec::with_capacity(FETCH_MAP_SIZE);
    let mut map = OrderMap::with_capacity_and_hasher(0, SeaRandomState);
    for _ in 0..FETCH_MAP_SIZE {
        let key = format!("{}", rdtsc() as i32);
        keys.push(key.clone());
        map.insert(key, Box::new(()));
    }
    // Measured section: one lookup per iteration, key chosen by the TSC.
    bencher.iter(|| {
        let key = &keys[rdtsc() as usize % FETCH_MAP_SIZE];
        assert!(map.get(key).is_some());
    })
}
/// Random-access lookup latency of `OrderMap` with its default hasher and
/// heap-allocated `String` keys.
#[bench]
fn ordermap_fetch(bencher: &mut Bencher) {
    // Fill the map with FETCH_MAP_SIZE TSC-derived keys, keeping a copy of
    // every key for the measured lookups.
    let mut keys = Vec::with_capacity(FETCH_MAP_SIZE);
    let mut map = OrderMap::new();
    for _ in 0..FETCH_MAP_SIZE {
        let key = format!("{}", rdtsc() as i32);
        keys.push(key.clone());
        map.insert(key, Box::new(()));
    }
    // Measured section: one lookup per iteration, key chosen by the TSC.
    bencher.iter(|| {
        let key = &keys[rdtsc() as usize % FETCH_MAP_SIZE];
        assert!(map.get(key).is_some());
    })
}
/// `OrderMap` lookups with both tweaks combined: `InlinableString` keys and
/// the seahash hasher (`SeaRandomState`).
#[bench]
fn ordermap_inlinable_sea_fetch(bencher: &mut Bencher) {
    // Fill the map with FETCH_MAP_SIZE TSC-derived keys, formatting each one
    // straight into an InlinableString through the InlString adapter.
    let mut keys = Vec::with_capacity(FETCH_MAP_SIZE);
    let mut map = OrderMap::with_capacity_and_hasher(0, SeaRandomState);
    for _ in 0..FETCH_MAP_SIZE {
        let mut key = InlinableString::new();
        {
            let mut sink = InlString(&mut key);
            write!(sink, "{}", rdtsc() as i32).unwrap();
        }
        keys.push(key.clone());
        map.insert(key, Box::new(()));
    }
    // Measured section: one lookup per iteration, key chosen by the TSC.
    bencher.iter(|| {
        let key = &keys[rdtsc() as usize % FETCH_MAP_SIZE];
        assert!(map.get(key).is_some());
    })
}
/// Same as `ordermap_fetch` but keying the map with `InlinableString`
/// (small-string optimization) instead of heap-allocated `String`.
#[bench]
fn ordermap_inlinable_fetch(bencher: &mut Bencher) {
    // Fill the map with FETCH_MAP_SIZE TSC-derived keys, formatting each one
    // straight into an InlinableString through the InlString adapter.
    let mut keys = Vec::with_capacity(FETCH_MAP_SIZE);
    let mut map = OrderMap::new();
    for _ in 0..FETCH_MAP_SIZE {
        let mut key = InlinableString::new();
        {
            let mut sink = InlString(&mut key);
            write!(sink, "{}", rdtsc() as i32).unwrap();
        }
        keys.push(key.clone());
        map.insert(key, Box::new(()));
    }
    // Measured section: one lookup per iteration, key chosen by the TSC.
    bencher.iter(|| {
        let key = &keys[rdtsc() as usize % FETCH_MAP_SIZE];
        assert!(map.get(key).is_some());
    })
}
/// Baseline: `HashMap` lookups keyed by plain `i32`, to show how much of the
/// string benchmarks' cost is string hashing/comparison.
#[bench]
fn hashmap_i32_fetch(bencher: &mut Bencher) {
    // Fill the map with FETCH_MAP_SIZE TSC-derived integer keys.
    let mut keys = Vec::with_capacity(FETCH_MAP_SIZE);
    let mut map = HashMap::new();
    for _ in 0..FETCH_MAP_SIZE {
        let key = rdtsc() as i32;
        keys.push(key);
        map.insert(key, Box::new(()));
    }
    // Measured section: one lookup per iteration, key chosen by the TSC.
    bencher.iter(|| {
        let key = &keys[rdtsc() as usize % FETCH_MAP_SIZE];
        assert!(map.get(key).is_some());
    })
}
/// Baseline: `OrderMap` lookups keyed by plain `i32`, the integer-key
/// counterpart of `hashmap_i32_fetch`.
#[bench]
fn ordermap_i32_fetch(bencher: &mut Bencher) {
    // Fill the map with FETCH_MAP_SIZE TSC-derived integer keys.
    let mut keys = Vec::with_capacity(FETCH_MAP_SIZE);
    let mut map = OrderMap::new();
    for _ in 0..FETCH_MAP_SIZE {
        let key = rdtsc() as i32;
        keys.push(key);
        map.insert(key, Box::new(()));
    }
    // Measured section: one lookup per iteration, key chosen by the TSC.
    bencher.iter(|| {
        let key = &keys[rdtsc() as usize % FETCH_MAP_SIZE];
        assert!(map.get(key).is_some());
    })
}
2017-04-03, Intel(R) Core(TM) i7-6700HQ CPU @ 2.60GHz, Windows 10 native, rustc 1.16.0-nightly (bf6d7b665 2017-01-15), ordermap 0.2.9.
test btreemap_fetch ... bench: 279 ns/iter (+/- 129)
test hashmap_fetch ... bench: 70 ns/iter (+/- 34)
test hashmap_inlinable_fetch ... bench: 63 ns/iter (+/- 18)
test hashmap_seahash_fetch ... bench: 83 ns/iter (+/- 33)
test ordermap_fetch ... bench: 71 ns/iter (+/- 10)
test ordermap_inlinable_fetch ... bench: 66 ns/iter (+/- 21)
test ordermap_inlinable_sea_fetch ... bench: 81 ns/iter (+/- 8)
test ordermap_sea_fetch ... bench: 91 ns/iter (+/- 11)
rustc 1.18.0-nightly (5e122f59b 2017-04-01)
test btreemap_fetch ... bench: 271 ns/iter (+/- 131)
test hashmap_fetch ... bench: 70 ns/iter (+/- 35)
test hashmap_i32_fetch ... bench: 34 ns/iter (+/- 4)
test hashmap_inlinable_fetch ... bench: 60 ns/iter (+/- 5)
test hashmap_seahash_fetch ... bench: 80 ns/iter (+/- 10)
test ordermap_fetch ... bench: 71 ns/iter (+/- 5)
test ordermap_i32_fetch ... bench: 36 ns/iter (+/- 3)
test ordermap_inlinable_fetch ... bench: 64 ns/iter (+/- 19)
test ordermap_inlinable_sea_fetch ... bench: 80 ns/iter (+/- 6)
test ordermap_sea_fetch ... bench: 88 ns/iter (+/- 12)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.