Skip to content

Instantly share code, notes, and snippets.

@AngelicosPhosphoros
Last active December 12, 2020 18:11
Show Gist options
  • Save AngelicosPhosphoros/f9a14cb9d2e9bf1423dcb257e0831e5d to your computer and use it in GitHub Desktop.
Save AngelicosPhosphoros/f9a14cb9d2e9bf1423dcb257e0831e5d to your computer and use it in GitHub Desktop.
Comparison between using itertools crate, indexing and slice iterations
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use itertools::Itertools;
use std::convert::TryInto;
/// Produce `size` random integers, each drawn uniformly from `1..=10_000`,
/// to serve as benchmark input data.
fn gen_numbers(size: u32) -> Vec<u32> {
    use rand::prelude::Rng;

    let count = usize::try_from(size).unwrap();
    let mut rng = rand::thread_rng();
    let distribution = rand::distributions::Uniform::new_inclusive(1u32, 10_000);

    // Preallocate and fill; same number of `sample` calls as a map/collect chain.
    let mut values = Vec::with_capacity(count);
    for _ in 0..count {
        values.push(rng.sample(distribution));
    }
    values
}
// To avoid allocation costs in benchmark
// To avoid allocation costs in benchmark.
/// Preallocate an output vector with capacity for `size * size` elements
/// (an upper bound on the `size * (size - 1) / 2` pair sums produced),
/// so the benchmarked closures never reallocate while being timed.
///
/// The capacity product is computed with `checked_mul`: the original
/// `size * size` would silently wrap in release builds (e.g. on 32-bit
/// targets with large `size`), yielding a tiny capacity and re-introducing
/// the very allocations the benchmark tries to exclude. Overflow now
/// panics loudly instead.
fn make_out_vec(size: u32) -> Vec<u32> {
    let size: usize = size.try_into().unwrap();
    let capacity = size
        .checked_mul(size)
        .expect("size * size overflows usize");
    Vec::with_capacity(capacity)
}
/// Criterion benchmark comparing three ways of computing the sums of all
/// unordered element pairs (indices j < k) of a random `Vec<u32>`:
/// itertools' `tuple_combinations`, manual slice indexing, and nested
/// slice iterators. Each variant runs for every size in `sizes`.
pub fn bench_get_priority(c: &mut Criterion) {
// Input lengths under test; each produces size * (size - 1) / 2 sums.
let sizes = [100u32, 200];
// Variant 1: itertools' `tuple_combinations` over copied elements.
let mut group = c.benchmark_group("itertools");
for &size in &sizes {
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
b.iter_batched(
// Allocate everything here
// To avoid allocation costs in benched functions
move || (gen_numbers(size), make_out_vec(size)),
move |(source, mut outvec)| {
let iter = source
.iter()
.copied()
.tuple_combinations::<(u32, u32)>()
.map(|(a, b)| a + b);
outvec.extend(iter);
// Return both vectors so their drops happen outside the timed section.
(outvec, source)
},
criterion::BatchSize::LargeInput,
)
});
}
group.finish();
// Variant 2: explicit index pairs over a slice (pays per-access bounds checks).
let mut group = c.benchmark_group("indexing");
for &size in &sizes {
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
b.iter_batched(
// Allocate everything here
// To avoid allocation costs in benched functions
move || (gen_numbers(size), make_out_vec(size)),
move |(source, mut outvec)| {
// Reborrow as a slice once so the closures below capture `source_ref` by copy.
let source_ref = &source[..];
let iter = (0..source_ref.len()).flat_map(|j| {
((j + 1)..source_ref.len()).map(move |k| source_ref[j] + source_ref[k])
});
outvec.extend(iter);
// Return both vectors so their drops happen outside the timed section.
(outvec, source)
},
criterion::BatchSize::LargeInput,
)
});
}
group.finish();
// Variant 3: nested slice iterators (`enumerate` + subslice), no explicit indexing.
let mut group = c.benchmark_group("iterators");
for &size in &sizes {
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
b.iter_batched(
// Allocate everything here
// To avoid allocation costs in benched functions
move || (gen_numbers(size), make_out_vec(size)),
move |(source, mut outvec)| {
let iter = source
.iter()
.enumerate()
.flat_map(|(j, &num)| source[j + 1..].iter().map(move |&x| x + num));
outvec.extend(iter);
// Return both vectors so their drops happen outside the timed section.
(outvec, source)
},
criterion::BatchSize::LargeInput,
)
});
}
group.finish();
}
// Register the benchmark function and generate the harness `main`.
criterion_group!(benches, bench_get_priority);
criterion_main!(benches);
/*
Results
itertools/100 time: [12.781 us 12.805 us 12.831 us]
Found 13 outliers among 100 measurements (13.00%)
13 (13.00%) low severe
itertools/200 time: [48.211 us 48.693 us 49.071 us]
indexing/100 time: [9.9299 us 9.9378 us 9.9467 us]
Found 14 outliers among 100 measurements (14.00%)
1 (1.00%) low mild
1 (1.00%) high mild
12 (12.00%) high severe
indexing/200 time: [39.582 us 39.654 us 39.720 us]
Found 2 outliers among 100 measurements (2.00%)
1 (1.00%) high mild
1 (1.00%) high severe
iterators/100 time: [9.7633 us 9.7809 us 9.8010 us]
Found 1 outliers among 100 measurements (1.00%)
1 (1.00%) high severe
iterators/200 time: [38.732 us 38.785 us 38.840 us]
Found 5 outliers among 100 measurements (5.00%)
3 (3.00%) high mild
2 (2.00%) high severe
*/
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment