Merge pull request #41 from gwy15/get-container-cached
Cache some types for `get_container`
droundy authored Oct 28, 2023
2 parents c4f1a3d + 7e2885f commit 11a5bb1
Showing 5 changed files with 664 additions and 64 deletions.
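
For context on what "cache some types" means here: the benchmark added below attributes the speedup to skipping the `DashMap` lookup that normally finds the per-type container. The sketch below is purely illustrative, with hypothetical names and a plain `Mutex`/`HashSet` standing in for internment's actual lock-free internals; it only shows the shape of the optimization, contrasting a generic, type-erased container lookup keyed by `TypeId` with a dedicated static container for a hot type such as `String`.

// Hypothetical sketch of the caching idea only -- NOT internment's actual code.
use std::any::{Any, TypeId};
use std::collections::{HashMap, HashSet};
use std::sync::{Mutex, OnceLock};

// Slow path: one shared, type-erased map keyed by TypeId, consulted on every intern.
static CONTAINERS: OnceLock<Mutex<HashMap<TypeId, Box<dyn Any + Send>>>> = OnceLock::new();

// Generic path: pays a TypeId hash + map lookup before reaching the real container.
fn intern_generic<T>(value: T) -> T
where
    T: 'static + Eq + std::hash::Hash + Clone + Send,
{
    let mut map = CONTAINERS.get_or_init(Default::default).lock().unwrap();
    let container = map
        .entry(TypeId::of::<T>())
        .or_insert_with(|| Box::new(HashSet::<T>::new()));
    let set = container.downcast_mut::<HashSet<T>>().unwrap();
    if let Some(existing) = set.get(&value) {
        return existing.clone();
    }
    set.insert(value.clone());
    value
}

// Cached fast path: the String container lives in its own static,
// so interning a String never touches the TypeId-keyed map.
static STRING_CONTAINER: OnceLock<Mutex<HashSet<String>>> = OnceLock::new();

fn intern_string(value: String) -> String {
    let mut set = STRING_CONTAINER.get_or_init(Default::default).lock().unwrap();
    if let Some(existing) = set.get(&value) {
        return existing.clone();
    }
    set.insert(value.clone());
    value
}

fn main() {
    assert_eq!(intern_string("hello".to_string()), "hello");
    assert_eq!(intern_generic(42_u64), 42);
}
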
Cargo.toml (9 changes: 8 additions & 1 deletion)
@@ -47,10 +47,11 @@ intern = []
default = ["intern"]

[dev-dependencies]

quickcheck = "^0.9.2"
scaling = "0.1.3"
rand = "0.7.2"
serde_json = "1.0.87"
criterion = { version = "0.4", features = ["html_reports"] }

# [profile.release]
# debug = true
@@ -65,3 +66,9 @@ rustdoc-args = ["--cfg", "docsrs"]
[[bench]]
name = "bench"
harness = false
required-features = ["bench"]

[[bench]]
name = "get_container"
harness = false
required-features = ["bench"]
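
Both bench targets are gated behind `required-features = ["bench"]`, so presumably they are run with that feature enabled, e.g. something along the lines of `cargo bench --features bench --bench get_container` (the exact invocation depends on how the `bench` feature is declared in the `[features]` section, which is not shown in this hunk).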
benches/get_container.rs (84 changes: 84 additions & 0 deletions)
@@ -0,0 +1,84 @@
//! This benchmark demonstrates that cached types (`String` and `str` as of now)
//! are faster to intern than non-cached types, because they avoid looking up
//! their container in a `DashMap`.
//!
//! The results show a whopping 26% performance gain for short `ArcIntern<String>`.
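//! (From the medians reported below: (22.294 - 17.707) / 17.707 is roughly 26%,
//! i.e. the uncached run takes about 26% longer per pass.)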
use criterion::*;
use internment::ArcIntern;

const ITER: usize = 200_000;
const RANGE: usize = 20_000;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct NewType<T>(T);

fn bench_get_container(c: &mut Criterion) {
let mut group = c.benchmark_group("cached");
// time: [17.635 ms 17.707 ms 17.782 ms]
group.bench_function(BenchmarkId::new("String", "short"), |b| {
b.iter_batched(
|| {},
|_| {
let mut ans = Vec::with_capacity(RANGE);
for idx in 0..ITER {
let s = ArcIntern::<String>::new(format!("short-{}", idx % RANGE));
ans.push(s);
}
},
criterion::BatchSize::PerIteration,
);
});
group.finish();

let mut group = c.benchmark_group("uncached");
// time: [22.209 ms 22.294 ms 22.399 ms] => ~26% slower than the cached `String` case above!
group.bench_function(BenchmarkId::new("NewType<String>", "short"), |b| {
b.iter_batched(
|| {},
|_| {
let mut ans = Vec::with_capacity(RANGE);
for idx in 0..ITER {
let s = ArcIntern::<NewType<String>>::new(NewType(format!(
"short-{}",
idx % RANGE
)));
ans.push(s);
}
},
criterion::BatchSize::PerIteration,
);
});
// demonstrate that NewType does not affect performance
// time: [8.0247 ms 8.0419 ms 8.0607 ms]
group.bench_function(BenchmarkId::new("usize", "short"), |b| {
b.iter_batched(
|| {},
|_| {
let mut ans = Vec::with_capacity(RANGE);
for idx in 0..ITER {
let s = ArcIntern::<usize>::new(idx % RANGE);
ans.push(s);
}
},
criterion::BatchSize::PerIteration,
);
});
// time: [8.0210 ms 8.0341 ms 8.0485 ms] => essentially identical: the NewType wrapper does not affect performance.
group.bench_function(BenchmarkId::new("NewType<usize>", "short"), |b| {
b.iter_batched(
|| {},
|_| {
let mut ans = Vec::with_capacity(RANGE);
for idx in 0..ITER {
let s = ArcIntern::<NewType<usize>>::new(NewType(idx % RANGE));
ans.push(s);
}
},
criterion::BatchSize::PerIteration,
);
});
group.finish();
}

criterion_group!(benches, bench_get_container);
criterion_main!(benches);