From 97ed85efc5e9dce896fbbcde277980c0e98d2763 Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sat, 30 Sep 2023 00:39:23 -0400 Subject: [PATCH] Benchmarks: report tx/s as elements/s (#345) Based on my limited understanding of Criterion, I think we can add transactions per second reporting to benchmarks using Criterion's Throughput::Elements tracking, by treating transactions as elements. This is not ideal, as the output says e.g. `thrpt: [38.763 Kelem/s 38.781 Kelem/s 38.799 Kelem/s]`, where we'd like `Ktx/s`, but it's not clear whether Criterion allows us that much customization. Each of the benchmarks currently does one tx per iteration, so pass `Throughput::Elements(1)` for each of them. Do this as close to the test as possible, despite redundancy, in case we ever add benchmarks to the same group which do multiple transactions per iteration. --- crates/bench/benches/generic.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/crates/bench/benches/generic.rs b/crates/bench/benches/generic.rs index 931714bcad4..ee32d7273ce 100644 --- a/crates/bench/benches/generic.rs +++ b/crates/bench/benches/generic.rs @@ -148,6 +148,9 @@ fn insert_1( let id = format!("insert_1/{table_params}/load={load}"); let data = create_sequential::(0xdeadbeef, load + 1, 1000); + // Each iteration performs one transaction. + g.throughput(criterion::Throughput::Elements(1)); + g.bench_function(&id, |b| { bench_harness( b, @@ -181,6 +184,9 @@ fn insert_bulk( let id = format!("insert_bulk/{table_params}/load={load}/count={count}"); let data = create_sequential::(0xdeadbeef, load + count, 1000); + // Each iteration performs one transaction, though it inserts many rows. + g.throughput(criterion::Throughput::Elements(1)); + g.bench_function(&id, |b| { bench_harness( b, @@ -217,6 +223,10 @@ fn iterate( db.insert_bulk(table_id, data)?; + // Each iteration performs a single transaction, + // though it iterates across many rows. 
+ g.throughput(criterion::Throughput::Elements(1)); + g.bench_function(&id, |b| { bench_harness( b, @@ -261,6 +271,9 @@ fn filter( db.insert_bulk(&table_id, data.clone())?; + // Each iteration performs a single transaction. + g.throughput(criterion::Throughput::Elements(1)); + // We loop through all buckets found in the sample data. // This mildly increases variance on the benchmark, but makes "mean_result_count" more accurate. // Note that all databases have EXACTLY the same sample data. @@ -308,6 +321,9 @@ fn find( db.insert_bulk(&table_id, data.clone())?; + // Each iteration performs a single transaction. + g.throughput(criterion::Throughput::Elements(1)); + // We loop through all buckets found in the sample data. // This mildly increases variance on the benchmark, but makes "mean_result_count" more accurate. // Note that all benchmarks use exactly the same sample data.