From 4aa888206ae58497a3fe845b5dae3fbb3a3e724d Mon Sep 17 00:00:00 2001
From: Roman Lebedev
Date: Thu, 15 Feb 2024 00:19:01 +0300
Subject: [PATCH 1/4] Rewrite complexity_test to use (hardcoded) manual time

This test is fundamentally flaky, because it tries to read tea leaves,
and it inherently misbehaves in CI environments, where there are
unmitigated sources of noise. On top of that, the computed Big-O also
depends on the value of `--benchmark_min_time=`.

Fixes https://github.com/google/benchmark/issues/272
---
 test/complexity_test.cc | 139 +++++++++++++++++++++++-----------------
 1 file changed, 79 insertions(+), 60 deletions(-)

diff --git a/test/complexity_test.cc b/test/complexity_test.cc
index 1c746afb43..03e71472dc 100644
--- a/test/complexity_test.cc
+++ b/test/complexity_test.cc
@@ -69,35 +69,38 @@ int AddComplexityTest(const std::string &test_name,
 
 void BM_Complexity_O1(benchmark::State &state) {
   for (auto _ : state) {
-    for (int i = 0; i < 1024; ++i) {
-      benchmark::DoNotOptimize(i);
-    }
+    // This test requires a non-zero CPU time to avoid divide-by-zero
+    auto iterations = double(state.iterations()) * double(state.iterations());
+    benchmark::DoNotOptimize(iterations);
+
+    // always 1ns per iteration
+    state.SetIterationTime(42 * 1e-9);
   }
   state.SetComplexityN(state.range(0));
 }
-BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
-BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
 BENCHMARK(BM_Complexity_O1)
     ->Range(1, 1 << 18)
+    ->UseManualTime()
+    ->Complexity(benchmark::o1);
+BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->UseManualTime()->Complexity();
+BENCHMARK(BM_Complexity_O1)
+    ->Range(1, 1 << 18)
+    ->UseManualTime()
     ->Complexity([](benchmark::IterationCount) { return 1.0; });
 
-const char *one_test_name = "BM_Complexity_O1";
-const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
-const char *rms_o_1_test_name = "BM_Complexity_O1_RMS";
-const char *enum_big_o_1 = "\\([0-9]+\\)";
-// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto
-// deduced.
-// See https://github.com/google/benchmark/issues/272
-const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)|(N\\^2)";
+const char *one_test_name = "BM_Complexity_O1/manual_time";
+const char *big_o_1_test_name = "BM_Complexity_O1/manual_time_BigO";
+const char *rms_o_1_test_name = "BM_Complexity_O1/manual_time_RMS";
+const char *enum_auto_big_o_1 = "\\([0-9]+\\)";
 const char *lambda_big_o_1 = "f\\(N\\)";
 
 // Add enum tests
 ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
-                     enum_big_o_1, /*family_index=*/0);
+                     enum_auto_big_o_1, /*family_index=*/0);
 
-// Add auto enum tests
+// Add auto tests
 ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
-                     auto_big_o_1, /*family_index=*/1);
+                     enum_auto_big_o_1, /*family_index=*/1);
 
 // Add lambda tests
 ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
@@ -107,43 +110,38 @@ ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
 // --------------------------- Testing BigO O(N) --------------------------- //
 // ========================================================================= //
 
-std::vector<int> ConstructRandomVector(int64_t size) {
-  std::vector<int> v;
-  v.reserve(static_cast<size_t>(size));
-  for (int i = 0; i < size; ++i) {
-    v.push_back(static_cast<int>(std::rand() % size));
-  }
-  return v;
-}
-
 void BM_Complexity_O_N(benchmark::State &state) {
-  auto v = ConstructRandomVector(state.range(0));
-  // Test worst case scenario (item not in vector)
-  const int64_t item_not_in_vector = state.range(0) * 2;
   for (auto _ : state) {
-    auto it = std::find(v.begin(), v.end(), item_not_in_vector);
-    benchmark::DoNotOptimize(it);
+    // This test requires a non-zero CPU time to avoid divide-by-zero
+    auto iterations = double(state.iterations()) * double(state.iterations());
+    benchmark::DoNotOptimize(iterations);
+
+    // 1ns per iteration per entry
+    state.SetIterationTime(state.range(0) * 42 * 1e-9);
   }
   state.SetComplexityN(state.range(0));
 }
 BENCHMARK(BM_Complexity_O_N)
     ->RangeMultiplier(2)
-    ->Range(1 << 10, 1 << 16)
+    ->Range(1 << 10, 1 << 20)
+    ->UseManualTime()
     ->Complexity(benchmark::oN);
 BENCHMARK(BM_Complexity_O_N)
     ->RangeMultiplier(2)
-    ->Range(1 << 10, 1 << 16)
+    ->Range(1 << 10, 1 << 20)
+    ->UseManualTime()
+    ->Complexity();
+BENCHMARK(BM_Complexity_O_N)
+    ->RangeMultiplier(2)
+    ->Range(1 << 10, 1 << 20)
+    ->UseManualTime()
     ->Complexity([](benchmark::IterationCount n) -> double {
       return static_cast<double>(n);
     });
-BENCHMARK(BM_Complexity_O_N)
-    ->RangeMultiplier(2)
-    ->Range(1 << 10, 1 << 16)
-    ->Complexity();
 
-const char *n_test_name = "BM_Complexity_O_N";
-const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
-const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS";
+const char *n_test_name = "BM_Complexity_O_N/manual_time";
+const char *big_o_n_test_name = "BM_Complexity_O_N/manual_time_BigO";
+const char *rms_o_n_test_name = "BM_Complexity_O_N/manual_time_RMS";
 const char *enum_auto_big_o_n = "N";
 const char *lambda_big_o_n = "f\\(N\\)";
 
@@ -151,53 +149,70 @@ const char *lambda_big_o_n = "f\\(N\\)";
 ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
                      enum_auto_big_o_n, /*family_index=*/3);
 
+// Add auto tests
+ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
+                     enum_auto_big_o_n, /*family_index=*/4);
+
 // Add lambda tests
 ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
-                     lambda_big_o_n, /*family_index=*/4);
+                     lambda_big_o_n, /*family_index=*/5);
 
 // ========================================================================= //
-// ------------------------- Testing BigO O(N*lgN) ------------------------- //
+// ------------------------- Testing BigO O(NlgN) ------------------------- //
 // ========================================================================= //
 
+static const double kLog2E = 1.44269504088896340736;
 static void BM_Complexity_O_N_log_N(benchmark::State &state) {
-  auto v = ConstructRandomVector(state.range(0));
   for (auto _ : state) {
-    std::sort(v.begin(), v.end());
+    // This test requires a non-zero CPU time to avoid divide-by-zero
+    auto iterations = double(state.iterations()) * double(state.iterations());
+    benchmark::DoNotOptimize(iterations);
+
+    state.SetIterationTime(state.range(0) * kLog2E * std::log(state.range(0)) *
+                           42 * 1e-9);
   }
   state.SetComplexityN(state.range(0));
 }
-static const double kLog2E = 1.44269504088896340736;
 BENCHMARK(BM_Complexity_O_N_log_N)
     ->RangeMultiplier(2)
-    ->Range(1 << 10, 1 << 16)
+    ->Range(1 << 10, 1U << 24)
+    ->UseManualTime()
     ->Complexity(benchmark::oNLogN);
 BENCHMARK(BM_Complexity_O_N_log_N)
     ->RangeMultiplier(2)
-    ->Range(1 << 10, 1 << 16)
+    ->Range(1 << 10, 1U << 24)
+    ->UseManualTime()
+    ->Complexity();
+BENCHMARK(BM_Complexity_O_N_log_N)
+    ->RangeMultiplier(2)
+    ->Range(1 << 10, 1U << 24)
+    ->UseManualTime()
     ->Complexity([](benchmark::IterationCount n) {
       return kLog2E * static_cast<double>(n) * std::log(static_cast<double>(n));
     });
-BENCHMARK(BM_Complexity_O_N_log_N)
-    ->RangeMultiplier(2)
-    ->Range(1 << 10, 1 << 16)
-    ->Complexity();
 
-const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
-const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
-const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
-const char *enum_auto_big_o_n_lg_n = "NlgN";
+const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time";
+const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time_BigO";
+const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time_RMS";
+const char *enum_big_o_n_lg_n = "NlgN";
+const char *auto_big_o_n_lg_n = "lgN";
 const char *lambda_big_o_n_lg_n = "f\\(N\\)";
 
 // Add enum tests
 ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
-                     rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
+                     rms_o_n_lg_n_test_name, enum_big_o_n_lg_n,
                      /*family_index=*/6);
 
-// Add lambda tests
+// NOTE: auto big-o is wrong.
 ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
-                     rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n,
+                     rms_o_n_lg_n_test_name, auto_big_o_n_lg_n,
                      /*family_index=*/7);
 
+// Add lambda tests
+ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
+                     rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n,
+                     /*family_index=*/8);
+
 // ========================================================================= //
 // -------- Testing formatting of Complexity with captured args ------------ //
 // ========================================================================= //
@@ -205,21 +220,25 @@ ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
 void BM_ComplexityCaptureArgs(benchmark::State &state, int n) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    auto iterations = state.iterations();
+    auto iterations = double(state.iterations()) * double(state.iterations());
     benchmark::DoNotOptimize(iterations);
+
+    state.SetIterationTime(state.range(0) * 42 * 1e-9);
   }
 
   state.SetComplexityN(n);
 }
 
 BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
+    ->UseManualTime()
     ->Complexity(benchmark::oN)
     ->Ranges({{1, 2}, {3, 4}});
 
 const std::string complexity_capture_name =
-    "BM_ComplexityCaptureArgs/capture_test";
+    "BM_ComplexityCaptureArgs/capture_test/manual_time";
 
 ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
-                     complexity_capture_name + "_RMS", "N", /*family_index=*/9);
+                     complexity_capture_name + "_RMS", "N",
+                     /*family_index=*/9);
 
 // ========================================================================= //
 // --------------------------- TEST CASES END ------------------------------ //

From e703685fd357f4c90ac67c884ec317025082e84b Mon Sep 17 00:00:00 2001
From: Roman Lebedev
Date: Sat, 17 Feb 2024 05:27:48 +0300
Subject: [PATCH 2/4] Correctly compute Big-O for manual timings.

Fixes #1758.
---
 include/benchmark/benchmark.h |  5 +++++
 src/benchmark_runner.cc       |  1 +
 src/complexity.cc             | 15 +++++++++++++--
 test/complexity_test.cc       |  7 +++----
 4 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/include/benchmark/benchmark.h b/include/benchmark/benchmark.h
index c9c1c4bab1..08cfe29da3 100644
--- a/include/benchmark/benchmark.h
+++ b/include/benchmark/benchmark.h
@@ -1792,6 +1792,7 @@ class BENCHMARK_EXPORT BenchmarkReporter {
         real_accumulated_time(0),
         cpu_accumulated_time(0),
         max_heapbytes_used(0),
+        use_real_time_for_initial_big_o(false),
         complexity(oNone),
         complexity_lambda(),
         complexity_n(0),
@@ -1834,6 +1835,10 @@ class BENCHMARK_EXPORT BenchmarkReporter {
   // This is set to 0.0 if memory tracing is not enabled.
   double max_heapbytes_used;
 
+  // By default Big-O is computed for CPU time, but that is not what you want
+  // to happen when manual time was requested, which is stored as real time.
+  bool use_real_time_for_initial_big_o;
+
   // Keep track of arguments to compute asymptotic complexity
   BigO complexity;
   BigOFunc* complexity_lambda;
diff --git a/src/benchmark_runner.cc b/src/benchmark_runner.cc
index d35bc30d49..dcddb437e3 100644
--- a/src/benchmark_runner.cc
+++ b/src/benchmark_runner.cc
@@ -96,6 +96,7 @@ BenchmarkReporter::Run CreateRunReport(
   } else {
     report.real_accumulated_time = results.real_time_used;
   }
+  report.use_real_time_for_initial_big_o = b.use_manual_time();
   report.cpu_accumulated_time = results.cpu_time_used;
   report.complexity_n = results.complexity_n;
   report.complexity = b.complexity();
diff --git a/src/complexity.cc b/src/complexity.cc
index e53dd342d1..eee3122646 100644
--- a/src/complexity.cc
+++ b/src/complexity.cc
@@ -186,8 +186,19 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
     result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
     result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
   } else {
-    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
-    result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
+    const BigO* InitialBigO = &reports[0].complexity;
+    const bool use_real_time_for_initial_big_o =
+        reports[0].use_real_time_for_initial_big_o;
+    if (use_real_time_for_initial_big_o) {
+      result_real = MinimalLeastSq(n, real_time, *InitialBigO);
+      InitialBigO = &result_real.complexity;
+      // The Big-O complexity for CPU time must have the same Big-O function!
+    }
+    result_cpu = MinimalLeastSq(n, cpu_time, *InitialBigO);
+    InitialBigO = &result_cpu.complexity;
+    if (!use_real_time_for_initial_big_o) {
+      result_real = MinimalLeastSq(n, real_time, *InitialBigO);
+    }
   }
 
   // Drop the 'args' when reporting complexity.
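(Illustration only, not part of the patch; the benchmark below is hypothetical.) The net effect of the complexity.cc change is that, when a benchmark uses manual timing, the auto-deduced Big-O is first fitted against the manually reported (real) time, and the CPU-time fit then reuses that complexity function, rather than the other way around. A user-level benchmark that relies on this would look roughly like:

#include <benchmark/benchmark.h>

static void BM_ManualLinear(benchmark::State& state) {
  for (auto _ : state) {
    // No real work; pretend every iteration costs 100 ns per element.
    state.SetIterationTime(static_cast<double>(state.range(0)) * 100e-9);
  }
  state.SetComplexityN(state.range(0));
}
// The auto-deduced complexity is expected to follow the manual time, i.e. "N".
BENCHMARK(BM_ManualLinear)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 20)
    ->UseManualTime()
    ->Complexity();

BENCHMARK_MAIN();

Before this patch the fit was anchored to the (near-zero) CPU time, so the reported complexity for such a manually timed benchmark was essentially noise.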
diff --git a/test/complexity_test.cc b/test/complexity_test.cc
index 03e71472dc..a28fc41647 100644
--- a/test/complexity_test.cc
+++ b/test/complexity_test.cc
@@ -194,18 +194,17 @@ BENCHMARK(BM_Complexity_O_N_log_N)
 const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time";
 const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time_BigO";
 const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time_RMS";
-const char *enum_big_o_n_lg_n = "NlgN";
-const char *auto_big_o_n_lg_n = "lgN";
+const char *enum_auto_big_o_n_lg_n = "NlgN";
 const char *lambda_big_o_n_lg_n = "f\\(N\\)";
 
 // Add enum tests
 ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
-                     rms_o_n_lg_n_test_name, enum_big_o_n_lg_n,
+                     rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
                      /*family_index=*/6);
 
-// NOTE: auto big-o is wrong.
+// NOTE: auto big-o is wrong.
 ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
-                     rms_o_n_lg_n_test_name, auto_big_o_n_lg_n,
+                     rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
                      /*family_index=*/7);
 
 // Add lambda tests

From 13e8159edffef8388928e19f47b96d929f9f8d9e Mon Sep 17 00:00:00 2001
From: Roman Lebedev
Date: Sat, 17 Feb 2024 05:58:35 +0300
Subject: [PATCH 3/4] complexity_test: do more work in the empty loop

---
 test/BUILD              |  1 +
 test/CMakeLists.txt     |  8 +-------
 test/complexity_test.cc | 40 ++++++++++++++++++++++++++++++++--------
 3 files changed, 34 insertions(+), 15 deletions(-)

diff --git a/test/BUILD b/test/BUILD
index 22b7dba4b9..e43b802350 100644
--- a/test/BUILD
+++ b/test/BUILD
@@ -35,6 +35,7 @@ PER_SRC_TEST_ARGS = {
     "repetitions_test.cc": [" --benchmark_repetitions=3"],
     "spec_arg_test.cc": ["--benchmark_filter=BM_NotChosen"],
     "spec_arg_verbosity_test.cc": ["--v=42"],
+    "complexity_test.cc": ["--benchmark_min_time=1000000x"],
 }
 
 cc_library(
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index eb7137efcc..1de175f98d 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -218,14 +218,8 @@ if(NOT (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC"))
   benchmark_add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01s)
 endif()
 
-# Attempt to work around flaky test failures when running on Appveyor servers.
-if (DEFINED ENV{APPVEYOR}) - set(COMPLEXITY_MIN_TIME "0.5s") -else() - set(COMPLEXITY_MIN_TIME "0.01s") -endif() compile_output_test(complexity_test) -benchmark_add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME}) +benchmark_add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=1000000x) ############################################################################### # GoogleTest Unit Tests diff --git a/test/complexity_test.cc b/test/complexity_test.cc index a28fc41647..0c159cd27d 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -70,8 +70,14 @@ int AddComplexityTest(const std::string &test_name, void BM_Complexity_O1(benchmark::State &state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = double(state.iterations()) * double(state.iterations()); - benchmark::DoNotOptimize(iterations); + benchmark::DoNotOptimize(state.iterations()); + double tmp = state.iterations(); + benchmark::DoNotOptimize(tmp); + for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { + benchmark::DoNotOptimize(state.iterations()); + tmp *= state.iterations(); + benchmark::DoNotOptimize(tmp); + } // always 1ns per iteration state.SetIterationTime(42 * 1e-9); @@ -113,8 +119,14 @@ ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, void BM_Complexity_O_N(benchmark::State &state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = double(state.iterations()) * double(state.iterations()); - benchmark::DoNotOptimize(iterations); + benchmark::DoNotOptimize(state.iterations()); + double tmp = state.iterations(); + benchmark::DoNotOptimize(tmp); + for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { + benchmark::DoNotOptimize(state.iterations()); + tmp *= state.iterations(); + benchmark::DoNotOptimize(tmp); + } // 1ns per iteration per entry state.SetIterationTime(state.range(0) * 42 * 1e-9); @@ -165,8 +177,14 @@ static const double kLog2E = 1.44269504088896340736; static void BM_Complexity_O_N_log_N(benchmark::State &state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = double(state.iterations()) * double(state.iterations()); - benchmark::DoNotOptimize(iterations); + benchmark::DoNotOptimize(state.iterations()); + double tmp = state.iterations(); + benchmark::DoNotOptimize(tmp); + for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { + benchmark::DoNotOptimize(state.iterations()); + tmp *= state.iterations(); + benchmark::DoNotOptimize(tmp); + } state.SetIterationTime(state.range(0) * kLog2E * std::log(state.range(0)) * 42 * 1e-9); @@ -219,8 +237,14 @@ ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, void BM_ComplexityCaptureArgs(benchmark::State &state, int n) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = double(state.iterations()) * double(state.iterations()); - benchmark::DoNotOptimize(iterations); + benchmark::DoNotOptimize(state.iterations()); + double tmp = state.iterations(); + benchmark::DoNotOptimize(tmp); + for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { + benchmark::DoNotOptimize(state.iterations()); + tmp *= state.iterations(); + benchmark::DoNotOptimize(tmp); + } state.SetIterationTime(state.range(0) * 42 * 1e-9); } From 8f64ed25036e912b3b84786a90f6f8b77751e7e6 Mon Sep 17 
00:00:00 2001
From: Roman Lebedev
Date: Sat, 17 Feb 2024 06:49:25 +0300
Subject: [PATCH 4/4] Make all empty loops do a bit more work

Looks like some of these tests still fail on Windows; I guess the clock
precision there is too coarse.
---
 test/basic_test.cc                 |  2 +-
 test/diagnostics_test.cc           |  4 ++--
 test/link_main_test.cc             |  2 +-
 test/memory_manager_test.cc        |  2 +-
 test/perf_counters_test.cc         |  2 +-
 test/reporter_output_test.cc       |  6 +++---
 test/skip_with_error_test.cc       |  2 +-
 test/user_counters_tabular_test.cc |  2 +-
 test/user_counters_test.cc         | 14 +++++++-------
 9 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/test/basic_test.cc b/test/basic_test.cc
index cba1b0f992..c25bec7ddd 100644
--- a/test/basic_test.cc
+++ b/test/basic_test.cc
@@ -5,7 +5,7 @@
 
 void BM_empty(benchmark::State& state) {
   for (auto _ : state) {
-    auto iterations = state.iterations();
+    auto iterations = double(state.iterations()) * double(state.iterations());
     benchmark::DoNotOptimize(iterations);
   }
 }
diff --git a/test/diagnostics_test.cc b/test/diagnostics_test.cc
index 0cd3edbd42..7c68a98929 100644
--- a/test/diagnostics_test.cc
+++ b/test/diagnostics_test.cc
@@ -49,7 +49,7 @@ void BM_diagnostic_test(benchmark::State& state) {
   if (called_once == false) try_invalid_pause_resume(state);
 
   for (auto _ : state) {
-    auto iterations = state.iterations();
+    auto iterations = double(state.iterations()) * double(state.iterations());
     benchmark::DoNotOptimize(iterations);
   }
 
@@ -65,7 +65,7 @@ void BM_diagnostic_test_keep_running(benchmark::State& state) {
   if (called_once == false) try_invalid_pause_resume(state);
 
   while (state.KeepRunning()) {
-    auto iterations = state.iterations();
+    auto iterations = double(state.iterations()) * double(state.iterations());
     benchmark::DoNotOptimize(iterations);
   }
 
diff --git a/test/link_main_test.cc b/test/link_main_test.cc
index e806500a9a..131937eebc 100644
--- a/test/link_main_test.cc
+++ b/test/link_main_test.cc
@@ -2,7 +2,7 @@
 
 void BM_empty(benchmark::State& state) {
   for (auto _ : state) {
-    auto iterations = state.iterations();
+    auto iterations = double(state.iterations()) * double(state.iterations());
     benchmark::DoNotOptimize(iterations);
   }
 }
diff --git a/test/memory_manager_test.cc b/test/memory_manager_test.cc
index d94bd5161b..4df674d586 100644
--- a/test/memory_manager_test.cc
+++ b/test/memory_manager_test.cc
@@ -14,7 +14,7 @@ class TestMemoryManager : public benchmark::MemoryManager {
 
 void BM_empty(benchmark::State& state) {
   for (auto _ : state) {
-    auto iterations = state.iterations();
+    auto iterations = double(state.iterations()) * double(state.iterations());
     benchmark::DoNotOptimize(iterations);
   }
 }
diff --git a/test/perf_counters_test.cc b/test/perf_counters_test.cc
index b0a3ab0619..3cc593e629 100644
--- a/test/perf_counters_test.cc
+++ b/test/perf_counters_test.cc
@@ -14,7 +14,7 @@ BM_DECLARE_string(benchmark_perf_counters);
 
 static void BM_Simple(benchmark::State& state) {
   for (auto _ : state) {
-    auto iterations = state.iterations();
+    auto iterations = double(state.iterations()) * double(state.iterations());
     benchmark::DoNotOptimize(iterations);
   }
 }
diff --git a/test/reporter_output_test.cc b/test/reporter_output_test.cc
index ea5381d20b..7867165d1f 100644
--- a/test/reporter_output_test.cc
+++ b/test/reporter_output_test.cc
@@ -96,7 +96,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
 void BM_bytes_per_second(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    auto iterations = state.iterations();
+
auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } state.SetBytesProcessed(1); @@ -128,7 +128,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); void BM_items_per_second(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } state.SetItemsProcessed(1); @@ -409,7 +409,7 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, void BM_Complexity_O1(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } state.SetComplexityN(state.range(0)); diff --git a/test/skip_with_error_test.cc b/test/skip_with_error_test.cc index b4c5e154c4..2139a19e25 100644 --- a/test/skip_with_error_test.cc +++ b/test/skip_with_error_test.cc @@ -143,7 +143,7 @@ ADD_CASES("BM_error_during_running_ranged_for", void BM_error_after_running(benchmark::State& state) { for (auto _ : state) { - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } if (state.thread_index() <= (state.threads() / 2)) diff --git a/test/user_counters_tabular_test.cc b/test/user_counters_tabular_test.cc index ffd3c0992c..cfc1ab069c 100644 --- a/test/user_counters_tabular_test.cc +++ b/test/user_counters_tabular_test.cc @@ -64,7 +64,7 @@ ADD_CASES(TC_CSVOut, {{"%csv_header," void BM_Counters_Tabular(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; diff --git a/test/user_counters_test.cc b/test/user_counters_test.cc index 4cd8ee3739..22252acbf6 100644 --- a/test/user_counters_test.cc +++ b/test/user_counters_test.cc @@ -67,7 +67,7 @@ int num_calls1 = 0; void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } state.counters["foo"] = 1; @@ -119,7 +119,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", void BM_Counters_Rate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; @@ -163,7 +163,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate); void BM_Invert(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; @@ -204,7 +204,7 @@ CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert); void 
BM_Counters_InvertedRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; @@ -333,7 +333,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", void BM_Counters_AvgThreadsRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; @@ -421,7 +421,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; @@ -513,7 +513,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations); void BM_Counters_kAvgIterationsRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark;