Rewrite complexity_test to use (hardcoded) manual time #1757

Merged · 4 commits · Feb 19, 2024
5 changes: 5 additions & 0 deletions include/benchmark/benchmark.h
@@ -1792,6 +1792,7 @@ class BENCHMARK_EXPORT BenchmarkReporter {
real_accumulated_time(0),
cpu_accumulated_time(0),
max_heapbytes_used(0),
use_real_time_for_initial_big_o(false),
complexity(oNone),
complexity_lambda(),
complexity_n(0),
@@ -1834,6 +1835,10 @@ class BENCHMARK_EXPORT BenchmarkReporter {
// This is set to 0.0 if memory tracing is not enabled.
double max_heapbytes_used;

// By default Big-O is computed for CPU time, but that is not what you want
// when manual time was requested; manual time is stored as real time.
bool use_real_time_for_initial_big_o;

// Keep track of arguments to compute asymptotic complexity
BigO complexity;
BigOFunc* complexity_lambda;
1 change: 1 addition & 0 deletions src/benchmark_runner.cc
@@ -96,6 +96,7 @@ BenchmarkReporter::Run CreateRunReport(
} else {
report.real_accumulated_time = results.real_time_used;
}
report.use_real_time_for_initial_big_o = b.use_manual_time();
Member comment: To check this logic:

If we're using manual time, we'll report manual time as "real" but then use "real" time for Big-O.
If we're not using manual time, we'll report real time as "real" but use "cpu" time for Big-O.

Maybe this could be a comment somewhere, just to track the interaction between manual, real, and CPU times for Big-O.

report.cpu_accumulated_time = results.cpu_time_used;
report.complexity_n = results.complexity_n;
report.complexity = b.complexity();
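
In concrete terms (a sketch, not part of the diff; the benchmark name and timings are illustrative): a manual-time benchmark reports whatever it passes to SetIterationTime() as its "real" time, which is why CreateRunReport() above flags such runs so the Big-O fit starts from real time.

#include <benchmark/benchmark.h>

static void BM_ManualExample(benchmark::State& state) {
  for (auto _ : state) {
    // The hand-measured duration; this is what ends up in real_accumulated_time.
    state.SetIterationTime(static_cast<double>(state.range(0)) * 1e-9);
  }
  state.SetComplexityN(state.range(0));
}
// use_manual_time() is true here, so use_real_time_for_initial_big_o is set
// and the O(N) fit is computed from the manual ("real") times.
BENCHMARK(BM_ManualExample)
    ->Range(1 << 10, 1 << 20)
    ->UseManualTime()
    ->Complexity(benchmark::oN);
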
15 changes: 13 additions & 2 deletions src/complexity.cc
@@ -186,8 +186,19 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
} else {
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
const BigO* InitialBigO = &reports[0].complexity;
Member comment: This section needs some comments. Maybe there's a way to simplify the logic?

const bool use_real_time_for_initial_big_o =
reports[0].use_real_time_for_initial_big_o;
if (use_real_time_for_initial_big_o) {
result_real = MinimalLeastSq(n, real_time, *InitialBigO);
InitialBigO = &result_real.complexity;
// The Big-O complexity for CPU time must have the same Big-O function!
}
result_cpu = MinimalLeastSq(n, cpu_time, *InitialBigO);
InitialBigO = &result_cpu.complexity;
if (!use_real_time_for_initial_big_o) {
result_real = MinimalLeastSq(n, real_time, *InitialBigO);
}
}

// Drop the 'args' when reporting complexity.
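
One way to address the reviewer's suggestion — a sketch only, not part of the diff, assuming the LeastSq result type and the MinimalLeastSq() overloads used above — is to fit the seeding time series first and reuse its deduced Big-O for the other series:

// Manual time lives in real_time, so it seeds the fit in that mode; otherwise
// CPU time seeds the fit. The second series reuses the complexity deduced from
// the first, so both report the same Big-O function.
const bool seed_with_real = reports[0].use_real_time_for_initial_big_o;
LeastSq seed = MinimalLeastSq(n, seed_with_real ? real_time : cpu_time,
                              reports[0].complexity);
LeastSq other = MinimalLeastSq(n, seed_with_real ? cpu_time : real_time,
                               seed.complexity);
result_real = seed_with_real ? seed : other;
result_cpu = seed_with_real ? other : seed;
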
1 change: 1 addition & 0 deletions test/BUILD
@@ -35,6 +35,7 @@ PER_SRC_TEST_ARGS = {
"repetitions_test.cc": [" --benchmark_repetitions=3"],
"spec_arg_test.cc": ["--benchmark_filter=BM_NotChosen"],
"spec_arg_verbosity_test.cc": ["--v=42"],
"complexity_test.cc": ["--benchmark_min_time=1000000x"],
}

cc_library(
8 changes: 1 addition & 7 deletions test/CMakeLists.txt
@@ -218,14 +218,8 @@ if(NOT (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC"))
benchmark_add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01s)
endif()

# Attempt to work around flaky test failures when running on Appveyor servers.
if (DEFINED ENV{APPVEYOR})
set(COMPLEXITY_MIN_TIME "0.5s")
else()
set(COMPLEXITY_MIN_TIME "0.01s")
endif()
compile_output_test(complexity_test)
benchmark_add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
benchmark_add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=1000000x)

###############################################################################
# GoogleTest Unit Tests
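
For context (an aside, not part of the diff): the trailing "x" in --benchmark_min_time=1000000x requests a minimum iteration count rather than a minimum number of seconds, so with the hardcoded manual times every benchmark runs a deterministic number of iterations and the Appveyor-specific timing workaround is no longer needed. A per-benchmark equivalent — sketched here with an illustrative registration, not one taken from the test — would be:

#include <benchmark/benchmark.h>

static void BM_Pinned(benchmark::State& state) {
  for (auto _ : state) {
    state.SetIterationTime(42 * 1e-9);  // hardcoded manual time
  }
  state.SetComplexityN(state.range(0));
}
// Iterations() pins the iteration count in code, much like the
// --benchmark_min_time=<N>x flag does from the command line.
BENCHMARK(BM_Pinned)
    ->Range(1, 1 << 18)
    ->UseManualTime()
    ->Iterations(1000000)
    ->Complexity(benchmark::o1);
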
2 changes: 1 addition & 1 deletion test/basic_test.cc
@@ -5,7 +5,7 @@

void BM_empty(benchmark::State& state) {
for (auto _ : state) {
auto iterations = state.iterations();
auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
}
158 changes: 100 additions & 58 deletions test/complexity_test.cc
@@ -69,35 +69,44 @@ int AddComplexityTest(const std::string &test_name,

void BM_Complexity_O1(benchmark::State &state) {
for (auto _ : state) {
for (int i = 0; i < 1024; ++i) {
benchmark::DoNotOptimize(i);
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations();
benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations();
benchmark::DoNotOptimize(tmp);
}

// always 42ns per iteration
state.SetIterationTime(42 * 1e-9);
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1)
->Range(1, 1 << 18)
->UseManualTime()
Member comment: Does this mean complexity only works for manual time? Do we have any tests where we use complexity with real time?

->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->UseManualTime()->Complexity();
BENCHMARK(BM_Complexity_O1)
->Range(1, 1 << 18)
->UseManualTime()
->Complexity([](benchmark::IterationCount) { return 1.0; });

const char *one_test_name = "BM_Complexity_O1";
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
const char *rms_o_1_test_name = "BM_Complexity_O1_RMS";
const char *enum_big_o_1 = "\\([0-9]+\\)";
// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto
// deduced.
// See https://github.com/google/benchmark/issues/272
const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)|(N\\^2)";
const char *one_test_name = "BM_Complexity_O1/manual_time";
const char *big_o_1_test_name = "BM_Complexity_O1/manual_time_BigO";
const char *rms_o_1_test_name = "BM_Complexity_O1/manual_time_RMS";
const char *enum_auto_big_o_1 = "\\([0-9]+\\)";
const char *lambda_big_o_1 = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
enum_big_o_1, /*family_index=*/0);
enum_auto_big_o_1, /*family_index=*/0);

// Add auto enum tests
// Add auto tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
auto_big_o_1, /*family_index=*/1);
enum_auto_big_o_1, /*family_index=*/1);

// Add lambda tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
@@ -107,84 +116,102 @@ ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //

std::vector<int> ConstructRandomVector(int64_t size) {
std::vector<int> v;
v.reserve(static_cast<size_t>(size));
for (int i = 0; i < size; ++i) {
v.push_back(static_cast<int>(std::rand() % size));
}
return v;
}

void BM_Complexity_O_N(benchmark::State &state) {
auto v = ConstructRandomVector(state.range(0));
// Test worst case scenario (item not in vector)
const int64_t item_not_in_vector = state.range(0) * 2;
for (auto _ : state) {
auto it = std::find(v.begin(), v.end(), item_not_in_vector);
benchmark::DoNotOptimize(it);
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations();
benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations();
benchmark::DoNotOptimize(tmp);
}

// 42ns per iteration per entry
state.SetIterationTime(state.range(0) * 42 * 1e-9);
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Range(1 << 10, 1 << 20)
->UseManualTime()
->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Range(1 << 10, 1 << 20)
->UseManualTime()
->Complexity();
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 20)
->UseManualTime()
->Complexity([](benchmark::IterationCount n) -> double {
return static_cast<double>(n);
});
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();

const char *n_test_name = "BM_Complexity_O_N";
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS";
const char *n_test_name = "BM_Complexity_O_N/manual_time";
const char *big_o_n_test_name = "BM_Complexity_O_N/manual_time_BigO";
const char *rms_o_n_test_name = "BM_Complexity_O_N/manual_time_RMS";
const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
enum_auto_big_o_n, /*family_index=*/3);

// Add auto tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
enum_auto_big_o_n, /*family_index=*/4);

// Add lambda tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
lambda_big_o_n, /*family_index=*/4);
lambda_big_o_n, /*family_index=*/5);

// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
// ------------------------- Testing BigO O(NlgN) ------------------------- //
// ========================================================================= //

static const double kLog2E = 1.44269504088896340736;
static void BM_Complexity_O_N_log_N(benchmark::State &state) {
auto v = ConstructRandomVector(state.range(0));
for (auto _ : state) {
std::sort(v.begin(), v.end());
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations();
benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations();
benchmark::DoNotOptimize(tmp);
}

state.SetIterationTime(state.range(0) * kLog2E * std::log(state.range(0)) *
42 * 1e-9);
}
state.SetComplexityN(state.range(0));
}
static const double kLog2E = 1.44269504088896340736;
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Range(1 << 10, 1U << 24)
->UseManualTime()
->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Range(1 << 10, 1U << 24)
->UseManualTime()
->Complexity();
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1U << 24)
->UseManualTime()
->Complexity([](benchmark::IterationCount n) {
return kLog2E * static_cast<double>(n) * std::log(static_cast<double>(n));
});
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();

const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time_BigO";
const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time_RMS";
const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";

@@ -193,33 +220,48 @@ ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
/*family_index=*/6);

// Add lambda tests
// NOTE: auto big-o is wrong.
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n,
rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
/*family_index=*/7);

// Add lambda tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n,
/*family_index=*/8);

// ========================================================================= //
// -------- Testing formatting of Complexity with captured args ------------ //
// ========================================================================= //

void BM_ComplexityCaptureArgs(benchmark::State &state, int n) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = state.iterations();
benchmark::DoNotOptimize(iterations);
benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations();
benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations();
benchmark::DoNotOptimize(tmp);
}

state.SetIterationTime(state.range(0) * 42 * 1e-9);
}
state.SetComplexityN(n);
}

BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
->UseManualTime()
->Complexity(benchmark::oN)
->Ranges({{1, 2}, {3, 4}});

const std::string complexity_capture_name =
"BM_ComplexityCaptureArgs/capture_test";
"BM_ComplexityCaptureArgs/capture_test/manual_time";

ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
complexity_capture_name + "_RMS", "N", /*family_index=*/9);
complexity_capture_name + "_RMS", "N",
/*family_index=*/9);

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
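
The pattern repeated throughout complexity_test.cc above has two halves: the busy loop over state.iterations() keeps CPU time non-zero (the in-file comments note this avoids a divide-by-zero), while SetIterationTime() supplies the hardcoded manual time that the Big-O fit actually consumes. Stripped to its essentials — a sketch with an illustrative name, not a benchmark from the file — each test body looks like:

#include <benchmark/benchmark.h>

static void BM_Shape(benchmark::State& state) {
  for (auto _ : state) {
    // Burn a little CPU so cpu_accumulated_time is never zero.
    double tmp = static_cast<double>(state.iterations());
    benchmark::DoNotOptimize(tmp);
    // Hand the framework a deterministic, hardcoded "measurement".
    state.SetIterationTime(42 * 1e-9);
  }
  state.SetComplexityN(state.range(0));  // the N used by the Big-O fit
}
BENCHMARK(BM_Shape)->Range(1, 1 << 18)->UseManualTime()->Complexity();
// The real test file wires its benchmarks into an output-test harness with its
// own main(); BENCHMARK_MAIN() here just makes the sketch self-contained.
BENCHMARK_MAIN();
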
4 changes: 2 additions & 2 deletions test/diagnostics_test.cc
@@ -49,7 +49,7 @@ void BM_diagnostic_test(benchmark::State& state) {
if (called_once == false) try_invalid_pause_resume(state);

for (auto _ : state) {
auto iterations = state.iterations();
auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}

@@ -65,7 +65,7 @@ void BM_diagnostic_test_keep_running(benchmark::State& state) {
if (called_once == false) try_invalid_pause_resume(state);

while (state.KeepRunning()) {
auto iterations = state.iterations();
auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}

2 changes: 1 addition & 1 deletion test/link_main_test.cc
@@ -2,7 +2,7 @@

void BM_empty(benchmark::State& state) {
for (auto _ : state) {
auto iterations = state.iterations();
auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
}
2 changes: 1 addition & 1 deletion test/memory_manager_test.cc
@@ -14,7 +14,7 @@ class TestMemoryManager : public benchmark::MemoryManager {

void BM_empty(benchmark::State& state) {
for (auto _ : state) {
auto iterations = state.iterations();
auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
}
2 changes: 1 addition & 1 deletion test/perf_counters_test.cc
@@ -14,7 +14,7 @@ BM_DECLARE_string(benchmark_perf_counters);

static void BM_Simple(benchmark::State& state) {
for (auto _ : state) {
auto iterations = state.iterations();
auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
}