
Added dry-run benchmark functionality, enabled through the CLI argument --benchmark_dry_run. #1851

Merged
9 commits merged on Sep 12, 2024
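
As a quick illustration of what this PR enables: a benchmark binary built against a library with this change can be forced to run every registered benchmark exactly once from the command line. In the sketch below, only the --benchmark_dry_run flag comes from this PR; the benchmark and binary names are hypothetical.

#include <benchmark/benchmark.h>

// Trivial benchmark used only to illustrate the new flag.
static void BM_Noop(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
// Without the flag, this would run 1000 iterations over 5 repetitions.
BENCHMARK(BM_Noop)->Iterations(1000)->Repetitions(5);

BENCHMARK_MAIN();

// Hypothetical invocation: the configured Iterations()/Repetitions() are
// bypassed and BM_Noop runs a single iteration of a single repetition:
//   ./example_benchmark --benchmark_dry_run=true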
10 changes: 10 additions & 0 deletions src/benchmark.cc
@@ -92,6 +92,11 @@ BM_DEFINE_double(benchmark_min_warmup_time, 0.0);
// standard deviation of the runs will be reported.
BM_DEFINE_int32(benchmark_repetitions, 1);

// If enabled, forces each benchmark to execute exactly one iteration and one
// repetition, bypassing any configured
// MinTime()/MinWarmUpTime()/Iterations()/Repetitions()
BM_DEFINE_bool(benchmark_dry_run, false);

// If set, enable random interleaving of repetitions of all benchmarks.
// See http://github.com/google/benchmark/issues/1051 for details.
BM_DEFINE_bool(benchmark_enable_random_interleaving, false);
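
For comparison, the closest per-benchmark approximation of this behaviour with the pre-existing registration API (no new names below, only the established Iterations()/Repetitions() setters) would be:

#include <benchmark/benchmark.h>

static void BM_Example(benchmark::State& state) {
  for (auto _ : state) {
  }
}
// Roughly what the new flag enforces, but opted into per benchmark and
// baked into the source rather than toggled from the command line.
BENCHMARK(BM_Example)->Iterations(1)->Repetitions(1);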
@@ -717,6 +722,7 @@ void ParseCommandLineFlags(int* argc, char** argv) {
&FLAGS_benchmark_min_warmup_time) ||
ParseInt32Flag(argv[i], "benchmark_repetitions",
&FLAGS_benchmark_repetitions) ||
ParseBoolFlag(argv[i], "benchmark_dry_run", &FLAGS_benchmark_dry_run) ||
ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving",
&FLAGS_benchmark_enable_random_interleaving) ||
ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
@@ -755,6 +761,9 @@ void ParseCommandLineFlags(int* argc, char** argv) {
if (FLAGS_benchmark_color.empty()) {
PrintUsageAndExit();
}
if (FLAGS_benchmark_dry_run) {
AddCustomContext("dry_run", "true");
}
for (const auto& kv : FLAGS_benchmark_context) {
AddCustomContext(kv.first, kv.second);
}
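
The hunk above stamps a "dry_run" entry into the report context whenever the flag is set, so downstream tooling can distinguish a dry run from a normal run. The same AddCustomContext mechanism is available in the public API, so user code can attach its own entries; a small usage sketch, with a hypothetical key and value:

#include <benchmark/benchmark.h>

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
  // Hypothetical key/value; shows up in the "context" block of the output.
  benchmark::AddCustomContext("example_key", "example_value");
  benchmark::RunSpecifiedBenchmarks();
  benchmark::Shutdown();
  return 0;
}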
@@ -783,6 +792,7 @@ void PrintDefaultHelp() {
" [--benchmark_min_time=`<integer>x` OR `<float>s` ]\n"
" [--benchmark_min_warmup_time=<min_warmup_time>]\n"
" [--benchmark_repetitions=<num_repetitions>]\n"
" [--benchmark_dry_run={true|false}]\n"
" [--benchmark_enable_random_interleaving={true|false}]\n"
" [--benchmark_report_aggregates_only={true|false}]\n"
" [--benchmark_display_aggregates_only={true|false}]\n"
39 changes: 28 additions & 11 deletions src/benchmark_runner.cc
@@ -58,6 +58,14 @@

namespace benchmark {

BM_DECLARE_bool(benchmark_dry_run);
BM_DECLARE_string(benchmark_min_time);
BM_DECLARE_double(benchmark_min_warmup_time);
BM_DECLARE_int32(benchmark_repetitions);
BM_DECLARE_bool(benchmark_report_aggregates_only);
BM_DECLARE_bool(benchmark_display_aggregates_only);
BM_DECLARE_string(benchmark_perf_counters);

namespace internal {

MemoryManager* memory_manager = nullptr;
@@ -228,20 +236,29 @@ BenchmarkRunner::BenchmarkRunner(
: b(b_),
reports_for_family(reports_for_family_),
parsed_benchtime_flag(ParseBenchMinTime(FLAGS_benchmark_min_time)),
min_time(ComputeMinTime(b_, parsed_benchtime_flag)),
min_warmup_time((!IsZero(b.min_time()) && b.min_warmup_time() > 0.0)
? b.min_warmup_time()
: FLAGS_benchmark_min_warmup_time),
warmup_done(!(min_warmup_time > 0.0)),
repeats(b.repetitions() != 0 ? b.repetitions()
: FLAGS_benchmark_repetitions),
min_time(FLAGS_benchmark_dry_run
? 0
: ComputeMinTime(b_, parsed_benchtime_flag)),
min_warmup_time(
FLAGS_benchmark_dry_run
? 0
: ((!IsZero(b.min_time()) && b.min_warmup_time() > 0.0)
? b.min_warmup_time()
: FLAGS_benchmark_min_warmup_time)),
warmup_done(FLAGS_benchmark_dry_run ? true : !(min_warmup_time > 0.0)),
repeats(FLAGS_benchmark_dry_run
? 1
: (b.repetitions() != 0 ? b.repetitions()
: FLAGS_benchmark_repetitions)),
has_explicit_iteration_count(b.iterations() != 0 ||
parsed_benchtime_flag.tag ==
BenchTimeType::ITERS),
pool(static_cast<size_t>(b.threads() - 1)),
iters(has_explicit_iteration_count
? ComputeIters(b_, parsed_benchtime_flag)
: 1),
iters(FLAGS_benchmark_dry_run
? 1
: (has_explicit_iteration_count
? ComputeIters(b_, parsed_benchtime_flag)
: 1)),
perf_counters_measurement_ptr(pcm_) {
run_results.display_report_aggregates_only =
(FLAGS_benchmark_report_aggregates_only ||
@@ -339,7 +356,7 @@ bool BenchmarkRunner::ShouldReportIterationResults(
// Determine if this run should be reported:
// either it has run for a sufficient amount of time,
// or an error was reported.
return i.results.skipped_ ||
return i.results.skipped_ || FLAGS_benchmark_dry_run ||
i.iters >= kMaxIterations || // Too many iterations already.
i.seconds >=
GetMinTimeToApply() || // The elapsed time is large enough.
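
Taken together, the benchmark_runner.cc changes above collapse every timing-related knob when the flag is set and force the single resulting iteration to be reported. A simplified restatement of that selection logic, with illustrative names rather than the runner's actual members:

// Illustrative sketch of the dry-run overrides, not the actual constructor.
struct RunPlanSketch {
  double min_time;
  double min_warmup_time;
  int repetitions;
  long iterations;
};

static RunPlanSketch MakePlanSketch(bool dry_run, double cfg_min_time,
                                    double cfg_min_warmup_time,
                                    int cfg_repetitions, long cfg_iterations) {
  if (dry_run) {
    // Exactly one iteration of one repetition; skip warmup and min-time logic.
    return {0.0, 0.0, 1, 1};
  }
  return {cfg_min_time, cfg_min_warmup_time, cfg_repetitions, cfg_iterations};
}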
7 changes: 0 additions & 7 deletions src/benchmark_runner.h
@@ -25,13 +25,6 @@

namespace benchmark {

BM_DECLARE_string(benchmark_min_time);
BM_DECLARE_double(benchmark_min_warmup_time);
BM_DECLARE_int32(benchmark_repetitions);
BM_DECLARE_bool(benchmark_report_aggregates_only);
BM_DECLARE_bool(benchmark_display_aggregates_only);
BM_DECLARE_string(benchmark_perf_counters);

namespace internal {

extern MemoryManager* memory_manager;
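
These BM_DECLARE_* lines move out of the shared header and into benchmark_runner.cc (see the additions near the top of that file above), keeping the flag declarations next to the one translation unit that reads them. A rough sketch of the declare/define split such macros provide, with illustrative names rather than the library's exact macro definitions:

// Rough sketch only; the real BM_DEFINE_*/BM_DECLARE_* macros differ in detail.
#define SKETCH_DEFINE_bool(name, default_val) bool FLAGS_##name = (default_val)
#define SKETCH_DECLARE_bool(name) extern bool FLAGS_##name

SKETCH_DECLARE_bool(benchmark_dry_run);        // what the shared header used to do
SKETCH_DEFINE_bool(benchmark_dry_run, false);  // what src/benchmark.cc does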