diff --git a/src/docs/tutorial.rst b/src/docs/tutorial.rst
index 698ea09..15ca3ed 100644
--- a/src/docs/tutorial.rst
+++ b/src/docs/tutorial.rst
@@ -342,6 +342,11 @@ output:
    :language: c++
    :linenos:
 
+Nanobench allows specifying further context information, which may be accessed using ``{{context(name)}}`` where ``name`` names a variable defined in ``Bench::context()``.
+
+.. literalinclude:: ../test/tutorial_context.cpp
+   :language: c++
+   :linenos:
 
 .. _tutorial-template-csv:
 
diff --git a/src/include/nanobench.h b/src/include/nanobench.h
index adc195a..6e11ca2 100644
--- a/src/include/nanobench.h
+++ b/src/include/nanobench.h
@@ -43,6 +43,7 @@
 #include <cstring>       // memcpy
 #include <iosfwd>        // for std::ostream* custom output target in Config
 #include <string>        // all names
+#include <unordered_map> // holds context information of results
 #include <vector>        // holds all results
 
 #define ANKERL_NANOBENCH(x) ANKERL_NANOBENCH_PRIVATE_##x()
@@ -177,6 +178,8 @@ class BigO;
  *
  * * `{{relative}}` True or false, depending on the setting you have used. See Bench::relative().
  *
+ * * `{{context(variableName)}}` See Bench::context().
+ *
  * Apart from these tags, it is also possible to use some mathematical operations on the measurement data. The operations
  * are of the form `{{command(name)}}`. Currently `name` can be one of `elapsed`, `iterations`.
If performance counters
 * are available (currently only on current Linux systems), you also have `pagefaults`, `cpucycles`,
@@ -395,6 +398,7 @@ struct Config {
     std::string mTimeUnitName = "ns";
     bool mShowPerformanceCounters = true;
     bool mIsRelative = false;
+    std::unordered_map<std::string, std::string> mContext{};
 
     Config();
     ~Config();
@@ -442,6 +446,8 @@ class Result {
     ANKERL_NANOBENCH(NODISCARD) double sumProduct(Measure m1, Measure m2) const noexcept;
     ANKERL_NANOBENCH(NODISCARD) double minimum(Measure m) const noexcept;
     ANKERL_NANOBENCH(NODISCARD) double maximum(Measure m) const noexcept;
+    ANKERL_NANOBENCH(NODISCARD) std::string const& context(char const*) const;
+    ANKERL_NANOBENCH(NODISCARD) std::string const& context(std::string const&) const;
 
     ANKERL_NANOBENCH(NODISCARD) bool has(Measure m) const noexcept;
     ANKERL_NANOBENCH(NODISCARD) double get(size_t idx, Measure m) const;
@@ -674,6 +680,31 @@ class Bench {
     Bench& name(std::string const& benchmarkName);
     ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept;
 
+    /**
+     * @brief Set context information.
+     *
+     * The information can be accessed using custom render templates via `{{context(variableName)}}`.
+     * Trying to render a variable that hasn't been set before raises an exception.
+     * Not included in (default) markdown table.
+     *
+     * @see clearContext(), render()
+     *
+     * @param variableName The name of the context variable.
+     * @param variableValue The value of the context variable.
+     */
+    Bench& context(char const* variableName, char const* variableValue);
+    Bench& context(std::string const& variableName, std::string const& variableValue);
+
+    /**
+     * @brief Reset context information.
+     *
+     * This may improve efficiency when using many context entries,
+     * or improve robustness by removing spurious context entries.
+     *
+     * @see context()
+     */
+    Bench& clearContext();
+
     /**
      * @brief Sets the batch size.
     *
@@ -1600,6 +1631,10 @@ static std::ostream& generateResultTag(Node const& n, Result const& r, std::ostr
     std::vector<std::string> matchResult;
     if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) {
         if (matchResult.size() == 2) {
+            if (matchResult[0] == "context") {
+                return out << r.context(matchResult[1]);
+            }
+
             auto m = Result::fromString(matchResult[1]);
             if (m == Result::Measure::_size) {
                 return out << 0.0;
@@ -2991,6 +3026,14 @@ double Result::maximum(Measure m) const noexcept {
     return *std::max_element(data.begin(), data.end());
 }
 
+std::string const& Result::context(char const* variableName) const {
+    return mConfig.mContext.at(variableName);
+}
+
+std::string const& Result::context(std::string const& variableName) const {
+    return mConfig.mContext.at(variableName);
+}
+
 Result::Measure Result::fromString(std::string const& str) {
     if (str == "elapsed") {
         return Measure::elapsed;
@@ -3118,6 +3161,21 @@ std::string const& Bench::name() const noexcept {
     return mConfig.mBenchmarkName;
 }
 
+Bench& Bench::context(char const* variableName, char const* variableValue) {
+    mConfig.mContext[variableName] = variableValue;
+    return *this;
+}
+
+Bench& Bench::context(std::string const& variableName, std::string const& variableValue) {
+    mConfig.mContext[variableName] = variableValue;
+    return *this;
+}
+
+Bench& Bench::clearContext() {
+    mConfig.mContext.clear();
+    return *this;
+}
+
 // Number of epochs to evaluate. The reported result will be the median of evaluation of each epoch.
 Bench& Bench::epochs(size_t numEpochs) noexcept {
     mConfig.mNumEpochs = numEpochs;
diff --git a/src/test/CMakeLists.txt b/src/test/CMakeLists.txt
index 77414a1..d544752 100644
--- a/src/test/CMakeLists.txt
+++ b/src/test/CMakeLists.txt
@@ -19,6 +19,7 @@ target_sources_local(nb PRIVATE
     example_shuffle.cpp
     tutorial_complexity_set.cpp
     tutorial_complexity_sort.cpp
+    tutorial_context.cpp
     tutorial_fast_v1.cpp
     tutorial_fast_v2.cpp
     tutorial_fluctuating_v1.cpp
diff --git a/src/test/tutorial_context.cpp b/src/test/tutorial_context.cpp
new file mode 100644
index 0000000..9f5f1bc
--- /dev/null
+++ b/src/test/tutorial_context.cpp
@@ -0,0 +1,54 @@
+#include <nanobench.h>
+#include <thirdparty/doctest/doctest.h>
+
+#include <cmath>
+#include <iostream>
+
+namespace {
+
+template <typename T>
+void fma() {
+    T x(1), y(2), z(3);
+    z = std::fma(x, y, z);
+    ankerl::nanobench::doNotOptimizeAway(z);
+}
+
+template <typename T>
+void plus_eq() {
+    T x(1), y(2), z(3);
+    z += x*y;
+    ankerl::nanobench::doNotOptimizeAway(z);
+}
+
+char const* csv() {
+    return R"DELIM("title";"name";"scalar";"foo";"elapsed";"total"
+{{#result}}"{{title}}";"{{name}}";"{{context(scalar)}}";"{{context(foo)}}";{{median(elapsed)}};{{sumProduct(iterations, elapsed)}}
+{{/result}})DELIM";
+}
+
+} // namespace
+
+TEST_CASE("tutorial_context") {
+    ankerl::nanobench::Bench bench;
+    bench.title("Addition").output(nullptr);
+    bench
+        .context("scalar", "f32")
+        .context("foo", "bar")
+        .run("+=", plus_eq<float>)
+        .run("fma", fma<float>);
+    bench
+        .context("scalar", "f64")
+        .context("foo", "baz")
+        .run("+=", plus_eq<double>)
+        .run("fma", fma<double>);
+    bench.render(csv(), std::cout);
+    // Changing the title resets the results, but not the context:
+    bench.title("New Title");
+    bench.run("+=", plus_eq<double>);
+    bench.render(csv(), std::cout);
+    CHECK_EQ(bench.results().front().context("foo"), "baz"); // != bar
+    // The context has to be reset manually, which causes render to fail:
+    bench.title("Yet Another Title").clearContext();
+    bench.run("+=", plus_eq<double>);
+    CHECK_THROWS(bench.render(csv(), std::cout));
+}