diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index 83c5bbf81e..7544b24758 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -18,7 +18,7 @@ jobs:
       - name: Install Python 3.12
         uses: actions/setup-python@v5
         with:
-          python-version: 3.12
+          python-version: "3.12"
       - run: python -m pip install build
       - name: Build sdist
         run: python -m build --sdist
@@ -40,68 +40,23 @@ jobs:
         with:
           fetch-depth: 0
 
+      - uses: actions/setup-python@v5
+        name: Install Python 3.12
+        with:
+          python-version: "3.12"
+
+      - run: pip install --upgrade pip uv
+
       - name: Set up QEMU
         if: runner.os == 'Linux'
         uses: docker/setup-qemu-action@v3
         with:
           platforms: all
 
-      - name: Build 3.8 wheels on ${{ matrix.os }} using cibuildwheel
-        uses: pypa/cibuildwheel@v2.20
-        env:
-          CIBW_BUILD: "cp38-*"
-          CIBW_SKIP: "*-musllinux_*"
-          CIBW_TEST_SKIP: "cp38-macosx_*:arm64"
-          CIBW_ARCHS_LINUX: auto64 aarch64
-          CIBW_ARCHS_WINDOWS: auto64
-          CIBW_BEFORE_ALL_LINUX: bash .github/install_bazel.sh
-          # Grab the rootless Bazel installation inside the container.
-          CIBW_ENVIRONMENT_LINUX: PATH=$PATH:$HOME/bin
-          CIBW_TEST_COMMAND: python {project}/bindings/python/google_benchmark/example.py
-
-      - name: Build 3.9 wheels on ${{ matrix.os }} using cibuildwheel
-        uses: pypa/cibuildwheel@v2.20
-        env:
-          CIBW_BUILD: "cp39-*"
-          CIBW_SKIP: "*-musllinux_*"
-          CIBW_TEST_SKIP: "cp38-macosx_*:arm64"
-          CIBW_ARCHS_LINUX: auto64 aarch64
-          CIBW_ARCHS_WINDOWS: auto64
-          CIBW_BEFORE_ALL_LINUX: bash .github/install_bazel.sh
-          # Grab the rootless Bazel installation inside the container.
-          CIBW_ENVIRONMENT_LINUX: PATH=$PATH:$HOME/bin
-          CIBW_TEST_COMMAND: python {project}/bindings/python/google_benchmark/example.py
-
-      - name: Build 3.10 wheels on ${{ matrix.os }} using cibuildwheel
+      - name: Build wheels on ${{ matrix.os }} using cibuildwheel
         uses: pypa/cibuildwheel@v2.20
         env:
-          CIBW_BUILD: "cp310-*"
-          CIBW_SKIP: "*-musllinux_*"
-          CIBW_TEST_SKIP: "cp38-macosx_*:arm64"
-          CIBW_ARCHS_LINUX: auto64 aarch64
-          CIBW_ARCHS_WINDOWS: auto64
-          CIBW_BEFORE_ALL_LINUX: bash .github/install_bazel.sh
-          # Grab the rootless Bazel installation inside the container.
-          CIBW_ENVIRONMENT_LINUX: PATH=$PATH:$HOME/bin
-          CIBW_TEST_COMMAND: python {project}/bindings/python/google_benchmark/example.py
-
-      - name: Build 3.11 wheels on ${{ matrix.os }} using cibuildwheel
-        uses: pypa/cibuildwheel@v2.20
-        env:
-          CIBW_BUILD: "cp311-*"
-          CIBW_SKIP: "*-musllinux_*"
-          CIBW_TEST_SKIP: "cp38-macosx_*:arm64"
-          CIBW_ARCHS_LINUX: auto64 aarch64
-          CIBW_ARCHS_WINDOWS: auto64
-          CIBW_BEFORE_ALL_LINUX: bash .github/install_bazel.sh
-          # Grab the rootless Bazel installation inside the container.
-          CIBW_ENVIRONMENT_LINUX: PATH=$PATH:$HOME/bin
-          CIBW_TEST_COMMAND: python {project}/bindings/python/google_benchmark/example.py
-
-      - name: Build 3.12 wheels on ${{ matrix.os }} using cibuildwheel
-        uses: pypa/cibuildwheel@v2.20
-        env:
-          CIBW_BUILD: "cp312-*"
+          CIBW_BUILD: "cp310-* cp311-* cp312-*"
+          CIBW_BUILD_FRONTEND: "build[uv]"
           CIBW_SKIP: "*-musllinux_*"
           CIBW_TEST_SKIP: "cp38-macosx_*:arm64"
           CIBW_ARCHS_LINUX: auto64 aarch64
@@ -110,6 +65,8 @@ jobs:
           # Grab the rootless Bazel installation inside the container.
           CIBW_ENVIRONMENT_LINUX: PATH=$PATH:$HOME/bin
           CIBW_TEST_COMMAND: python {project}/bindings/python/google_benchmark/example.py
+          # unused by Bazel, but needed explicitly by delocate on MacOS.
+          MACOSX_DEPLOYMENT_TARGET: "10.14"
 
       - name: Upload Google Benchmark ${{ matrix.os }} wheels
         uses: actions/upload-artifact@v4
@@ -133,11 +90,11 @@ jobs:
     name: Publish google-benchmark wheels to PyPI
     needs: [merge_wheels]
     runs-on: ubuntu-latest
+    if: github.event_name == 'release' && github.event.action == 'published'
+    permissions:
+      id-token: write
     steps:
       - uses: actions/download-artifact@v4
         with:
           path: dist
       - uses: pypa/gh-action-pypi-publish@release/v1
-        with:
-          user: __token__
-          password: ${{ secrets.PYPI_PASSWORD }}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 99976d9459..ef13c1dabd 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,17 +1,17 @@
 repos:
   - repo: https://github.com/keith/pre-commit-buildifier
-    rev: 6.4.0
+    rev: 7.1.2
    hooks:
       - id: buildifier
       - id: buildifier-lint
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.11.0
+    rev: v1.11.1
     hooks:
       - id: mypy
         types_or: [ python, pyi ]
         args: [ "--ignore-missing-imports", "--scripts-are-modules" ]
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.4.10
+    rev: v0.6.1
     hooks:
       - id: ruff
         args: [ --fix, --exit-non-zero-on-fix ]
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e0cd6962e0..a86a5686ed 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -147,7 +147,7 @@ set(CMAKE_CXX_EXTENSIONS OFF)
 if (MSVC)
   # Turn compiler warnings up to 11
   string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4 /MP")
   add_definitions(-D_CRT_SECURE_NO_WARNINGS)
 
   if(BENCHMARK_ENABLE_WERROR)
diff --git a/bindings/python/google_benchmark/__init__.py b/bindings/python/google_benchmark/__init__.py
index c1393b4e58..e7870c854c 100644
--- a/bindings/python/google_benchmark/__init__.py
+++ b/bindings/python/google_benchmark/__init__.py
@@ -49,7 +49,8 @@ def my_benchmark(state):
     oNone as oNone,
     oNSquared as oNSquared,
 )
-from google_benchmark.version import __version__ as __version__
+
+__version__ = "1.9.0"
 
 
 class __OptionMaker:
diff --git a/bindings/python/google_benchmark/version.py b/bindings/python/google_benchmark/version.py
deleted file mode 100644
index a324693e2d..0000000000
--- a/bindings/python/google_benchmark/version.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from importlib.metadata import PackageNotFoundError, version
-
-try:
-    __version__ = version("google-benchmark")
-except PackageNotFoundError:
-    # package is not installed
-    pass
diff --git a/cmake/benchmark.pc.in b/cmake/benchmark.pc.in
index 043f2fc759..bbed29d1eb 100644
--- a/cmake/benchmark.pc.in
+++ b/cmake/benchmark.pc.in
@@ -5,7 +5,7 @@ includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@
 
 Name: @PROJECT_NAME@
 Description: Google microbenchmark framework
-Version: @VERSION@
+Version: @NORMALIZED_VERSION@
 Libs: -L${libdir} -lbenchmark
 Libs.private: -lpthread @BENCHMARK_PRIVATE_LINK_LIBRARIES@
diff --git a/cmake/benchmark_main.pc.in b/cmake/benchmark_main.pc.in
index a90f3cd060..e9d81a05ee 100644
--- a/cmake/benchmark_main.pc.in
+++ b/cmake/benchmark_main.pc.in
@@ -2,6 +2,6 @@ libdir=@CMAKE_INSTALL_FULL_LIBDIR@
 
 Name: @PROJECT_NAME@
 Description: Google microbenchmark framework (with main() function)
-Version: @VERSION@
+Version: @NORMALIZED_VERSION@
 Requires: benchmark
 Libs: -L${libdir} -lbenchmark_main
diff --git a/docs/dependencies.md b/docs/dependencies.md
index 07760e10e3..98ce996391 100644
--- a/docs/dependencies.md
+++ b/docs/dependencies.md
@@ -11,3 +11,9 @@ distributions include newer versions, for example:
 * Ubuntu 20.04 provides CMake 3.16.3
 * Debian 11.4 provides CMake 3.18.4
 * Ubuntu 22.04 provides CMake 3.22.1
+
+## Python
+
+The Python bindings require Python 3.10+ as of v1.9.0 (2024-08-16) for installation from PyPI.
+Building from source for older versions probably still works, though. See the [user guide](python_bindings.md) for details on how to build from source.
+The minimum theoretically supported version is Python 3.8, since the used bindings generator (nanobind) only supports Python 3.8+.
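+
+As a rough illustration, a from-source install on an older interpreter could
+look like this (hypothetical invocation, run from the root of a benchmark
+checkout; it assumes Bazel and a C++ toolchain are already set up as
+described in the user guide):
+
+```
+python -m pip install .
+```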
diff --git a/docs/releasing.md b/docs/releasing.md
index 09bf93764d..ab664a8640 100644
--- a/docs/releasing.md
+++ b/docs/releasing.md
@@ -8,16 +8,24 @@
 * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of
   commits between the last annotated tag and HEAD
 * Pick the most interesting.
-* Create one last commit that updates the version saved in `CMakeLists.txt` and `MODULE.bazel`
-  to the release version you're creating. (This version will be used if benchmark is installed
-  from the archive you'll be creating in the next step.)
+* Create one last commit that updates the version saved in `CMakeLists.txt`, `MODULE.bazel`,
+  and `bindings/python/google_benchmark/__init__.py` to the release version you're creating.
+  (This version will be used if benchmark is installed from the archive you'll be creating
+  in the next step.)
 
 ```
-project (benchmark VERSION 1.8.0 LANGUAGES CXX)
+# CMakeLists.txt
+project (benchmark VERSION 1.9.0 LANGUAGES CXX)
 ```
 
 ```
-module(name = "com_github_google_benchmark", version="1.8.0")
+# MODULE.bazel
+module(name = "com_github_google_benchmark", version="1.9.0")
+```
+
+```
+# google_benchmark/__init__.py
+__version__ = "1.9.0"
 ```
 
 * Create a release through github's interface
@@ -28,4 +36,3 @@ module(name = "com_github_google_benchmark", version="1.8.0")
 * `git push --force --tags origin`
 * Confirm that the "Build and upload Python wheels" action runs to completion
   * Run it manually if it hasn't run.
-  * IMPORTANT: When re-running manually, make sure to select the newly created `<tag>` as the workflow version in the "Run workflow" tab on the GitHub Actions page.
diff --git a/docs/user_guide.md b/docs/user_guide.md
index e38262099d..046d7dea87 100644
--- a/docs/user_guide.md
+++ b/docs/user_guide.md
@@ -82,9 +82,9 @@ tabular data on stdout. Example tabular output looks like:
 ```
 Benchmark                               Time(ns)    CPU(ns) Iterations
 ----------------------------------------------------------------------
-BM_SetInsert/1024/1                        28928      29349      23853  133.097kB/s   33.2742k items/s
-BM_SetInsert/1024/8                        32065      32913      21375  949.487kB/s   237.372k items/s
-BM_SetInsert/1024/10                       33157      33648      21431  1.13369MB/s   290.225k items/s
+BM_SetInsert/1024/1                        28928      29349      23853  133.097kiB/s   33.2742k items/s
+BM_SetInsert/1024/8                        32065      32913      21375  949.487kiB/s   237.372k items/s
+BM_SetInsert/1024/10                       33157      33648      21431  1.13369MiB/s   290.225k items/s
 ```
 
 The JSON format outputs human readable json split into two top level attributes.
@@ -167,6 +167,13 @@ line interface or by setting environment variables before execution. For every
 prevails). A complete list of CLI options is available running benchmarks
 with the `--help` switch.
 
+### Dry runs
+
+To confirm that benchmarks can run successfully without needing to wait for
+multiple repetitions and iterations, the `--benchmark_dry_run` flag can be
+used. This will run the benchmarks as normal, but for 1 iteration and 1
+repetition only.
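+
+For example, to smoke-test every benchmark in a binary (`mybenchmark` is a
+placeholder for your own benchmark executable):
+
+```
+./mybenchmark --benchmark_dry_run=true
+```
+
+Dry runs also add a `dry_run: true` entry to the reported benchmark context,
+making them easy to distinguish from regular runs in the output.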
+
 ## Running a Subset of Benchmarks
diff --git a/include/benchmark/benchmark.h b/include/benchmark/benchmark.h
index 53a22247f2..86f9dbbabb 100644
--- a/include/benchmark/benchmark.h
+++ b/include/benchmark/benchmark.h
@@ -1554,6 +1554,7 @@ class Fixture : public internal::Benchmark {
     BaseClass##_##Method##_Benchmark
 
 #define BENCHMARK_PRIVATE_DECLARE(n)                                 \
+  /* NOLINTNEXTLINE(misc-use-anonymous-namespace) */                 \
   static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \
       BENCHMARK_UNUSED
diff --git a/pyproject.toml b/pyproject.toml
index 62507a8703..14f173f956 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,25 +1,21 @@
 [build-system]
-requires = ["setuptools", "setuptools-scm[toml]", "wheel"]
+requires = ["setuptools<73"]
 build-backend = "setuptools.build_meta"
 
 [project]
 name = "google_benchmark"
 description = "A library to benchmark code snippets."
-requires-python = ">=3.8"
-license = {file = "LICENSE"}
+requires-python = ">=3.10"
+license = { file = "LICENSE" }
 keywords = ["benchmark"]
-authors = [
-    {name = "Google", email = "benchmark-discuss@googlegroups.com"},
-]
+authors = [{ name = "Google", email = "benchmark-discuss@googlegroups.com" }]
 classifiers = [
     "Development Status :: 4 - Beta",
     "Intended Audience :: Developers",
     "Intended Audience :: Science/Research",
     "License :: OSI Approved :: Apache Software License",
-    "Programming Language :: Python :: 3.8",
-    "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
@@ -29,14 +25,10 @@ classifiers = [
 
 dynamic = ["readme", "version"]
 
-dependencies = [
-    "absl-py>=0.7.1",
-]
+dependencies = ["absl-py>=0.7.1"]
 
 [project.optional-dependencies]
-dev = [
-    "pre-commit>=3.3.3",
-]
+dev = ["pre-commit>=3.3.3"]
 
 [project.urls]
 Homepage = "https://github.com/google/benchmark"
@@ -45,7 +37,7 @@ Repository = "https://github.com/google/benchmark.git"
 Discord = "https://discord.gg/cz7UX7wKC2"
 
 [tool.setuptools]
-package-dir = {"" = "bindings/python"}
+package-dir = { "" = "bindings/python" }
 zip-safe = false
 
 [tool.setuptools.packages.find]
@@ -53,8 +45,7 @@ where = ["bindings/python"]
 
 [tool.setuptools.dynamic]
 readme = { file = "README.md", content-type = "text/markdown" }
-
-[tool.setuptools_scm]
+version = { attr = "google_benchmark.__version__" }
 
 [tool.mypy]
 check_untyped_defs = true
diff --git a/setup.py b/setup.py
index d171476f7e..1e4c0db761 100644
--- a/setup.py
+++ b/setup.py
@@ -138,7 +138,6 @@ def bazel_build(self, ext: BazelExtension) -> None:
             dirs[:] = [d for d in dirs if "runfiles" not in d]
 
             for f in files:
-                print(f)
                 fp = Path(f)
                 should_copy = False
                 # we do not want the bare .so file included
diff --git a/src/benchmark.cc b/src/benchmark.cc
index b7767bd00a..2605077444 100644
--- a/src/benchmark.cc
+++ b/src/benchmark.cc
@@ -92,6 +92,11 @@ BM_DEFINE_double(benchmark_min_warmup_time, 0.0);
 // standard deviation of the runs will be reported.
 BM_DEFINE_int32(benchmark_repetitions, 1);
 
+// If enabled, forces each benchmark to execute exactly one iteration and one
+// repetition, bypassing any configured
+// MinTime()/MinWarmUpTime()/Iterations()/Repetitions()
+BM_DEFINE_bool(benchmark_dry_run, false);
+
 // If set, enable random interleaving of repetitions of all benchmarks.
 // See http://github.com/google/benchmark/issues/1051 for details.
 BM_DEFINE_bool(benchmark_enable_random_interleaving, false);
@@ -717,6 +722,7 @@ void ParseCommandLineFlags(int* argc, char** argv) {
                         &FLAGS_benchmark_min_warmup_time) ||
         ParseInt32Flag(argv[i], "benchmark_repetitions",
                        &FLAGS_benchmark_repetitions) ||
+        ParseBoolFlag(argv[i], "benchmark_dry_run", &FLAGS_benchmark_dry_run) ||
         ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving",
                       &FLAGS_benchmark_enable_random_interleaving) ||
         ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
@@ -755,6 +761,9 @@ void ParseCommandLineFlags(int* argc, char** argv) {
   if (FLAGS_benchmark_color.empty()) {
     PrintUsageAndExit();
   }
+  if (FLAGS_benchmark_dry_run) {
+    AddCustomContext("dry_run", "true");
+  }
   for (const auto& kv : FLAGS_benchmark_context) {
     AddCustomContext(kv.first, kv.second);
   }
@@ -783,6 +792,7 @@ void PrintDefaultHelp() {
           "          [--benchmark_min_time=`<integer>x` OR `<float>s` ]\n"
           "          [--benchmark_min_warmup_time=<min_warmup_time>]\n"
           "          [--benchmark_repetitions=<num_repetitions>]\n"
+          "          [--benchmark_dry_run={true|false}]\n"
           "          [--benchmark_enable_random_interleaving={true|false}]\n"
           "          [--benchmark_report_aggregates_only={true|false}]\n"
           "          [--benchmark_display_aggregates_only={true|false}]\n"
diff --git a/src/benchmark_runner.cc b/src/benchmark_runner.cc
index c658d574ca..463f69fc52 100644
--- a/src/benchmark_runner.cc
+++ b/src/benchmark_runner.cc
@@ -58,6 +58,14 @@
 
 namespace benchmark {
 
+BM_DECLARE_bool(benchmark_dry_run);
+BM_DECLARE_string(benchmark_min_time);
+BM_DECLARE_double(benchmark_min_warmup_time);
+BM_DECLARE_int32(benchmark_repetitions);
+BM_DECLARE_bool(benchmark_report_aggregates_only);
+BM_DECLARE_bool(benchmark_display_aggregates_only);
+BM_DECLARE_string(benchmark_perf_counters);
+
 namespace internal {
 
 MemoryManager* memory_manager = nullptr;
@@ -228,20 +236,29 @@ BenchmarkRunner::BenchmarkRunner(
     : b(b_),
       reports_for_family(reports_for_family_),
       parsed_benchtime_flag(ParseBenchMinTime(FLAGS_benchmark_min_time)),
-      min_time(ComputeMinTime(b_, parsed_benchtime_flag)),
-      min_warmup_time((!IsZero(b.min_time()) && b.min_warmup_time() > 0.0)
-                          ? b.min_warmup_time()
-                          : FLAGS_benchmark_min_warmup_time),
-      warmup_done(!(min_warmup_time > 0.0)),
-      repeats(b.repetitions() != 0 ? b.repetitions()
-                                   : FLAGS_benchmark_repetitions),
+      min_time(FLAGS_benchmark_dry_run
+                   ? 0
+                   : ComputeMinTime(b_, parsed_benchtime_flag)),
+      min_warmup_time(
+          FLAGS_benchmark_dry_run
+              ? 0
+              : ((!IsZero(b.min_time()) && b.min_warmup_time() > 0.0)
+                     ? b.min_warmup_time()
+                     : FLAGS_benchmark_min_warmup_time)),
+      warmup_done(FLAGS_benchmark_dry_run ? true : !(min_warmup_time > 0.0)),
+      repeats(FLAGS_benchmark_dry_run
+                  ? 1
+                  : (b.repetitions() != 0 ? b.repetitions()
+                                          : FLAGS_benchmark_repetitions)),
       has_explicit_iteration_count(b.iterations() != 0 ||
                                    parsed_benchtime_flag.tag ==
                                        BenchTimeType::ITERS),
       pool(static_cast<size_t>(b.threads() - 1)),
-      iters(has_explicit_iteration_count
-                ? ComputeIters(b_, parsed_benchtime_flag)
-                : 1),
+      iters(FLAGS_benchmark_dry_run
+                ? 1
+                : (has_explicit_iteration_count
+                       ? ComputeIters(b_, parsed_benchtime_flag)
+                       : 1)),
       perf_counters_measurement_ptr(pcm_) {
   run_results.display_report_aggregates_only =
       (FLAGS_benchmark_report_aggregates_only ||
@@ -339,7 +356,7 @@ bool BenchmarkRunner::ShouldReportIterationResults(
   // Determine if this run should be reported;
   // Either it has run for a sufficient amount of time
   // or because an error was reported.
-  return i.results.skipped_ ||
+  return i.results.skipped_ || FLAGS_benchmark_dry_run ||
         i.iters >= kMaxIterations ||  // Too many iterations already.
         i.seconds >= GetMinTimeToApply() ||  // The elapsed time is large enough.
diff --git a/src/benchmark_runner.h b/src/benchmark_runner.h
index cd34d2d5bb..6e5ceb31e0 100644
--- a/src/benchmark_runner.h
+++ b/src/benchmark_runner.h
@@ -25,13 +25,6 @@
 namespace benchmark {
 
-BM_DECLARE_string(benchmark_min_time);
-BM_DECLARE_double(benchmark_min_warmup_time);
-BM_DECLARE_int32(benchmark_repetitions);
-BM_DECLARE_bool(benchmark_report_aggregates_only);
-BM_DECLARE_bool(benchmark_display_aggregates_only);
-BM_DECLARE_string(benchmark_perf_counters);
-
 namespace internal {
 
 extern MemoryManager* memory_manager;
diff --git a/src/sysinfo.cc b/src/sysinfo.cc
index a153b20cf3..7148598264 100644
--- a/src/sysinfo.cc
+++ b/src/sysinfo.cc
@@ -353,6 +353,12 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
     C.size = static_cast<int>(cache.Size);
     C.type = "Unknown";
     switch (cache.Type) {
+// Windows SDK version >= 10.0.26100.0
+// 0x0A000010 is the value of NTDDI_WIN11_GE
+#if NTDDI_VERSION >= 0x0A000010
+      case CacheUnknown:
+        break;
+#endif
       case CacheUnified:
         C.type = "Unified";
         break;