diff --git a/Makefile b/Makefile
index 752f7b8e8..ef8c267cd 100644
--- a/Makefile
+++ b/Makefile
@@ -121,10 +121,24 @@ dockerdown: ## spin up docker compose services for adapter testing
 ##############
 # BENCHMARKS #
 ##############
-.PHONY: benchmark benchmarks benchmark-regen benchmark-view benchmarks-regen benchmarks-view
+.PHONY: benchmark benchmarks benchmark-quick benchmarks-quick benchmark-local benchmarks-local benchmark-debug benchmarks-debug benchmark-regen benchmarks-regen benchmark-view benchmarks-view
 benchmark: ## run benchmarks
 	python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose `git rev-parse --abbrev-ref HEAD`^!
 
+benchmark-quick: ## run quick benchmark
+	python -m asv run --quick --config csp/benchmarks/asv.conf.jsonc --verbose `git rev-parse --abbrev-ref HEAD`^!
+
+benchmark-local: ## run benchmark using the local env
+	python -m asv run --python=same --config csp/benchmarks/asv.conf.jsonc --verbose
+
+benchmark-debug: ## debug a failing benchmark
+	if [ -z "${BENCHMARK_NAME}" ]; then echo 'Usage: make benchmark-debug BENCHMARK_NAME=<benchmark_name> [PARAM_INDEX=<param_index>]'; exit 1; fi
+	if [ -z "${PARAM_INDEX}" ]; then \
+		python -m pdb -m asv.benchmark run csp/benchmarks ${BENCHMARK_NAME} "{}" debug_profile.txt debug_results.txt; \
+	else \
+		python -m pdb -m asv.benchmark run csp/benchmarks ${BENCHMARK_NAME}-${PARAM_INDEX} "{}" debug_profile.txt debug_results.txt; \
+	fi;
+
 # https://github.com/airspeed-velocity/asv/issues/1027
 # https://github.com/airspeed-velocity/asv/issues/488
 benchmark-regen:
@@ -137,6 +151,9 @@ benchmark-view: ## generate viewable website of benchmark results
 
 # Alias
 benchmarks: benchmark
+benchmarks-quick: benchmark-quick
+benchmarks-local: benchmark-local
+benchmarks-debug: benchmark-debug
 benchmarks-regen: benchmark-regen
 benchmarks-view: benchmark-view
 
@@ -202,6 +219,7 @@ clean: ## clean the repository
 ifneq ($(OS),Windows_NT)
 	rm -rf .coverage coverage cover htmlcov logs build dist wheelhouse *.egg-info
 	rm -rf csp/lib csp/bin csp/include _skbuild
+	rm -rf debug_*.txt
 else
 	del /s /q .coverage coverage cover htmlcov logs build dist wheelhouse *.egg-info
 	del /s/ q csp\lib csp\bin csp\include _skbuild
diff --git a/csp/benchmarks/__init__.py b/csp/benchmarks/__init__.py
index 55e5f844b..e69de29bb 100644
--- a/csp/benchmarks/__init__.py
+++ b/csp/benchmarks/__init__.py
@@ -1 +0,0 @@
-from .common import *
diff --git a/csp/benchmarks/common.py b/csp/benchmarks/common.py
deleted file mode 100644
index 90844a455..000000000
--- a/csp/benchmarks/common.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from asv_runner.benchmarks import benchmark_types
-from asv_runner.benchmarks.mark import SkipNotImplemented
-from logging import getLogger
-
-__all__ = ("ASVBenchmarkHelper",)
-
-
-class ASVBenchmarkHelper:
-    """A helper base class to mimic some of what ASV does when running benchmarks, to
-    test them outside of ASV.
-
-    NOTE: should be removed in favor of calling ASV itself from python, if possible.
-    """
-
-    def __init__(self, *args, **kwargs):
-        self.log = getLogger(self.__class__.__name__)
-
-    def run_all(self):
-        # https://asv.readthedocs.io/en/v0.6.3/writing_benchmarks.html#benchmark-types
-        benchmarks = {}
-
-        for method in dir(self):
-            for cls in benchmark_types:
-                if cls.name_regex.match(method):
-                    benchmark_type = cls.__name__.replace("Benchmark", "")
-                    if benchmark_type not in benchmarks:
-                        benchmarks[benchmark_type] = []
-
-                    name = f"{self.__class__.__qualname__}.{method}"
-                    func = getattr(self, method)
-                    benchmarks[benchmark_type].append(cls(name, func, (func, self)))
-
-        def run_benchmark(benchmark):
-            skip = benchmark.do_setup()
-            try:
-                if skip:
-                    return
-                try:
-                    benchmark.do_run()
-                except SkipNotImplemented:
-                    pass
-            finally:
-                benchmark.do_teardown()
-
-        for type, benchmarks_to_run in benchmarks.items():
-            if benchmarks_to_run:
-                self.log.warn(f"Running benchmarks for {type}")
-                for benchmark in benchmarks_to_run:
-                    if len(getattr(self, "params", [])):
-                        # TODO: cleaner
-                        param_count = 0
-                        while param_count < 100:
-                            try:
-                                benchmark.set_param_idx(param_count)
-                                params = benchmark._current_params
-                                self.log.warn(f"[{type}][{benchmark.name}][{'.'.join(str(_) for _ in params)}]")
-                                run_benchmark(benchmark=benchmark)
-                                param_count += 1
-                            except ValueError:
-                                break
-                    else:
-                        self.log.warn(f"Running [{type}][{benchmark.func.__name__}]")
-                        run_benchmark(benchmark=benchmark)
diff --git a/csp/benchmarks/stats/basic.py b/csp/benchmarks/stats/basic.py
index 9b212ba2d..4ce8a22b5 100644
--- a/csp/benchmarks/stats/basic.py
+++ b/csp/benchmarks/stats/basic.py
@@ -3,12 +3,11 @@
 from timeit import Timer
 
 import csp
-from csp.benchmarks import ASVBenchmarkHelper
 
 __all__ = ("StatsBenchmarkSuite",)
 
 
-class StatsBenchmarkSuite(ASVBenchmarkHelper):
+class StatsBenchmarkSuite:
     """
     python -m csp.benchmarks.stats.basic
     """
@@ -43,8 +42,3 @@ def g():
         )
         elapsed = timer.timeit(1)
         return elapsed
-
-
-if __name__ == "__main__":
-    sbs = StatsBenchmarkSuite()
-    sbs.run_all()