Setup benchmark suite
Signed-off-by: Tim Paine <3105306+timkpaine@users.noreply.github.com>
timkpaine committed Jul 14, 2024
1 parent 323122e commit 8790a83
Showing 11 changed files with 203 additions and 0 deletions.
5 changes: 5 additions & 0 deletions .gitignore
@@ -104,6 +104,11 @@ csp/lib/
*.so
*.tsbuildinfo

# Benchmarks
.asv
ci/benchmarks/*
!ci/benchmarks/benchmarks.json

# Jupyter / Editors
.ipynb_checkpoints
.autoversion
22 changes: 22 additions & 0 deletions Makefile
@@ -118,6 +118,28 @@ dockerps: ## spin up docker compose services for adapter testing
dockerdown: ## spin down docker compose services for adapter testing
$(DOCKER) compose -f ci/$(ADAPTER)/docker-compose.yml down

##############
# BENCHMARKS #
##############
.PHONY: benchmark benchmarks benchmark-regen benchmark-view benchmarks-regen benchmarks-view
benchmark: ## run benchmarks
python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose `git rev-parse --abbrev-ref HEAD`^!

# https://github.com/airspeed-velocity/asv/issues/1027
# https://github.com/airspeed-velocity/asv/issues/488
benchmark-regen: ## regenerate benchmark results for prior release tags
python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose v0.0.4^!
python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose v0.0.5^!

benchmark-view: ## generate viewable website of benchmark results
python -m asv publish --config csp/benchmarks/asv.conf.jsonc
python -m asv preview --config csp/benchmarks/asv.conf.jsonc

# Aliases
benchmarks: benchmark
benchmarks-regen: benchmark-regen
benchmarks-view: benchmark-view

###########
# VERSION #
###########
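
For context, the benchmark target above asks asv to benchmark only the tip commit of the current branch: the trailing ^! is a git revision suffix that selects a commit while excluding its parents. A rough Python sketch of the same invocation, assuming the repository root as the working directory and asv installed in the active environment (illustration only, not part of the commit):

# Sketch of what `make benchmark` expands to.
import subprocess

# Resolve the current branch name, as the Makefile does with `git rev-parse`.
head = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"], text=True).strip()

# `<rev>^!` limits asv to that single commit.
subprocess.run(
    ["python", "-m", "asv", "run", "--config", "csp/benchmarks/asv.conf.jsonc", "--verbose", f"{head}^!"],
    check=True,
)
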
25 changes: 25 additions & 0 deletions ci/benchmarks/benchmarks.json
@@ -0,0 +1,25 @@
{
"stats.basic.StatsBenchmarkSuite.time_stats": {
"code": "class StatsBenchmarkSuite:\n def time_stats(self, function):\n def g():\n data = csp.curve(typ=np.ndarray, data=self.data)\n value = getattr(csp.stats, function)(data, interval=self.interval, **self.function_args.get(function, {}))\n csp.add_graph_output(\"final_value\", value, tick_count=1)\n \n timer = Timer(\n lambda: csp.run(g, realtime=False, starttime=self.start_date, endtime=timedelta(seconds=self.num_rows))\n )\n elapsed = timer.timeit(1)\n return elapsed\n\n def setup(self, _):\n self.start_date = datetime(2020, 1, 1)\n self.num_rows = 1_000\n self.array_size = 100\n self.test_times = [self.start_date + timedelta(seconds=i) for i in range(self.num_rows)]\n self.random_values = [\n np.random.normal(size=(self.array_size,)) for i in range(self.num_rows)\n ] # 100 element np array\n self.data = list(zip(self.test_times, self.random_values))\n self.interval = 500",
"min_run_count": 2,
"name": "stats.basic.StatsBenchmarkSuite.time_stats",
"number": 0,
"param_names": [
"param1"
],
"params": [
[
"'median'",
"'quantile'",
"'rank'"
]
],
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "f57f3ee288b0805597f9edee91b4d1dddf41046d34fbd46cfbd7135f459e62e3",
"warmup_time": -1
},
"version": 2
}
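
The JSON above is asv's machine-readable index of the discovered benchmarks, which is why the .gitignore change exempts ci/benchmarks/benchmarks.json from the otherwise-ignored results directory. A small sketch for inspecting it (illustration only; assumes the repository root as the working directory):

# Print each declared benchmark with its type and parameter grid.
import json

with open("ci/benchmarks/benchmarks.json") as f:
    index = json.load(f)

for name, spec in index.items():
    if isinstance(spec, dict):  # the top-level "version" entry is not a benchmark
        print(name, spec["type"], spec["params"])
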
1 change: 1 addition & 0 deletions conda/dev-environment-unix.yml
@@ -3,6 +3,7 @@ channels:
- conda-forge
- nodefaults
dependencies:
- asv
- bison
- brotli
- build
1 change: 1 addition & 0 deletions conda/dev-environment-win.yml
@@ -3,6 +3,7 @@ channels:
- conda-forge
- nodefaults
dependencies:
- asv
- brotli
- build
- bump2version>=1
1 change: 1 addition & 0 deletions csp/benchmarks/__init__.py
@@ -0,0 +1 @@
from .common import *
33 changes: 33 additions & 0 deletions csp/benchmarks/asv.conf.jsonc
@@ -0,0 +1,33 @@
// https://asv.readthedocs.io/en/v0.6.3/asv.conf.json.html
{
"version": 1,
"project": "csp",
"project_url": "https://github.com/Point72/csp",
"repo": "../..",
"branches": ["main"],
"dvcs": "git",

"install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"],
"uninstall_command": ["return-code=any python -mpip uninstall -y {project}"],
"build_command": [
"python -m pip install build",
"python -m build --wheel -o {build_cache_dir} {build_dir}"
],
"environment_type": "virtualenv",
"install_timeout": 600,
"show_commit_url": "http://github.com/point72/csp/commit/",

"pythons": ["3.11"],

// "environment_type": "mamba",
// "conda_channels": ["conda-forge"],
// "conda_environment_file": "conda/dev-environment-unix.yml",

"benchmark_dir": "../../csp/benchmarks",
"env_dir": "../../.asv/env",
"results_dir": "../../ci/benchmarks",
"html_dir": "../../.asv/html",

"hash_length": 8,
"build_cache_size": 2
}
63 changes: 63 additions & 0 deletions csp/benchmarks/common.py
@@ -0,0 +1,63 @@
from asv_runner.benchmarks import benchmark_types
from asv_runner.benchmarks.mark import SkipNotImplemented
from logging import getLogger

__all__ = ("ASVBenchmarkHelper",)


class ASVBenchmarkHelper:
"""A helper base class to mimic some of what ASV does when running benchmarks, to
test them outside of ASV.
NOTE: should be removed in favor of calling ASV itself from python, if possible.
"""

def __init__(self, *args, **kwargs):
self.log = getLogger(self.__class__.__name__)

def run_all(self):
# https://asv.readthedocs.io/en/v0.6.3/writing_benchmarks.html#benchmark-types
benchmarks = {}

for method in dir(self):
for cls in benchmark_types:
if cls.name_regex.match(method):
benchmark_type = cls.__name__.replace("Benchmark", "")
if benchmark_type not in benchmarks:
benchmarks[benchmark_type] = []

name = f"{self.__class__.__qualname__}.{method}"
func = getattr(self, method)
benchmarks[benchmark_type].append(cls(name, func, (func, self)))

def run_benchmark(benchmark):
skip = benchmark.do_setup()
try:
if skip:
return
try:
benchmark.do_run()
except SkipNotImplemented:
pass
finally:
benchmark.do_teardown()

        for benchmark_type, benchmarks_to_run in benchmarks.items():
            if benchmarks_to_run:
                self.log.warning(f"Running benchmarks for {benchmark_type}")
                for benchmark in benchmarks_to_run:
                    if len(getattr(self, "params", [])):
                        # TODO: cleaner - probe parameter indices until set_param_idx rejects one
                        param_count = 0
                        while param_count < 100:
                            try:
                                benchmark.set_param_idx(param_count)
                                params = benchmark._current_params
                                self.log.warning(f"[{benchmark_type}][{benchmark.name}][{'.'.join(str(_) for _ in params)}]")
                                run_benchmark(benchmark=benchmark)
                                param_count += 1
                            except ValueError:
                                break
                    else:
                        self.log.warning(f"Running [{benchmark_type}][{benchmark.func.__name__}]")
                        run_benchmark(benchmark=benchmark)
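
To make the discovery logic above concrete, here is a hypothetical minimal subclass (ToyBenchmarkSuite and its methods are invented for illustration and are not part of the commit): run_all matches method names against asv_runner's prefix regexes (time_*, mem_*, and so on), wraps each match in the corresponding benchmark class, and drives setup, run, and teardown around it.

# Hypothetical example only: a suite whose single time_-prefixed method
# should be picked up and executed by run_all().
from csp.benchmarks import ASVBenchmarkHelper


class ToyBenchmarkSuite(ASVBenchmarkHelper):
    def setup(self):
        self.values = list(range(10_000))

    def time_sum(self):
        # The body is what the asv_runner timing machinery measures.
        sum(self.values)


if __name__ == "__main__":
    ToyBenchmarkSuite().run_all()
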
0 changes: 0 additions & 0 deletions csp/benchmarks/stats/__init__.py
Empty file.
50 changes: 50 additions & 0 deletions csp/benchmarks/stats/basic.py
@@ -0,0 +1,50 @@
import numpy as np
from datetime import datetime, timedelta
from timeit import Timer

import csp
from csp.benchmarks import ASVBenchmarkHelper

__all__ = ("StatsBenchmarkSuite",)


class StatsBenchmarkSuite(ASVBenchmarkHelper):
"""
python -m csp.benchmarks.stats.basic
"""

params = (("median", "quantile", "rank"),)
    # param_names = ("function",)

rounds = 5
repeat = (100, 200, 60.0)

function_args = {"quantile": {"quant": 0.95}}

def setup(self, _):
self.start_date = datetime(2020, 1, 1)
self.num_rows = 1_000
self.array_size = 100
self.test_times = [self.start_date + timedelta(seconds=i) for i in range(self.num_rows)]
self.random_values = [
np.random.normal(size=(self.array_size,)) for i in range(self.num_rows)
] # 100 element np array
self.data = list(zip(self.test_times, self.random_values))
self.interval = 500

def time_stats(self, function):
def g():
data = csp.curve(typ=np.ndarray, data=self.data)
value = getattr(csp.stats, function)(data, interval=self.interval, **self.function_args.get(function, {}))
csp.add_graph_output("final_value", value, tick_count=1)

timer = Timer(
lambda: csp.run(g, realtime=False, starttime=self.start_date, endtime=timedelta(seconds=self.num_rows))
)
elapsed = timer.timeit(1)
return elapsed


if __name__ == "__main__":
sbs = StatsBenchmarkSuite()
sbs.run_all()
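
As a quick illustration of the parametrization, a single case can also be exercised by hand, outside both asv and run_all; "median" is one of the declared params, and the snippet assumes csp and numpy are importable (illustration only, not part of the commit):

# Manual one-off run of one parametrized case, mirroring what asv does per
# entry in `params`.
from csp.benchmarks.stats.basic import StatsBenchmarkSuite

suite = StatsBenchmarkSuite()
suite.setup("median")  # builds the synthetic (timestamp, ndarray) tick data
elapsed = suite.time_stats("median")
print(f"csp.stats.median over {suite.num_rows} ticks: {elapsed:.4f}s")
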
2 changes: 2 additions & 0 deletions pyproject.toml
@@ -83,6 +83,8 @@ develop = [
"sqlalchemy", # db
"threadpoolctl", # test_random
"tornado", # profiler, perspective, websocket
# benchmarks
"asv",
]
showgraph = [
"graphviz",
