[WIP] Setup benchmark suite #332

Draft: wants to merge 2 commits into base: main
5 changes: 5 additions & 0 deletions .gitignore
@@ -104,6 +104,11 @@ csp/lib/
*.so
*.tsbuildinfo

# Benchmarks
.asv
ci/benchmarks/*
Member Author commented: get some standardized machines before putting in raw data

!ci/benchmarks/benchmarks.json

# Jupyter / Editors
.ipynb_checkpoints
.autoversion
41 changes: 41 additions & 0 deletions Makefile
@@ -121,6 +121,45 @@ dockerps: ## spin up docker compose services for adapter testing
dockerdown: ## spin down docker compose services for adapter testing
$(DOCKER) compose -f ci/$(ADAPTER)/docker-compose.yml down

##############
# BENCHMARKS #
##############
.PHONY: benchmark benchmarks benchmark-quick benchmarks-quick benchmark-local benchmarks-local benchmark-debug benchmarks-debug benchmark-regen benchmarks-regen benchmark-view benchmarks-view
benchmark: ## run benchmarks
python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose `git rev-parse --abbrev-ref HEAD`^!

benchmark-quick: ## run quick benchmark
python -m asv run --quick --config csp/benchmarks/asv.conf.jsonc --verbose `git rev-parse --abbrev-ref HEAD`^!

benchmark-local: ## run benchmark using the local env
python -m asv run --python=same --config csp/benchmarks/asv.conf.jsonc --verbose

benchmark-debug: ## debug a failing benchmark
if [ -z "${BENCHMARK_NAME}" ]; then echo 'Usage: make benchmark-debug BENCHMARK_NAME=<name of benchmark> [PARAM_INDEX=<index of param permutation>]'; exit 1; fi
if [ -z "${PARAM_INDEX}" ]; then \
python -m pdb -m asv.benchmark run csp/benchmarks ${BENCHMARK_NAME} "{}" debug_profile.txt debug_results.txt; \
else \
python -m pdb -m asv.benchmark run csp/benchmarks ${BENCHMARK_NAME}-${PARAM_INDEX} "{}" debug_profile.txt debug_results.txt; \
fi;

# https://github.com/airspeed-velocity/asv/issues/1027
# https://github.com/airspeed-velocity/asv/issues/488
benchmark-regen: ## regenerate benchmark data for prior release tags
python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose v0.0.4^!
python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose v0.0.5^!

benchmark-view: ## generate viewable website of benchmark results
python -m asv publish --config csp/benchmarks/asv.conf.jsonc
python -m asv preview --config csp/benchmarks/asv.conf.jsonc

# Alias
benchmarks: benchmark
benchmarks-quick: benchmark-quick
benchmarks-local: benchmark-local
benchmarks-debug: benchmark-debug
benchmarks-regen: benchmark-regen
benchmarks-view: benchmark-view

###########
# VERSION #
###########
@@ -183,9 +222,11 @@ clean: ## clean the repository
ifneq ($(OS),Windows_NT)
rm -rf .coverage coverage cover htmlcov logs build dist wheelhouse *.egg-info
rm -rf csp/lib csp/bin csp/include _skbuild
rm -rf debug_*.txt
else
del /s /q .coverage coverage cover htmlcov logs build dist wheelhouse *.egg-info
	del /s /q csp\lib csp\bin csp\include _skbuild
del debug_*.txt
endif

################
25 changes: 25 additions & 0 deletions ci/benchmarks/benchmarks.json
@@ -0,0 +1,25 @@
{
"stats.basic.StatsBenchmarkSuite.time_stats": {
"code": "class StatsBenchmarkSuite:\n def time_stats(self, function):\n def g():\n data = csp.curve(typ=np.ndarray, data=self.data)\n value = getattr(csp.stats, function)(data, interval=self.interval, **self.function_args.get(function, {}))\n csp.add_graph_output(\"final_value\", value, tick_count=1)\n \n timer = Timer(\n lambda: csp.run(g, realtime=False, starttime=self.start_date, endtime=timedelta(seconds=self.num_rows))\n )\n elapsed = timer.timeit(1)\n return elapsed\n\n def setup(self, _):\n self.start_date = datetime(2020, 1, 1)\n self.num_rows = 1_000\n self.array_size = 100\n self.test_times = [self.start_date + timedelta(seconds=i) for i in range(self.num_rows)]\n self.random_values = [\n np.random.normal(size=(self.array_size,)) for i in range(self.num_rows)\n ] # 100 element np array\n self.data = list(zip(self.test_times, self.random_values))\n self.interval = 500",
"min_run_count": 2,
"name": "stats.basic.StatsBenchmarkSuite.time_stats",
"number": 0,
"param_names": [
"function"
],
"params": [
[
"'median'",
"'quantile'",
"'rank'"
]
],
"rounds": 2,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "f57f3ee288b0805597f9edee91b4d1dddf41046d34fbd46cfbd7135f459e62e3",
"warmup_time": -1
},
"version": 2
}
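
Note: benchmarks.json above is asv's machine-generated benchmark index (results format version 2), not hand-curated data. A minimal sketch for sanity-checking what is committed, assuming the repository root as the working directory:

import json
from pathlib import Path

# Hedged sketch: print the benchmarks recorded in asv's committed index.
# The path is an assumption and presumes the repo root as CWD.
index = json.loads(Path("ci/benchmarks/benchmarks.json").read_text())
print("results format version:", index.pop("version"))
for name, spec in index.items():
    print(name, spec["param_names"], "->", spec["params"])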
1 change: 1 addition & 0 deletions conda/dev-environment-unix.yml
@@ -3,6 +3,7 @@ channels:
- conda-forge
- nodefaults
dependencies:
- asv
- bison
- brotli
- bump2version>=1
1 change: 1 addition & 0 deletions conda/dev-environment-win.yml
@@ -3,6 +3,7 @@ channels:
- conda-forge
- nodefaults
dependencies:
- asv
- brotli
- bump2version>=1
- cmake
Empty file added csp/benchmarks/__init__.py
Empty file.
33 changes: 33 additions & 0 deletions csp/benchmarks/asv.conf.jsonc
@@ -0,0 +1,33 @@
// https://asv.readthedocs.io/en/v0.6.3/asv.conf.json.html
{
"version": 1,
"project": "csp",
"project_url": "https://github.com/Point72/csp",
"repo": "../..",
"branches": ["main", "tkp/bm"],
"dvcs": "git",

"install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"],
"uninstall_command": ["return-code=any python -mpip uninstall -y {project}"],
"build_command": [
"python -m pip install build",
"python -m build --wheel -o {build_cache_dir} {build_dir}"
],
"environment_type": "virtualenv",
"install_timeout": 600,
"show_commit_url": "http://github.com/point72/csp/commit/",

"pythons": ["3.11"],

// "environment_type": "mamba",
// "conda_channels": ["conda-forge"],
// "conda_environment_file": "conda/dev-environment-unix.yml",

"benchmark_dir": "../../csp/benchmarks",
"env_dir": "../../.asv/env",
"results_dir": "../../ci/benchmarks",
"html_dir": "../../.asv/html",

"hash_length": 8,
"build_cache_size": 2
}
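
Note: the directory settings in asv.conf.jsonc are resolved relative to the config file itself (csp/benchmarks/), so the ../../ prefixes land back at the repository root. A minimal sketch of the resulting layout, with illustrative paths only:

from pathlib import Path

# Hedged sketch: resolve the relative directories from
# csp/benchmarks/asv.conf.jsonc against the config file's location.
conf_dir = Path("csp/benchmarks")
for key, rel in {
    "env_dir": "../../.asv/env",
    "results_dir": "../../ci/benchmarks",
    "html_dir": "../../.asv/html",
}.items():
    print(f"{key}: {(conf_dir / rel).resolve()}")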
44 changes: 44 additions & 0 deletions csp/benchmarks/stats/basic.py
@@ -0,0 +1,44 @@
from datetime import datetime, timedelta
from timeit import Timer

import numpy as np

import csp

__all__ = ("StatsBenchmarkSuite",)


class StatsBenchmarkSuite:
"""
python -m csp.benchmarks.stats.basic
"""

params = (("median", "quantile", "rank"),)
param_names = ("function",)

rounds = 5
repeat = (100, 200, 60.0)

function_args = {"quantile": {"quant": 0.95}}

def setup(self, _):
self.start_date = datetime(2020, 1, 1)
self.num_rows = 1_000
self.array_size = 100
self.test_times = [self.start_date + timedelta(seconds=i) for i in range(self.num_rows)]
        self.random_values = [
            np.random.normal(size=(self.array_size,)) for _ in range(self.num_rows)
        ]  # each tick carries a 100-element numpy array
self.data = list(zip(self.test_times, self.random_values))
self.interval = 500

def time_stats(self, function):
def g():
data = csp.curve(typ=np.ndarray, data=self.data)
value = getattr(csp.stats, function)(data, interval=self.interval, **self.function_args.get(function, {}))
csp.add_graph_output("final_value", value, tick_count=1)

timer = Timer(
lambda: csp.run(g, realtime=False, starttime=self.start_date, endtime=timedelta(seconds=self.num_rows))
)
elapsed = timer.timeit(1)
return elapsed
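
Note: the module docstring advertises python -m csp.benchmarks.stats.basic, but the file as committed defines no entry point, so that invocation would do nothing. A hypothetical driver (not part of this PR) that runs each parameterized benchmark once, assuming csp is installed and the suite is importable:

# Hypothetical driver, not part of the PR: run each benchmark variant
# once and print the elapsed wall time, roughly one asv round.
from csp.benchmarks.stats.basic import StatsBenchmarkSuite

if __name__ == "__main__":
    suite = StatsBenchmarkSuite()
    for function in suite.params[0]:  # "median", "quantile", "rank"
        suite.setup(function)
        print(f"{function}: {suite.time_stats(function):.4f}s")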
2 changes: 2 additions & 0 deletions pyproject.toml
@@ -82,6 +82,8 @@ develop = [
"sqlalchemy", # db
"threadpoolctl", # test_random
"tornado", # profiler, perspective, websocket
# benchmarks
"asv",
]
showgraph = [
"graphviz",