
Merge pull request #355 from dyson-ai/feature/remove_plot_option
Feature/remove plot option
blooop authored May 8, 2024
2 parents 7f2b11c + 50c41a9 commit fa6a61e
Showing 6 changed files with 16 additions and 18 deletions.
20 changes: 9 additions & 11 deletions bencher/bencher.py
@@ -217,7 +217,7 @@ def sweep_sequential(
group_size: int = 1,
iterations: int = 1,
relationship_cb=None,
plot=None,
plot_callbacks: List | bool = None,
) -> List[BenchResult]:
results = []
if relationship_cb is None:
@@ -233,7 +233,7 @@ def sweep_sequential(
result_vars=result_vars,
const_vars=const_vars,
run_cfg=run_cfg,
plot=plot,
plot_callbacks=plot_callbacks,
)

if optimise_var is not None:
@@ -253,8 +253,7 @@ def plot_sweep(
pass_repeat: bool = False,
tag: str = "",
run_cfg: BenchRunCfg = None,
plot: bool = None,
plot_callbacks=None,
plot_callbacks: List | bool = None,
) -> BenchResult:
"""The all in 1 function benchmarker and results plotter.
@@ -270,7 +269,7 @@
you want the benchmark function to be passed the repeat number
tag (str,optional): Use tags to group different benchmarks together.
run_cfg: (BenchRunCfg, optional): A config for storing how the benchmarks and run
plot_callbacks: A list of plot callbacks to clal on the results
plot_callbacks: (List | bool) A list of plot callbacks to call on the results. Pass false or an empty list to turn off plotting
Raises:
ValueError: If a result variable is not set
@@ -329,8 +328,6 @@ def plot_sweep(
cv_list = list(const_vars[i])
cv_list[0] = self.convert_vars_to_params(cv_list[0], "const")
const_vars[i] = cv_list
if plot is None:
plot = self.plot

if run_cfg is None:
if self.run_cfg is None:
@@ -387,10 +384,12 @@
)

if plot_callbacks is None:
if self.plot_callbacks is not None and len(self.plot_callbacks) == 0:
plot_callbacks = [] if self.plot_callbacks is None else self.plot_callbacks
else:
if isinstance(plot_callbacks, bool):
plot_callbacks = [BenchResult.to_auto_plots] if plot_callbacks else []
elif len(self.plot_callbacks) == 0:
plot_callbacks = [BenchResult.to_auto_plots]
else:
plot_callbacks = self.plot_callbacks

bench_cfg = BenchCfg(
input_vars=input_vars,
@@ -403,7 +402,6 @@
title=title,
pass_repeat=pass_repeat,
tag=run_cfg.run_tag + tag,
auto_plot=plot,
plot_callbacks=plot_callbacks,
)
return self.run_sweep(bench_cfg, run_cfg, time_src)
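
Not part of the commit itself, but as a reading aid: plot_sweep no longer takes a plot flag, and the single plot_callbacks argument accepts a list of callbacks, a bool, or None (pass False or an empty list to turn plotting off). The sketch below is a minimal plain-Python illustration of how such an argument can be normalised; resolve_plot_callbacks, auto_plots and bench_default are hypothetical names standing in for the logic in Bench.plot_sweep, BenchResult.to_auto_plots and the Bench-level self.plot_callbacks, and the real branching in bencher.py may differ.

from typing import Callable, List, Optional, Union

PlotCallbacks = Union[List[Callable], bool, None]


def auto_plots(result) -> None:
    """Placeholder standing in for BenchResult.to_auto_plots."""
    print(f"auto-plotting {result!r}")


def resolve_plot_callbacks(
    arg: PlotCallbacks, bench_default: Optional[List[Callable]] = None
) -> List[Callable]:
    """Illustrative only: map a plot_sweep(plot_callbacks=...) value to a callback list."""
    if arg is None:
        # No per-sweep value: defer to the Bench-level default (assumed here to
        # fall back to auto plots when none is configured).
        return bench_default if bench_default else [auto_plots]
    if isinstance(arg, bool):
        # True -> default auto plots, False -> plotting disabled.
        return [auto_plots] if arg else []
    # An explicit list (possibly empty) is used as given; [] also disables plotting.
    return list(arg)


# The call sites changed in this commit pass plot_callbacks=False to disable plotting:
assert resolve_plot_callbacks(False) == []
assert resolve_plot_callbacks(True) == [auto_plots]
assert resolve_plot_callbacks([]) == []
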
2 changes: 1 addition & 1 deletion bencher/example/example_float_cat.py
@@ -46,7 +46,7 @@ def example_float_cat(
title="Float 1D Cat 1D Example",
description="""Following from the previous example lets add another input parameter to see how that affects the output. We pass the boolean 'noisy' and keep the other parameters the same""",
post_description="Now the plot has two lines, one for each of the boolean values where noisy=true and noisy=false.",
plot=False,
plot_callbacks=False,
)

# report.append(bench.get_result().to_curve())
2 changes: 1 addition & 1 deletion bencher/example/meta/example_meta.py
@@ -128,7 +128,7 @@ def __call__(self, **kwargs: Any) -> Any:
# result_vars=[BenchableObject.param.distance, BenchableObject.param.sample_noise],
# result_vars=[ BenchableObject.param.sample_noise],
# result_vars=[BenchableObject.param.result_hmap],
plot=False,
plot_callbacks=False,
)

self.plots = bch.ResultReference()
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "holobench"
version = "1.11.0"
version = "1.12.0"

authors = [{ name = "Austin Gregg-Smith", email = "blooop@gmail.com" }]
description = "A package for benchmarking the performance of arbitrary functions"
6 changes: 3 additions & 3 deletions test/test_bench_result_base.py
@@ -24,15 +24,15 @@ def test_to_dataset(self):
input_vars=[BenchableObject.param.float1],
result_vars=[BenchableObject.param.distance, BenchableObject.param.sample_noise],
run_cfg=bch.BenchRunCfg(repeats=1),
plot=False,
plot_callbacks=False,
)

res_repeat2 = bench.plot_sweep(
"sweep2repeat",
input_vars=[BenchableObject.param.float1],
result_vars=[BenchableObject.param.distance, BenchableObject.param.sample_noise],
run_cfg=bch.BenchRunCfg(repeats=2),
plot=False,
plot_callbacks=False,
)

# print(res_repeat1.to_dataset())
@@ -57,7 +57,7 @@ def test_select_level(self):
res = bench.plot_sweep(
input_vars=["float_var", "cat_var"],
run_cfg=bch.BenchRunCfg(level=4),
plot=False,
plot_callbacks=False,
)

def asserts(ds, expected_float, expected_cat):
2 changes: 1 addition & 1 deletion test/test_sweep_base.py
@@ -36,7 +36,7 @@ def test_setting_const(self) -> None:
"tst",
input_vars=[AllSweepVars.param.var_float.with_samples(3)],
const_vars=consts,
plot=False,
plot_callbacks=False,
)

consts_after = [i[0] for i in res.bench_cfg.const_vars]
