From e7f2179ba7664a25b4ae336d113d05f1bdd9e427 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Fri, 14 Oct 2022 14:56:36 -0700 Subject: [PATCH 01/33] Add an auto_goal function and use it in the Runner --- adaptive/runner.py | 97 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 91 insertions(+), 6 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index ecc7fe14b..9cb9732d4 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -13,6 +13,7 @@ import traceback import warnings from contextlib import suppress +from datetime import datetime, timedelta from typing import TYPE_CHECKING, Any, Callable import loky @@ -129,10 +130,11 @@ def __init__( shutdown_executor=False, retries=0, raise_if_retries_exceeded=True, + allow_running_forever=False, ): self.executor = _ensure_executor(executor) - self.goal = goal + self.goal = auto_goal(goal, learner, allow_running_forever) self._max_tasks = ntasks @@ -396,6 +398,7 @@ def __init__( shutdown_executor=shutdown_executor, retries=retries, raise_if_retries_exceeded=raise_if_retries_exceeded, + allow_running_forever=False, ) self._run() @@ -518,11 +521,6 @@ def __init__( raise_if_retries_exceeded=True, ): - if goal is None: - - def goal(_): - return False - if ( executor is None and _default_executor is concurrent.ProcessPoolExecutor @@ -548,6 +546,7 @@ def goal(_): shutdown_executor=shutdown_executor, retries=retries, raise_if_retries_exceeded=raise_if_retries_exceeded, + allow_running_forever=True, ) self.ioloop = ioloop or asyncio.get_event_loop() self.task = None @@ -861,3 +860,89 @@ def _get_ncores(ex): return ex._pool.size # not public API! 
else: raise TypeError(f"Cannot get number of cores for {ex.__class__}") + + +class _TimeGoal: + def __init__(self, dt: timedelta | datetime): + self.dt = dt + self.start_time = None + + def __call__(self, _): + if isinstance(self.dt, timedelta): + if self.start_time is None: + self.start_time = datetime.now() + return datetime.now() - self.start_time > self.dt + elif isinstance(self.dt, datetime): + return datetime.now() > self.dt + else: + raise TypeError(f"{self.dt=} is not a datetime or timedelta.") + + +def auto_goal( + goal: Callable[[BaseLearner], bool] | int | float | datetime | timedelta | None, + learner: BaseLearner, + allow_running_forever: bool = True, +): + """Extract a goal from the learners. + + Parameters + ---------- + goal + The goal to extract. Can be a callable, an integer, a float, a datetime, + a timedelta or None. + If it is a callable, it is returned as is. + If it is an integer, the goal is reached after that many points have been + returned. + If it is a float, the goal is reached when the learner has reached a loss + less than that. + If it is a datetime, the goal is reached when the current time is after the + datetime. + If it is a timedelta, the goal is reached when the current time is after + the start time plus that timedelta. + If it is None, and + - the learner type is `adaptive.SequenceLearner`, it continues until + it no more points to add + - the learner type is `adaptive.Integrator`, it continues until the + error is less than the tolerance. + - otherwise, it continues forever, unless `allow_running_forever` is + False, in which case it raises a ValueError. + learner + Learner for which to determine the goal. + allow_running_forever + If True, and the goal is None and the learner is not a SequenceLearner, + then a goal that never stops is returned, otherwise an exception is raised. 
+ + Returns + ------- + Callable[[adaptive.BaseLearner], bool] + """ + from adaptive import BalancingLearner, IntegratorLearner, SequenceLearner + + if callable(goal): + return goal + if isinstance(goal, float): + return lambda learner: learner.loss() <= goal + if isinstance(learner, BalancingLearner): + # Note that the float loss goal is more efficiently implemented in the + # BalancingLearner itself. That is why the previous if statement is + # above this one. + goals = [auto_goal(goal, l, allow_running_forever) for l in learner.learners] + return lambda learner: all(goal(l) for l, goal in zip(learner.learners, goals)) + if isinstance(goal, int): + return lambda learner: learner.npoints >= goal + if isinstance(goal, (timedelta, datetime)): + return _TimeGoal(goal) + if goal is None: + if isinstance(learner, SequenceLearner): + return SequenceLearner.done + if isinstance(learner, IntegratorLearner): + return IntegratorLearner.done + warnings.warn("Goal is None which means the learners continue forever!") + if allow_running_forever: + return lambda _: False + else: + raise ValueError( + "Goal is None which means the learners" + " continue forever and this is not allowed." 
+ ) raise ValueError(f"Cannot determine goal from {goal}.") From 30bccb7d7d82928cedc0a3e0639c604f9326126c Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Fri, 14 Oct 2022 15:25:04 -0700 Subject: [PATCH 02/33] Use new auto goal functionality --- README.md | 2 +- adaptive/tests/test_average_learner.py | 2 +- adaptive/tests/test_balancing_learner.py | 6 +-- adaptive/tests/test_learnernd.py | 6 +-- docs/logo.py | 2 +- docs/source/logo.md | 2 +- .../tutorial/tutorial.AverageLearner.md | 4 +- .../tutorial/tutorial.BalancingLearner.md | 4 +- docs/source/tutorial/tutorial.Learner1D.md | 6 +-- docs/source/tutorial/tutorial.Learner2D.md | 2 +- docs/source/tutorial/tutorial.LearnerND.md | 2 +- .../tutorial/tutorial.advanced-topics.md | 14 ++--- docs/source/tutorial/tutorial.custom_loss.md | 6 +-- docs/source/tutorial/tutorial.parallelism.md | 10 ++-- example-notebook.ipynb | 51 ++++++++++--------- 15 files changed, 60 insertions(+), 59 deletions(-) diff --git a/README.md b/README.md index f5ad0b922..64a6a7e84 100644 --- a/README.md +++ b/README.md @@ -75,7 +75,7 @@ def peak(x, a=0.01): learner = Learner1D(peak, bounds=(-1, 1)) -runner = Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = Runner(learner, goal=0.01) runner.live_info() runner.live_plot() ``` diff --git a/adaptive/tests/test_average_learner.py b/adaptive/tests/test_average_learner.py index 5de3ced45..519188dc4 100644 --- a/adaptive/tests/test_average_learner.py +++ b/adaptive/tests/test_average_learner.py @@ -61,7 +61,7 @@ def constant_function(seed): learner = AverageLearner( constant_function, atol=0.01, rtol=0.01, min_npoints=min_npoints ) - simple(learner, lambda l: l.loss() < 1) + simple(learner, 1.0) assert learner.npoints >= max(2, min_npoints) diff --git a/adaptive/tests/test_balancing_learner.py b/adaptive/tests/test_balancing_learner.py index 0cf3dcd6d..01e13cb53 100644 --- a/adaptive/tests/test_balancing_learner.py +++ b/adaptive/tests/test_balancing_learner.py @@ -52,10 +52,10 @@ def 
test_ask_0(strategy): @pytest.mark.parametrize( "strategy, goal", [ - ("loss", lambda l: l.loss() < 0.1), - ("loss_improvements", lambda l: l.loss() < 0.1), + ("loss", 0.1), + ("loss_improvements", 0.1), ("npoints", lambda bl: all(l.npoints > 10 for l in bl.learners)), - ("cycle", lambda l: l.loss() < 0.1), + ("cycle", 0.1), ], ) def test_strategies(strategy, goal): diff --git a/adaptive/tests/test_learnernd.py b/adaptive/tests/test_learnernd.py index 9ae359a8f..2c41fabee 100644 --- a/adaptive/tests/test_learnernd.py +++ b/adaptive/tests/test_learnernd.py @@ -33,8 +33,8 @@ def test_interior_vs_bbox_gives_same_result(): hull = scipy.spatial.ConvexHull(control._bounds_points) learner = LearnerND(f, bounds=hull) - simple(control, goal=lambda l: l.loss() < 0.1) - simple(learner, goal=lambda l: l.loss() < 0.1) + simple(control, goal=0.1) + simple(learner, goal=0.1) assert learner.data == control.data @@ -47,4 +47,4 @@ def test_vector_return_with_a_flat_layer(): h3 = lambda xy: np.array([0 * f(xy), g(xy)]) # noqa: E731 for function in [h1, h2, h3]: learner = LearnerND(function, bounds=[(-1, 1), (-1, 1)]) - simple(learner, goal=lambda l: l.loss() < 0.1) + simple(learner, goal=0.1) diff --git a/docs/logo.py b/docs/logo.py index 5ba52ed67..3b0a64031 100644 --- a/docs/logo.py +++ b/docs/logo.py @@ -22,7 +22,7 @@ def ring(xy): return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4) learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) - adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01) + adaptive.runner.simple(learner, goal=0.01) return learner diff --git a/docs/source/logo.md b/docs/source/logo.md index c70a8f98b..2f46d3920 100644 --- a/docs/source/logo.md +++ b/docs/source/logo.md @@ -110,7 +110,7 @@ def create_and_run_learner(): return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4) learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) - adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.005) + adaptive.runner.simple(learner, 
goal=0.005) return learner diff --git a/docs/source/tutorial/tutorial.AverageLearner.md b/docs/source/tutorial/tutorial.AverageLearner.md index 5bdd5ae27..ad66398fb 100644 --- a/docs/source/tutorial/tutorial.AverageLearner.md +++ b/docs/source/tutorial/tutorial.AverageLearner.md @@ -45,8 +45,8 @@ def g(n): ```{code-cell} ipython3 learner = adaptive.AverageLearner(g, atol=None, rtol=0.01) -# `loss < 1` means that we reached the `rtol` or `atol` -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 1) +# `loss < 1.0` means that we reached the `rtol` or `atol` +runner = adaptive.Runner(learner, goal=1.0) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.BalancingLearner.md b/docs/source/tutorial/tutorial.BalancingLearner.md index 4276dd0d4..c9f17ca24 100644 --- a/docs/source/tutorial/tutorial.BalancingLearner.md +++ b/docs/source/tutorial/tutorial.BalancingLearner.md @@ -46,7 +46,7 @@ learners = [ ] bal_learner = adaptive.BalancingLearner(learners) -runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(bal_learner, goal=0.01) ``` ```{code-cell} ipython3 @@ -86,7 +86,7 @@ learner = adaptive.BalancingLearner.from_product( jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos ) -runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.BlockingRunner(learner, goal=0.01) # The `cdims` will automatically be set when using `from_product`, so # `plot()` will return a HoloMap with correctly labeled sliders. diff --git a/docs/source/tutorial/tutorial.Learner1D.md b/docs/source/tutorial/tutorial.Learner1D.md index 60cb8aac5..02b3f1bf8 100644 --- a/docs/source/tutorial/tutorial.Learner1D.md +++ b/docs/source/tutorial/tutorial.Learner1D.md @@ -65,7 +65,7 @@ A {class}`~concurrent.futures.ProcessPoolExecutor` cannot be used on Windows for ```{code-cell} ipython3 # The end condition is when the "loss" is less than 0.1. 
In the context of the # 1D learner this means that we will resolve features in 'func' with width 0.1 or wider. -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, goal=0.01) ``` ```{code-cell} ipython3 @@ -124,7 +124,7 @@ The `Learner1D` can be used for such functions: ```{code-cell} ipython3 learner = adaptive.Learner1D(f_levels, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, goal=0.01) ``` ```{code-cell} ipython3 @@ -156,7 +156,7 @@ from adaptive.learner.learner1D import ( curvature_loss = curvature_loss_function() learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=curvature_loss) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, goal=0.01) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.Learner2D.md b/docs/source/tutorial/tutorial.Learner2D.md index c2f6ddba5..05107de85 100644 --- a/docs/source/tutorial/tutorial.Learner2D.md +++ b/docs/source/tutorial/tutorial.Learner2D.md @@ -46,7 +46,7 @@ learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) ``` ```{code-cell} ipython3 -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, goal=0.01) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.LearnerND.md b/docs/source/tutorial/tutorial.LearnerND.md index aca8f187e..dae8ab2ec 100644 --- a/docs/source/tutorial/tutorial.LearnerND.md +++ b/docs/source/tutorial/tutorial.LearnerND.md @@ -50,7 +50,7 @@ def sphere(xyz): learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)]) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 1e-3) +runner = adaptive.Runner(learner, goal=1e-3) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.advanced-topics.md b/docs/source/tutorial/tutorial.advanced-topics.md index bcea65792..8d53e44c0 100644 --- 
a/docs/source/tutorial/tutorial.advanced-topics.md +++ b/docs/source/tutorial/tutorial.advanced-topics.md @@ -51,7 +51,7 @@ learner = adaptive.Learner1D(f, bounds=(-1, 1)) control = adaptive.Learner1D(f, bounds=(-1, 1)) # Let's only run the learner -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, goal=0.01) ``` ```{code-cell} ipython3 @@ -134,7 +134,7 @@ The simplest way to accomplish this is to use {class}`adaptive.BlockingRunner`: ```{code-cell} ipython3 learner = adaptive.Learner1D(f, bounds=(-1, 1)) -adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) +adaptive.BlockingRunner(learner, goal=0.01) # This will only get run after the runner has finished learner.plot() ``` @@ -155,7 +155,7 @@ The simplest way is to use {class}`adaptive.runner.simple` to run your learner: learner = adaptive.Learner1D(f, bounds=(-1, 1)) # blocks until completion -adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01) +adaptive.runner.simple(learner, goal=0.01) learner.plot() ``` @@ -169,7 +169,7 @@ from adaptive.runner import SequentialExecutor learner = adaptive.Learner1D(f, bounds=(-1, 1)) runner = adaptive.Runner( - learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.01 + learner, executor=SequentialExecutor(), goal=0.01 ) ``` @@ -292,7 +292,7 @@ One way to inspect runners is to instantiate one with `log=True`: ```{code-cell} ipython3 learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01, log=True) +runner = adaptive.Runner(learner, goal=0.01, log=True) ``` ```{code-cell} ipython3 @@ -351,7 +351,7 @@ async def time(runner): ioloop = asyncio.get_event_loop() learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, goal=0.01) timer = ioloop.create_task(time(runner)) ``` @@ -462,7 +462,7 @@ def f(x): learner = adaptive.Learner1D(f, (-1, 1)) 
-adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.1) +adaptive.BlockingRunner(learner, goal=0.1) ``` If you use `asyncio` already in your script and want to integrate `adaptive` into it, then you can use the default {class}`~adaptive.Runner` as you would from a notebook. diff --git a/docs/source/tutorial/tutorial.custom_loss.md b/docs/source/tutorial/tutorial.custom_loss.md index be1f78669..73ff707b7 100644 --- a/docs/source/tutorial/tutorial.custom_loss.md +++ b/docs/source/tutorial/tutorial.custom_loss.md @@ -74,7 +74,7 @@ def f_divergent_1d(x): learner = adaptive.Learner1D( f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d ) -runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.BlockingRunner(learner, goal=0.01) learner.plot().select(y=(0, 10000)) ``` @@ -99,7 +99,7 @@ learner = adaptive.Learner2D( ) # this takes a while, so use the async Runner so we know *something* is happening -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.03 or l.npoints > 1000) +runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.03 or l.npoints > 1000) ``` ```{code-cell} ipython3 @@ -154,7 +154,7 @@ def resolution_loss_function(min_distance=0, max_distance=1): loss = resolution_loss_function(min_distance=0.01) learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss) -runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02) +runner = adaptive.BlockingRunner(learner, goal=0.02) learner.plot(tri_alpha=0.3).relabel("1 / (x^2 + y^2) in log scale").opts( hv.opts.EdgePaths(color="w"), hv.opts.Image(logz=True, colorbar=True) ) diff --git a/docs/source/tutorial/tutorial.parallelism.md b/docs/source/tutorial/tutorial.parallelism.md index f3c1985f6..bc71d909c 100644 --- a/docs/source/tutorial/tutorial.parallelism.md +++ b/docs/source/tutorial/tutorial.parallelism.md @@ -24,7 +24,7 @@ from concurrent.futures import ProcessPoolExecutor executor = 
ProcessPoolExecutor(max_workers=4) learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, executor=executor, goal=lambda l: l.loss() < 0.05) +runner = adaptive.Runner(learner, executor=executor, goal=0.05) runner.live_info() runner.live_plot(update_interval=0.1) ``` @@ -37,7 +37,7 @@ import ipyparallel client = ipyparallel.Client() # You will need to start an `ipcluster` to make this work learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, executor=client, goal=0.01) runner.live_info() runner.live_plot() ``` @@ -52,7 +52,7 @@ import distributed client = distributed.Client() learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, executor=client, goal=0.01) runner.live_info() runner.live_plot(update_interval=0.1) ``` @@ -80,7 +80,7 @@ if __name__ == "__main__": learner, executor=MPIPoolExecutor(), shutdown_executor=True, - goal=lambda l: l.loss() < 0.01, + goal=0.01, ) # periodically save the data (in case the job dies) @@ -132,6 +132,6 @@ ex = get_reusable_executor() f = lambda x: x learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01, executor=ex) +runner = adaptive.Runner(learner, goal=0.01, executor=ex) runner.live_info() ``` diff --git a/example-notebook.ipynb b/example-notebook.ipynb index b00ac622c..e910874cc 100644 --- a/example-notebook.ipynb +++ b/example-notebook.ipynb @@ -106,7 +106,7 @@ "source": [ "# The end condition is when the \"loss\" is less than 0.01. 
In the context of the\n", "# 1D learner this means that we will resolve features in 'func' with width 0.01 or wider.\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, goal=0.01)\n", "runner.live_info()" ] }, @@ -194,7 +194,7 @@ "metadata": {}, "outputs": [], "source": [ - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, goal=0.01)\n", "runner.live_info()" ] }, @@ -279,7 +279,7 @@ "outputs": [], "source": [ "learner = adaptive.AverageLearner(g, atol=None, rtol=0.01)\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 2)\n", + "runner = adaptive.Runner(learner, goal=2.0)\n", "runner.live_info()" ] }, @@ -465,7 +465,7 @@ "# We use a SequentialExecutor, which runs the function to be learned in *this* process only.\n", "# This means we don't pay the overhead of evaluating the function in another process.\n", "executor = SequentialExecutor()\n", - "runner = adaptive.Runner(learner, executor=executor, goal=lambda l: l.done())\n", + "runner = adaptive.Runner(learner, executor=executor)\n", "runner.live_info()" ] }, @@ -535,7 +535,7 @@ "outputs": [], "source": [ "learner = adaptive.Learner1D(f_levels, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, goal=0.01)\n", "runner.live_info()" ] }, @@ -564,7 +564,6 @@ "metadata": {}, "outputs": [], "source": [ - "# this step takes a lot of time, it will finish at about 3300 points, which can take up to 6 minutes\n", "def sphere(xyz):\n", " x, y, z = xyz\n", " a = 0.4\n", @@ -572,7 +571,7 @@ "\n", "\n", "learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 2000)\n", + "runner = adaptive.Runner(learner, goal=2000)\n", "runner.live_info()" ] }, @@ -675,13 +674,15 @@ "\n", "\n", "def f_divergent_1d(x):\n", + " if x == 0:\n", + " return 
np.inf\n", " return 1 / x**2\n", "\n", "\n", "learner = adaptive.Learner1D(\n", " f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d\n", ")\n", - "runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.BlockingRunner(learner, goal=0.01)\n", "learner.plot().select(y=(0, 10000))" ] }, @@ -718,7 +719,7 @@ ")\n", "\n", "# this takes a while, so use the async Runner so we know *something* is happening\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.02)\n", + "runner = adaptive.Runner(learner, goal=0.02)\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.2, plotter=plot_logz)" ] @@ -780,7 +781,7 @@ "loss = partial(resolution_loss, min_distance=0.01)\n", "\n", "learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)\n", - "runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02)\n", + "runner = adaptive.BlockingRunner(learner, goal=0.02)\n", "plot_logz(learner)" ] }, @@ -826,7 +827,7 @@ "]\n", "\n", "bal_learner = adaptive.BalancingLearner(learners)\n", - "runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(bal_learner, goal=0.01)\n", "runner.live_info()" ] }, @@ -870,7 +871,7 @@ " jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos\n", ")\n", "\n", - "runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.BlockingRunner(learner, goal=0.01)\n", "\n", "# The `cdims` will automatically be set when using `from_product`, so\n", "# `plot()` will return a HoloMap with correctly labeled sliders.\n", @@ -935,7 +936,7 @@ "metadata": {}, "outputs": [], "source": [ - "runner = adaptive.Runner(learner, goal=lambda l: l.learner.loss() < 0.05)\n", + "runner = adaptive.Runner(learner, goal=0.05)\n", "runner.live_info()" ] }, @@ -1005,7 +1006,7 @@ " acq_func=\"gp_hedge\",\n", " acq_optimizer=\"lbfgs\",\n", ")\n", - "runner = adaptive.Runner(learner, 
ntasks=1, goal=lambda l: l.npoints > 40)\n", + "runner = adaptive.Runner(learner, ntasks=1, goal=40)\n", "runner.live_info()" ] }, @@ -1063,7 +1064,7 @@ "executor = ProcessPoolExecutor(max_workers=4)\n", "\n", "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, executor=executor, goal=lambda l: l.loss() < 0.05)\n", + "runner = adaptive.Runner(learner, executor=executor, goal=0.05)\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.1)" ] @@ -1086,7 +1087,7 @@ "client = ipyparallel.Client() # You will need to start an `ipcluster` to make this work\n", "\n", "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, executor=client, goal=0.01)\n", "runner.live_info()\n", "runner.live_plot()" ] @@ -1111,7 +1112,7 @@ "client = distributed.Client()\n", "\n", "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, executor=client, goal=0.01)\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.1)" ] @@ -1163,7 +1164,7 @@ "control = adaptive.Learner1D(partial(peak, wait=False), bounds=(-1, 1))\n", "\n", "# Let's only run the learner\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, goal=0.01)\n", "runner.live_info()" ] }, @@ -1239,7 +1240,7 @@ "\n", "\n", "learner = adaptive.Learner1D(slow_f, bounds=[0, 1])\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 100)\n", + "runner = adaptive.Runner(learner, goal=100)\n", "\n", "runner.start_periodic_saving(\n", " save_kwargs=dict(fname=\"data/periodic_example.p\"), interval=6\n", @@ -1335,7 +1336,7 @@ "outputs": [], "source": [ "learner = adaptive.Learner1D(partial(peak, wait=False), bounds=(-1, 1))\n", - "adaptive.BlockingRunner(learner, 
goal=lambda l: l.loss() < 0.005)\n", + "adaptive.BlockingRunner(learner, goal=0.005)\n", "# This will only get run after the runner has finished\n", "learner.plot()" ] @@ -1369,7 +1370,7 @@ "learner = adaptive.Learner1D(partial(peak, wait=False), bounds=(-1, 1))\n", "\n", "# blocks until completion\n", - "adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.002)\n", + "adaptive.runner.simple(learner, goal=0.002)\n", "\n", "learner.plot()" ] @@ -1394,7 +1395,7 @@ "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", "\n", "runner = adaptive.Runner(\n", - " learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.002\n", + " learner, executor=SequentialExecutor(), goal=0.002\n", ")\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.1)" @@ -1543,7 +1544,7 @@ "outputs": [], "source": [ "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.1, log=True)\n", + "runner = adaptive.Runner(learner, goal=0.1, log=True)\n", "runner.live_info()" ] }, @@ -1628,7 +1629,7 @@ "ioloop = asyncio.get_event_loop()\n", "\n", "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.1)\n", + "runner = adaptive.Runner(learner, goal=0.1)\n", "\n", "timer = ioloop.create_task(time(runner))\n", "runner.live_info()" @@ -1667,7 +1668,7 @@ "\n", "learner = adaptive.Learner1D(peak, (-1, 1))\n", "\n", - "adaptive.BlockingRunner(learner, goal=lambda: l: l.loss() < 0.1)\n", + "adaptive.BlockingRunner(learner, goal=0.1)\n", "```\n", "\n", "If you use `asyncio` already in your script and want to integrate `adaptive` into it, then you can use the default `Runner` as you would from a notebook. 
If you want to wait for the runner to finish, then you can simply\n", From d4359a474cdfe9f75b8bb30ab64f02bf56d319aa Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Fri, 14 Oct 2022 15:25:58 -0700 Subject: [PATCH 03/33] Replace point goals --- adaptive/tests/test_learner1d.py | 10 +++++----- adaptive/tests/test_runner.py | 4 ++-- adaptive/tests/unit/test_learnernd_integration.py | 8 ++++---- docs/source/tutorial/tutorial.Learner1D.md | 2 +- docs/source/tutorial/tutorial.LearnerND.md | 2 +- docs/source/tutorial/tutorial.SKOptLearner.md | 2 +- docs/source/tutorial/tutorial.advanced-topics.md | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/adaptive/tests/test_learner1d.py b/adaptive/tests/test_learner1d.py index 7e990bd7b..b7a917a4f 100644 --- a/adaptive/tests/test_learner1d.py +++ b/adaptive/tests/test_learner1d.py @@ -298,7 +298,7 @@ def test_equal(l1, l2): for function in [f, f_vec]: learner = Learner1D(function, bounds=(-1, 1)) learner2 = Learner1D(function, bounds=(-1, 1)) - simple(learner, goal=lambda l: l.npoints > 200) + simple(learner, goal=200) xs, ys = zip(*learner.data.items()) # Make the scale huge to no get a scale doubling @@ -374,7 +374,7 @@ def f(x): loss = curvature_loss_function() assert loss.nth_neighbors == 1 learner = Learner1D(f, (-1, 1), loss_per_interval=loss) - simple(learner, goal=lambda l: l.npoints > 100) + simple(learner, goal=100) assert learner.npoints > 100 @@ -385,7 +385,7 @@ def f(x): loss = curvature_loss_function() assert loss.nth_neighbors == 1 learner = Learner1D(f, (-1, 1), loss_per_interval=loss) - simple(learner, goal=lambda l: l.npoints > 100) + simple(learner, goal=100) assert learner.npoints > 100 @@ -398,7 +398,7 @@ def f(x): return x + a**2 / (a**2 + x**2) learner = Learner1D(f, bounds=(-1, 1)) - simple(learner, lambda l: l.npoints > 100) + simple(learner, 100) def test_inf_loss_with_missing_bounds(): @@ -408,6 +408,6 @@ def test_inf_loss_with_missing_bounds(): 
loss_per_interval=curvature_loss_function(), ) # must be done in parallel because otherwise the bounds will be evaluated first - BlockingRunner(learner, goal=lambda learner: learner.loss() < 0.01) + BlockingRunner(learner, goal=0.01) learner.npoints > 20 diff --git a/adaptive/tests/test_runner.py b/adaptive/tests/test_runner.py index 169c38431..51dc777d9 100644 --- a/adaptive/tests/test_runner.py +++ b/adaptive/tests/test_runner.py @@ -43,7 +43,7 @@ def f(x): return x learner = Learner1D(f, (-1, 1)) - runner(learner, lambda l: l.npoints > 10) + runner(learner, 10) assert len(learner.data) > 10 @@ -152,5 +152,5 @@ def test_loky_executor(loky_executor): def test_default_executor(): learner = Learner1D(linear, (-1, 1)) - runner = AsyncRunner(learner, goal=lambda l: l.npoints > 10) + runner = AsyncRunner(learner, goal=10) asyncio.get_event_loop().run_until_complete(runner.task) diff --git a/adaptive/tests/unit/test_learnernd_integration.py b/adaptive/tests/unit/test_learnernd_integration.py index ce3482e9c..3cbd132b0 100644 --- a/adaptive/tests/unit/test_learnernd_integration.py +++ b/adaptive/tests/unit/test_learnernd_integration.py @@ -16,21 +16,21 @@ def ring_of_fire(xy, d=0.75): def test_learnerND_runs_to_10_points(): learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)]) - SimpleRunner(learner, goal=lambda l: l.npoints >= 10) + SimpleRunner(learner, goal=10) assert learner.npoints == 10 @pytest.mark.parametrize("execution_number", range(5)) def test_learnerND_runs_to_10_points_Blocking(execution_number): learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)]) - BlockingRunner(learner, goal=lambda l: l.npoints >= 10) + BlockingRunner(learner, goal=10) assert learner.npoints >= 10 def test_learnerND_curvature_runs_to_10_points(): loss = curvature_loss_function() learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss) - SimpleRunner(learner, goal=lambda l: l.npoints >= 10) + SimpleRunner(learner, goal=10) assert learner.npoints 
== 10 @@ -38,7 +38,7 @@ def test_learnerND_curvature_runs_to_10_points(): def test_learnerND_curvature_runs_to_10_points_Blocking(execution_number): loss = curvature_loss_function() learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss) - BlockingRunner(learner, goal=lambda l: l.npoints >= 10) + BlockingRunner(learner, goal=10) assert learner.npoints >= 10 diff --git a/docs/source/tutorial/tutorial.Learner1D.md b/docs/source/tutorial/tutorial.Learner1D.md index 02b3f1bf8..4e3629ac3 100644 --- a/docs/source/tutorial/tutorial.Learner1D.md +++ b/docs/source/tutorial/tutorial.Learner1D.md @@ -187,7 +187,7 @@ learner_h = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=uniform_loss) learner_1 = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=default_loss) learner_2 = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=curvature_loss) -npoints_goal = lambda l: l.npoints >= 100 +npoints_goal = 100 # adaptive.runner.simple is a non parallel blocking runner. 
adaptive.runner.simple(learner_h, goal=npoints_goal) adaptive.runner.simple(learner_1, goal=npoints_goal) diff --git a/docs/source/tutorial/tutorial.LearnerND.md b/docs/source/tutorial/tutorial.LearnerND.md index dae8ab2ec..cb72f5a23 100644 --- a/docs/source/tutorial/tutorial.LearnerND.md +++ b/docs/source/tutorial/tutorial.LearnerND.md @@ -123,7 +123,7 @@ b = [(-1, -1, -1), (-1, 1, -1), (-1, -1, 1), (-1, 1, 1), (1, 1, -1), (1, -1, -1) hull = scipy.spatial.ConvexHull(b) learner = adaptive.LearnerND(f, hull) -adaptive.BlockingRunner(learner, goal=lambda l: l.npoints > 2000) +adaptive.BlockingRunner(learner, goal=2000) learner.plot_isosurface(-0.5) ``` diff --git a/docs/source/tutorial/tutorial.SKOptLearner.md b/docs/source/tutorial/tutorial.SKOptLearner.md index fb82bca17..742af5cd5 100644 --- a/docs/source/tutorial/tutorial.SKOptLearner.md +++ b/docs/source/tutorial/tutorial.SKOptLearner.md @@ -47,7 +47,7 @@ learner = adaptive.SKOptLearner( acq_func="gp_hedge", acq_optimizer="lbfgs", ) -runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 40) +runner = adaptive.Runner(learner, ntasks=1, goal=40) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.advanced-topics.md b/docs/source/tutorial/tutorial.advanced-topics.md index 8d53e44c0..cb6a2c828 100644 --- a/docs/source/tutorial/tutorial.advanced-topics.md +++ b/docs/source/tutorial/tutorial.advanced-topics.md @@ -90,7 +90,7 @@ def slow_f(x): learner = adaptive.Learner1D(slow_f, bounds=[0, 1]) -runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 100) +runner = adaptive.Runner(learner, goal=100) runner.start_periodic_saving( save_kwargs=dict(fname="data/periodic_example.p"), interval=6 ) From 17650f126076373d5200947580d997f7478aaf4f Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Fri, 14 Oct 2022 15:29:25 -0700 Subject: [PATCH 04/33] use auto_goal in runner.simple --- adaptive/runner.py | 1 + 1 file changed, 1 insertion(+) diff --git a/adaptive/runner.py 
b/adaptive/runner.py index 9cb9732d4..916c0d9ca 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -738,6 +738,7 @@ def simple(learner, goal): The end condition for the calculation. This function must take the learner as its sole argument, and return True if we should stop. """ + goal = auto_goal(goal, learner) while not goal(learner): xs, _ = learner.ask(1) for x in xs: From 8fc4db79bc7d66d1b74b02964dbbc384503cabe2 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Fri, 14 Oct 2022 15:37:04 -0700 Subject: [PATCH 05/33] fix comment --- docs/source/tutorial/tutorial.Learner1D.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/tutorial/tutorial.Learner1D.md b/docs/source/tutorial/tutorial.Learner1D.md index 4e3629ac3..c524ca439 100644 --- a/docs/source/tutorial/tutorial.Learner1D.md +++ b/docs/source/tutorial/tutorial.Learner1D.md @@ -63,8 +63,8 @@ On Windows systems the runner will use a {class}`loky.get_reusable_executor`. A {class}`~concurrent.futures.ProcessPoolExecutor` cannot be used on Windows for reasons. ```{code-cell} ipython3 -# The end condition is when the "loss" is less than 0.1. In the context of the -# 1D learner this means that we will resolve features in 'func' with width 0.1 or wider. +# The end condition is when the "loss" is less than 0.01. In the context of the +# 1D learner this means that we will resolve features in 'func' with width 0.01 or wider. 
runner = adaptive.Runner(learner, goal=0.01) ``` From 3e68f44b1bf28cf89850b582cc509d48cb35db5a Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Fri, 14 Oct 2022 15:38:31 -0700 Subject: [PATCH 06/33] fix doc-string --- adaptive/runner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 916c0d9ca..3f528f2a5 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -903,8 +903,8 @@ def auto_goal( If it is None, and - the learner type is `adaptive.SequenceLearner`, it continues until it no more points to add - - the learner type is `adaptive.Integrator`, it continues until the - error is less than the tolerance. + - the learner type is `adaptive.IntegratorLearner`, it continues until the + error is less than the tolerance specified in the learner. - otherwise, it continues forever, unless `allow_running_forever` is False, in which case it raises a ValueError. learner From 0836e68cba46298742c25ccc2032d09d31efc793 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Fri, 14 Oct 2022 15:40:50 -0700 Subject: [PATCH 07/33] =?UTF-8?q?Fixes=20regarding=20>=20vs=20=E2=89=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- adaptive/runner.py | 2 +- adaptive/tests/test_learner1d.py | 4 ++-- adaptive/tests/test_runner.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 3f528f2a5..2161d70dd 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -895,7 +895,7 @@ def auto_goal( If it is an integer, the goal is reached after that many points have been returned. If it is a float, the goal is reached when the learner has reached a loss - less than that. + equal or less than that. If it is a datetime, the goal is reached when the current time is after the datetime. 
If it is a timedelta, the goal is reached when the current time is after diff --git a/adaptive/tests/test_learner1d.py b/adaptive/tests/test_learner1d.py index b7a917a4f..f006abf53 100644 --- a/adaptive/tests/test_learner1d.py +++ b/adaptive/tests/test_learner1d.py @@ -375,7 +375,7 @@ def f(x): assert loss.nth_neighbors == 1 learner = Learner1D(f, (-1, 1), loss_per_interval=loss) simple(learner, goal=100) - assert learner.npoints > 100 + assert learner.npoints >= 100 def test_curvature_loss_vectors(): @@ -386,7 +386,7 @@ def f(x): assert loss.nth_neighbors == 1 learner = Learner1D(f, (-1, 1), loss_per_interval=loss) simple(learner, goal=100) - assert learner.npoints > 100 + assert learner.npoints >= 100 def test_NaN_loss(): diff --git a/adaptive/tests/test_runner.py b/adaptive/tests/test_runner.py index 51dc777d9..295f9601e 100644 --- a/adaptive/tests/test_runner.py +++ b/adaptive/tests/test_runner.py @@ -44,7 +44,7 @@ def f(x): learner = Learner1D(f, (-1, 1)) runner(learner, 10) - assert len(learner.data) > 10 + assert len(learner.data) >= 10 @pytest.mark.parametrize("runner", runners) From fbc686c75bdb6951e0e2487d3944b2a4bb2eadd3 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Fri, 14 Oct 2022 15:56:14 -0700 Subject: [PATCH 08/33] Fix docstring formatting --- adaptive/runner.py | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 2161d70dd..2443cd2fc 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -891,22 +891,23 @@ def auto_goal( goal The goal to extract. Can be a callable, an integer, a float, a datetime, a timedelta or None. - If it is a callable, it is returned as is. - If it is an integer, the goal is reached after that many points have been - returned. - If it is a float, the goal is reached when the learner has reached a loss - equal or less than that. - If it is a datetime, the goal is reached when the current time is after the - datetime. 
- If it is a timedelta, the goal is reached when the current time is after - the start time plus that timedelta. - If it is None, and - - the learner type is `adaptive.SequenceLearner`, it continues until - it no more points to add - - the learner type is `adaptive.IntegratorLearner`, it continues until the - error is less than the tolerance specified in the learner. - - otherwise, it continues forever, unless `allow_running_forever` is - False, in which case it raises a ValueError. + If the type of `goal` is: + + * ``callable``, it is returned as is. + * ``int``, the goal is reached after that many points have been added. + * ``float``, the goal is reached when the learner has reached a loss + equal or less than that. + * `datetime.datetime`, the goal is reached when the current time is after the + datetime. + * `datetime.timedelta`, the goal is reached when the current time is after + the start time plus that timedelta. + * ``None`` and + * the learner type is `adaptive.SequenceLearner`, it continues until + it has no more points to add + * the learner type is `adaptive.IntegratorLearner`, it continues until the + error is less than the tolerance specified in the learner. + * otherwise, it continues forever, unless ``allow_running_forever`` is + False, in which case it raises a ValueError. learner Learner for which to determine the goal.
allow_running_forever From d7f6b9ceef22565ed513e2eb81d60a7fa1a9259a Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Fri, 14 Oct 2022 15:56:27 -0700 Subject: [PATCH 09/33] Add auto_goal to the docs --- docs/source/reference/adaptive.runner.extras.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/source/reference/adaptive.runner.extras.md b/docs/source/reference/adaptive.runner.extras.md index 510c17ec5..cb8cb1214 100644 --- a/docs/source/reference/adaptive.runner.extras.md +++ b/docs/source/reference/adaptive.runner.extras.md @@ -6,7 +6,14 @@ Runners allow you to specify the stopping criterion by providing a `goal` as a function that takes the learner and returns a boolean: `False` for "continue running" and `True` for "stop". This gives you a lot of flexibility for defining your own stopping conditions, however we also provide some common -stopping conditions as a convenience. +stopping conditions as a convenience. The `adaptive.runner.auto_goal` will +automatically create a goal based on simple input types, e.g., an int means +at least that many points are required and a float means that the loss has +to become lower or equal to that float. + +```{eval-rst} +.. autofunction:: adaptive.runner.auto_goal +``` ```{eval-rst} .. 
autofunction:: adaptive.runner.stop_after From 84808a1beec75f0cc54b6c12a42b2b62cfb7c7b0 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Fri, 14 Oct 2022 15:58:44 -0700 Subject: [PATCH 10/33] make compatible with older Python versions --- adaptive/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 2443cd2fc..2563a62aa 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -876,7 +876,7 @@ def __call__(self, _): elif isinstance(self.dt, datetime): return datetime.now() > self.dt else: - raise TypeError(f"{self.dt=} is not a datetime or timedelta.") + raise TypeError(f"`dt={self.dt}` is not a datetime or timedelta.") def auto_goal( From 7900b5abcaaeffe5040c47d35cc02c23429c1c38 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Fri, 14 Oct 2022 16:48:29 -0700 Subject: [PATCH 11/33] Import fixes --- adaptive/__init__.py | 3 ++- adaptive/runner.py | 8 ++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/adaptive/__init__.py b/adaptive/__init__.py index 98a42181a..c28e43fcb 100644 --- a/adaptive/__init__.py +++ b/adaptive/__init__.py @@ -1,6 +1,5 @@ from contextlib import suppress -from adaptive import learner, runner, utils from adaptive._version import __version__ from adaptive.learner import ( AverageLearner, @@ -22,6 +21,8 @@ ) from adaptive.runner import AsyncRunner, BlockingRunner, Runner +from adaptive import learner, runner, utils # isort:skip + __all__ = [ "learner", "runner", diff --git a/adaptive/runner.py b/adaptive/runner.py index 2563a62aa..ed7add6b0 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -14,15 +14,13 @@ import warnings from contextlib import suppress from datetime import datetime, timedelta -from typing import TYPE_CHECKING, Any, Callable +from typing import Any, Callable import loky +from adaptive import BalancingLearner, BaseLearner, IntegratorLearner, SequenceLearner from adaptive.notebook_integration import in_ipynb, live_info, live_plot 
-if TYPE_CHECKING: - from adaptive import BaseLearner - try: import ipyparallel @@ -918,8 +916,6 @@ def auto_goal( ------- Callable[[adaptive.BaseLearner], bool] """ - from adaptive import BalancingLearner, IntegratorLearner, SequenceLearner - if callable(goal): return goal if isinstance(goal, float): From 05be9488b3c81cbc1dd4e1c7dba8718442a75862 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Sun, 13 Nov 2022 16:49:59 -0800 Subject: [PATCH 12/33] Use loss_goal and npoints_goal --- README.md | 2 +- adaptive/runner.py | 75 ++++++++++++++----- adaptive/tests/test_average_learner.py | 2 +- adaptive/tests/test_learner1d.py | 10 +-- adaptive/tests/test_learnernd.py | 6 +- adaptive/tests/test_learners.py | 2 +- adaptive/tests/test_pickling.py | 4 +- adaptive/tests/test_runner.py | 30 ++++---- adaptive/tests/test_sequence_learner.py | 4 +- .../tests/unit/test_learnernd_integration.py | 8 +- docs/logo.py | 2 +- docs/source/algorithms_and_examples.md | 8 +- docs/source/logo.md | 2 +- .../tutorial/tutorial.AverageLearner.md | 2 +- .../tutorial/tutorial.BalancingLearner.md | 4 +- docs/source/tutorial/tutorial.DataSaver.md | 2 +- .../tutorial/tutorial.IntegratorLearner.md | 4 +- docs/source/tutorial/tutorial.Learner1D.md | 13 ++-- docs/source/tutorial/tutorial.Learner2D.md | 2 +- docs/source/tutorial/tutorial.LearnerND.md | 4 +- docs/source/tutorial/tutorial.SKOptLearner.md | 2 +- .../tutorial/tutorial.SequenceLearner.md | 4 +- .../tutorial/tutorial.advanced-topics.md | 18 ++--- docs/source/tutorial/tutorial.custom_loss.md | 6 +- docs/source/tutorial/tutorial.parallelism.md | 10 +-- example-notebook.ipynb | 46 ++++++------ 26 files changed, 153 insertions(+), 119 deletions(-) diff --git a/README.md b/README.md index 64a6a7e84..e414e1e1c 100644 --- a/README.md +++ b/README.md @@ -75,7 +75,7 @@ def peak(x, a=0.01): learner = Learner1D(peak, bounds=(-1, 1)) -runner = Runner(learner, goal=0.01) +runner = Runner(learner, loss_goal=0.01) runner.live_info() runner.live_plot() ``` 
diff --git a/adaptive/runner.py b/adaptive/runner.py index ed7add6b0..180ccd6f5 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -14,13 +14,19 @@ import warnings from contextlib import suppress from datetime import datetime, timedelta -from typing import Any, Callable +from typing import Any, Callable, Union import loky from adaptive import BalancingLearner, BaseLearner, IntegratorLearner, SequenceLearner from adaptive.notebook_integration import in_ipynb, live_info, live_plot +try: + from typing import TypeAlias +except ImportError: + # Python <3.10 (catch ImportError: `typing` exists, only the name is missing) + from typing_extensions import TypeAlias + try: import ipyparallel @@ -60,6 +66,10 @@ # and https://github.com/python-adaptive/adaptive/issues/301 _default_executor = loky.get_reusable_executor +GoalTypes: TypeAlias = Union[ + Callable[[BaseLearner], bool], int, float, datetime, timedelta, None +] + class BaseRunner(metaclass=abc.ABCMeta): r"""Base class for runners that use `concurrent.futures.Executors`. @@ -120,8 +130,10 @@ class BaseRunner(metaclass=abc.ABCMeta): def __init__( self, learner, - goal, *, + goal: GoalTypes = None, + loss_goal: float | None = None, + npoints_goal: int | None = None, executor=None, ntasks=None, log=False, @@ -132,7 +144,7 @@ def __init__( ): self.executor = _ensure_executor(executor) - self.goal = auto_goal(goal, learner, allow_running_forever) + self.goal = _goal(learner, goal, loss_goal, npoints_goal, allow_running_forever) self._max_tasks = ntasks @@ -376,8 +388,10 @@ class BlockingRunner(BaseRunner): def __init__( self, learner, - goal, *, + goal: GoalTypes = None, + loss_goal: float | None = None, + npoints_goal: int | None = None, executor=None, ntasks=None, log=False, @@ -389,7 +403,9 @@ def __init__( raise ValueError("Coroutine functions can only be used with 'AsyncRunner'.") super().__init__( learner, - goal, + goal=goal, + loss_goal=loss_goal, + npoints_goal=npoints_goal, executor=executor, ntasks=ntasks, log=log, @@ -508,8 +524,10 @@ class
AsyncRunner(BaseRunner): def __init__( self, learner, - goal=None, *, + goal: GoalTypes = None, + loss_goal: float | None = None, + npoints_goal: int | None = None, executor=None, ntasks=None, log=False, @@ -537,7 +555,9 @@ def __init__( super().__init__( learner, - goal, + goal=goal, + loss_goal=loss_goal, + npoints_goal=npoints_goal, executor=executor, ntasks=ntasks, log=log, @@ -717,7 +737,13 @@ async def _saver(): Runner = AsyncRunner -def simple(learner, goal): +def simple( + learner, + *, + goal: GoalTypes = None, + loss_goal: float | None = None, + npoints_goal: int | None = None, +): """Run the learner until the goal is reached. Requests a single point from the learner, evaluates @@ -736,7 +762,7 @@ def simple(learner, goal): The end condition for the calculation. This function must take the learner as its sole argument, and return True if we should stop. """ - goal = auto_goal(goal, learner) + goal = _goal(learner, goal, loss_goal, npoints_goal, allow_running_forever=False) while not goal(learner): xs, _ = learner.ask(1) for x in xs: @@ -871,14 +897,13 @@ def __call__(self, _): if self.start_time is None: self.start_time = datetime.now() return datetime.now() - self.start_time > self.dt - elif isinstance(self.dt, datetime): + if isinstance(self.dt, datetime): return datetime.now() > self.dt - else: - raise TypeError(f"`dt={self.dt}` is not a datetime or timedelta.") + raise TypeError(f"`dt={self.dt}` is not a datetime or timedelta.") def auto_goal( - goal: Callable[[BaseLearner], bool] | int | float | datetime | timedelta | None, + goal: GoalTypes, learner: BaseLearner, allow_running_forever: bool = True, ): @@ -935,12 +960,28 @@ def auto_goal( return SequenceLearner.done if isinstance(learner, IntegratorLearner): return IntegratorLearner.done - warnings.warn("Goal is None which means the learners continue forever!") - if allow_running_forever: - return lambda _: False - else: + if not allow_running_forever: raise ValueError( "Goal is None which means the 
learners" " continue forever and this is not allowed." ) + warnings.warn("Goal is None which means the learners continue forever!") + return lambda _: False raise ValueError("Cannot determine goal from {goal}.") + + +def _goal( + learner: BaseLearner, + goal: GoalTypes, + loss_goal: float | None, + npoints_goal: int | None, + allow_running_forever: bool, +): + # goal, loss_goal, npoints_goal are mutually exclusive, only one can be not None + if goal is not None and (loss_goal is not None or npoints_goal is not None): + raise ValueError("Either goal, loss_goal, or npoints_goal can be specified.") + if loss_goal is not None: + goal = float(loss_goal) + if npoints_goal is not None: + goal = int(npoints_goal) + return auto_goal(goal, learner, allow_running_forever) diff --git a/adaptive/tests/test_average_learner.py b/adaptive/tests/test_average_learner.py index 519188dc4..d0176858e 100644 --- a/adaptive/tests/test_average_learner.py +++ b/adaptive/tests/test_average_learner.py @@ -61,7 +61,7 @@ def constant_function(seed): learner = AverageLearner( constant_function, atol=0.01, rtol=0.01, min_npoints=min_npoints ) - simple(learner, 1.0) + simple(learner, loss_goal=1.0) assert learner.npoints >= max(2, min_npoints) diff --git a/adaptive/tests/test_learner1d.py b/adaptive/tests/test_learner1d.py index f006abf53..f3c555a04 100644 --- a/adaptive/tests/test_learner1d.py +++ b/adaptive/tests/test_learner1d.py @@ -298,7 +298,7 @@ def test_equal(l1, l2): for function in [f, f_vec]: learner = Learner1D(function, bounds=(-1, 1)) learner2 = Learner1D(function, bounds=(-1, 1)) - simple(learner, goal=200) + simple(learner, npoints_goal=200) xs, ys = zip(*learner.data.items()) # Make the scale huge to no get a scale doubling @@ -374,7 +374,7 @@ def f(x): loss = curvature_loss_function() assert loss.nth_neighbors == 1 learner = Learner1D(f, (-1, 1), loss_per_interval=loss) - simple(learner, goal=100) + simple(learner, npoints_goal=100) assert learner.npoints >= 100 @@ -385,7 +385,7 
@@ def f(x): loss = curvature_loss_function() assert loss.nth_neighbors == 1 learner = Learner1D(f, (-1, 1), loss_per_interval=loss) - simple(learner, goal=100) + simple(learner, npoints_goal=100) assert learner.npoints >= 100 @@ -398,7 +398,7 @@ def f(x): return x + a**2 / (a**2 + x**2) learner = Learner1D(f, bounds=(-1, 1)) - simple(learner, 100) + simple(learner, npoints_goal=100) def test_inf_loss_with_missing_bounds(): @@ -408,6 +408,6 @@ def test_inf_loss_with_missing_bounds(): loss_per_interval=curvature_loss_function(), ) # must be done in parallel because otherwise the bounds will be evaluated first - BlockingRunner(learner, goal=0.01) + BlockingRunner(learner, loss_goal=0.01) learner.npoints > 20 diff --git a/adaptive/tests/test_learnernd.py b/adaptive/tests/test_learnernd.py index 2c41fabee..0884b7eeb 100644 --- a/adaptive/tests/test_learnernd.py +++ b/adaptive/tests/test_learnernd.py @@ -33,8 +33,8 @@ def test_interior_vs_bbox_gives_same_result(): hull = scipy.spatial.ConvexHull(control._bounds_points) learner = LearnerND(f, bounds=hull) - simple(control, goal=0.1) - simple(learner, goal=0.1) + simple(control, loss_goal=0.1) + simple(learner, loss_goal=0.1) assert learner.data == control.data @@ -47,4 +47,4 @@ def test_vector_return_with_a_flat_layer(): h3 = lambda xy: np.array([0 * f(xy), g(xy)]) # noqa: E731 for function in [h1, h2, h3]: learner = LearnerND(function, bounds=[(-1, 1), (-1, 1)]) - simple(learner, goal=0.1) + simple(learner, loss_goal=0.1) diff --git a/adaptive/tests/test_learners.py b/adaptive/tests/test_learners.py index d393511fb..e800b6d39 100644 --- a/adaptive/tests/test_learners.py +++ b/adaptive/tests/test_learners.py @@ -103,7 +103,7 @@ def goal(): return get_goal(learner.learner) return get_goal(learner) - simple(learner, goal()) + simple(learner, goal=goal()) # Library of functions and associated learners. 
diff --git a/adaptive/tests/test_pickling.py b/adaptive/tests/test_pickling.py index c0d515320..baf5b1146 100644 --- a/adaptive/tests/test_pickling.py +++ b/adaptive/tests/test_pickling.py @@ -94,7 +94,7 @@ def test_serialization_for(learner_type, learner_kwargs, serializer, f): learner = learner_type(f, **learner_kwargs) - simple(learner, goal_1) + simple(learner, goal=goal_1) learner_bytes = serializer.dumps(learner) loss = learner.loss() asked = learner.ask(10) @@ -113,5 +113,5 @@ def test_serialization_for(learner_type, learner_kwargs, serializer, f): # load again to undo the ask learner_loaded = serializer.loads(learner_bytes) - simple(learner_loaded, goal_2) + simple(learner_loaded, goal=goal_2) assert learner_loaded.npoints == 20 diff --git a/adaptive/tests/test_runner.py b/adaptive/tests/test_runner.py index 295f9601e..c6ecdf91e 100644 --- a/adaptive/tests/test_runner.py +++ b/adaptive/tests/test_runner.py @@ -19,22 +19,18 @@ OPERATING_SYSTEM = platform.system() -def blocking_runner(learner, goal): - BlockingRunner(learner, goal, executor=SequentialExecutor()) +def blocking_runner(learner, **kw): + BlockingRunner(learner, executor=SequentialExecutor(), **kw) -def async_runner(learner, goal): - runner = AsyncRunner(learner, goal, executor=SequentialExecutor()) +def async_runner(learner, **kw): + runner = AsyncRunner(learner, executor=SequentialExecutor(), **kw) asyncio.get_event_loop().run_until_complete(runner.task) runners = [simple, blocking_runner, async_runner] -def trivial_goal(learner): - return learner.npoints > 10 - - @pytest.mark.parametrize("runner", runners) def test_simple(runner): """Test that the runners actually run.""" @@ -43,7 +39,7 @@ def f(x): return x learner = Learner1D(f, (-1, 1)) - runner(learner, 10) + runner(learner, npoints_goal=10) assert len(learner.data) >= 10 @@ -57,7 +53,7 @@ def test_nonconforming_output(runner): def f(x): return [0] - runner(Learner2D(f, ((-1, 1), (-1, 1))), trivial_goal) + runner(Learner2D(f, ((-1, 1), (-1, 
1))), npoints_goal=10) def test_aync_def_function(): @@ -65,7 +61,7 @@ async def f(x): return x learner = Learner1D(f, (-1, 1)) - runner = AsyncRunner(learner, trivial_goal) + runner = AsyncRunner(learner, npoints_goal=10) asyncio.get_event_loop().run_until_complete(runner.task) @@ -88,7 +84,7 @@ def test_concurrent_futures_executor(): BlockingRunner( Learner1D(linear, (-1, 1)), - trivial_goal, + npoints_goal=10, executor=ProcessPoolExecutor(max_workers=1), ) @@ -96,7 +92,7 @@ def test_concurrent_futures_executor(): def test_stop_after_goal(): seconds_to_wait = 0.2 # don't make this too large or the test will take ages start_time = time.time() - BlockingRunner(Learner1D(linear, (-1, 1)), stop_after(seconds=seconds_to_wait)) + BlockingRunner(Learner1D(linear, (-1, 1)), goal=stop_after(seconds=seconds_to_wait)) stop_time = time.time() assert stop_time - start_time > seconds_to_wait @@ -119,7 +115,7 @@ def test_ipyparallel_executor(): child.expect("Engines appear to have started successfully", timeout=35) ipyparallel_executor = Client() learner = Learner1D(linear, (-1, 1)) - BlockingRunner(learner, trivial_goal, executor=ipyparallel_executor) + BlockingRunner(learner, npoints_goal=10, executor=ipyparallel_executor) assert learner.npoints > 0 @@ -137,7 +133,7 @@ def test_distributed_executor(): learner = Learner1D(linear, (-1, 1)) client = Client(n_workers=1) - BlockingRunner(learner, trivial_goal, executor=client) + BlockingRunner(learner, npoints_goal=10, executor=client) client.shutdown() assert learner.npoints > 0 @@ -145,12 +141,12 @@ def test_distributed_executor(): def test_loky_executor(loky_executor): learner = Learner1D(lambda x: x, (-1, 1)) BlockingRunner( - learner, trivial_goal, executor=loky_executor, shutdown_executor=True + learner, npoints_goal=10, executor=loky_executor, shutdown_executor=True ) assert learner.npoints > 0 def test_default_executor(): learner = Learner1D(linear, (-1, 1)) - runner = AsyncRunner(learner, goal=10) + runner = 
AsyncRunner(learner, npoints_goal=10) asyncio.get_event_loop().run_until_complete(runner.task) diff --git a/adaptive/tests/test_sequence_learner.py b/adaptive/tests/test_sequence_learner.py index 68ca956ca..fdd3dcb10 100644 --- a/adaptive/tests/test_sequence_learner.py +++ b/adaptive/tests/test_sequence_learner.py @@ -19,8 +19,6 @@ def test_fail_with_sequence_of_unhashable(): # https://github.com/python-adaptive/adaptive/issues/265 seq = [{1: 1}] # unhashable learner = SequenceLearner(FailOnce(), sequence=seq) - runner = Runner( - learner, goal=SequenceLearner.done, retries=1, executor=SequentialExecutor() - ) + runner = Runner(learner, retries=1, executor=SequentialExecutor()) asyncio.get_event_loop().run_until_complete(runner.task) assert runner.status() == "finished" diff --git a/adaptive/tests/unit/test_learnernd_integration.py b/adaptive/tests/unit/test_learnernd_integration.py index 3cbd132b0..939108377 100644 --- a/adaptive/tests/unit/test_learnernd_integration.py +++ b/adaptive/tests/unit/test_learnernd_integration.py @@ -16,21 +16,21 @@ def ring_of_fire(xy, d=0.75): def test_learnerND_runs_to_10_points(): learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)]) - SimpleRunner(learner, goal=10) + SimpleRunner(learner, npoints_goal=10) assert learner.npoints == 10 @pytest.mark.parametrize("execution_number", range(5)) def test_learnerND_runs_to_10_points_Blocking(execution_number): learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)]) - BlockingRunner(learner, goal=10) + BlockingRunner(learner, npoints_goal=10) assert learner.npoints >= 10 def test_learnerND_curvature_runs_to_10_points(): loss = curvature_loss_function() learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss) - SimpleRunner(learner, goal=10) + SimpleRunner(learner, npoints_goal=10) assert learner.npoints == 10 @@ -38,7 +38,7 @@ def test_learnerND_curvature_runs_to_10_points(): def test_learnerND_curvature_runs_to_10_points_Blocking(execution_number): 
loss = curvature_loss_function() learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss) - BlockingRunner(learner, goal=10) + BlockingRunner(learner, npoints_goal=10) assert learner.npoints >= 10 diff --git a/docs/logo.py b/docs/logo.py index 3b0a64031..595728db6 100644 --- a/docs/logo.py +++ b/docs/logo.py @@ -22,7 +22,7 @@ def ring(xy): return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4) learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) - adaptive.runner.simple(learner, goal=0.01) + adaptive.runner.simple(learner, loss_goal=0.01) return learner diff --git a/docs/source/algorithms_and_examples.md b/docs/source/algorithms_and_examples.md index da674fe3c..48e4cb61e 100644 --- a/docs/source/algorithms_and_examples.md +++ b/docs/source/algorithms_and_examples.md @@ -102,7 +102,7 @@ def plot_loss_interval(learner): def plot(learner, npoints): - adaptive.runner.simple(learner, lambda l: l.npoints == npoints) + adaptive.runner.simple(learner, npoints_goal=npoints) return (learner.plot() * plot_loss_interval(learner))[:, -1.1:1.1] @@ -132,7 +132,7 @@ def ring(xy): def plot(learner, npoints): - adaptive.runner.simple(learner, lambda l: l.npoints == npoints) + adaptive.runner.simple(learner, npoints_goal=npoints) learner2 = adaptive.Learner2D(ring, bounds=learner.bounds) xs = ys = np.linspace(*learner.bounds[0], int(learner.npoints**0.5)) xys = list(itertools.product(xs, ys)) @@ -168,7 +168,7 @@ learner = adaptive.AverageLearner(g, atol=None, rtol=0.01) def plot(learner, npoints): - adaptive.runner.simple(learner, lambda l: l.npoints == npoints) + adaptive.runner.simple(learner, npoints_goal=npoints) return learner.plot().relabel(f"loss={learner.loss():.2f}") @@ -191,7 +191,7 @@ def sphere(xyz): learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)]) -adaptive.runner.simple(learner, lambda l: l.npoints == 5000) +adaptive.runner.simple(learner, npoints_goal=5000) fig = learner.plot_3D(return_fig=True) diff --git
a/docs/source/logo.md b/docs/source/logo.md index 2f46d3920..c0baf5ddd 100644 --- a/docs/source/logo.md +++ b/docs/source/logo.md @@ -110,7 +110,7 @@ def create_and_run_learner(): return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4) learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) - adaptive.runner.simple(learner, goal=0.005) + adaptive.runner.simple(learner, loss_goal=0.005) return learner diff --git a/docs/source/tutorial/tutorial.AverageLearner.md b/docs/source/tutorial/tutorial.AverageLearner.md index ad66398fb..2f53d396b 100644 --- a/docs/source/tutorial/tutorial.AverageLearner.md +++ b/docs/source/tutorial/tutorial.AverageLearner.md @@ -46,7 +46,7 @@ def g(n): ```{code-cell} ipython3 learner = adaptive.AverageLearner(g, atol=None, rtol=0.01) # `loss < 1.0` means that we reached the `rtol` or `atol` -runner = adaptive.Runner(learner, goal=1.0) +runner = adaptive.Runner(learner, loss_goal=1.0) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.BalancingLearner.md b/docs/source/tutorial/tutorial.BalancingLearner.md index c9f17ca24..5f43bdd64 100644 --- a/docs/source/tutorial/tutorial.BalancingLearner.md +++ b/docs/source/tutorial/tutorial.BalancingLearner.md @@ -46,7 +46,7 @@ learners = [ ] bal_learner = adaptive.BalancingLearner(learners) -runner = adaptive.Runner(bal_learner, goal=0.01) +runner = adaptive.Runner(bal_learner, loss_goal=0.01) ``` ```{code-cell} ipython3 @@ -86,7 +86,7 @@ learner = adaptive.BalancingLearner.from_product( jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos ) -runner = adaptive.BlockingRunner(learner, goal=0.01) +runner = adaptive.BlockingRunner(learner, loss_goal=0.01) # The `cdims` will automatically be set when using `from_product`, so # `plot()` will return a HoloMap with correctly labeled sliders. 
diff --git a/docs/source/tutorial/tutorial.DataSaver.md b/docs/source/tutorial/tutorial.DataSaver.md index 13a6666ce..4d4e0efc4 100644 --- a/docs/source/tutorial/tutorial.DataSaver.md +++ b/docs/source/tutorial/tutorial.DataSaver.md @@ -55,7 +55,7 @@ learner = adaptive.DataSaver(_learner, arg_picker=itemgetter("y")) `learner.learner` is the original learner, so `learner.learner.loss()` will call the correct loss method. ```{code-cell} ipython3 -runner = adaptive.Runner(learner, goal=lambda l: l.learner.loss() < 0.1) +runner = adaptive.Runner(learner, loss_goal=0.1) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.IntegratorLearner.md b/docs/source/tutorial/tutorial.IntegratorLearner.md index 0686344c9..8110512a9 100644 --- a/docs/source/tutorial/tutorial.IntegratorLearner.md +++ b/docs/source/tutorial/tutorial.IntegratorLearner.md @@ -61,7 +61,7 @@ learner = adaptive.IntegratorLearner(f24, bounds=(0, 3), tol=1e-8) # *this* process only. This means we don't pay # the overhead of evaluating the function in another process. runner = adaptive.Runner( - learner, executor=SequentialExecutor(), goal=lambda l: l.done() + learner, executor=SequentialExecutor() ) ``` @@ -75,7 +75,7 @@ await runner.task # This is not needed in a notebook environment! runner.live_info() ``` -Now we could do the live plotting again, but lets just wait untill the +Now we could do the live plotting again, but let's just wait until the runner is done. ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.Learner1D.md b/docs/source/tutorial/tutorial.Learner1D.md index c524ca439..5efb15e38 100644 --- a/docs/source/tutorial/tutorial.Learner1D.md +++ b/docs/source/tutorial/tutorial.Learner1D.md @@ -65,7 +65,7 @@ A {class}`~concurrent.futures.ProcessPoolExecutor` cannot be used on Windows for ```{code-cell} ipython3 # The end condition is when the "loss" is less than 0.01. 
In the context of the # 1D learner this means that we will resolve features in 'func' with width 0.01 or wider. -runner = adaptive.Runner(learner, goal=0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) ``` ```{code-cell} ipython3 @@ -124,7 +124,7 @@ The `Learner1D` can be used for such functions: ```{code-cell} ipython3 learner = adaptive.Learner1D(f_levels, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) ``` ```{code-cell} ipython3 @@ -156,7 +156,7 @@ from adaptive.learner.learner1D import ( curvature_loss = curvature_loss_function() learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=curvature_loss) -runner = adaptive.Runner(learner, goal=0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) ``` ```{code-cell} ipython3 @@ -187,11 +187,10 @@ learner_h = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=uniform_loss) learner_1 = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=default_loss) learner_2 = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=curvature_loss) -npoints_goal = 100 # adaptive.runner.simple is a non parallel blocking runner. 
-adaptive.runner.simple(learner_h, goal=npoints_goal) -adaptive.runner.simple(learner_1, goal=npoints_goal) -adaptive.runner.simple(learner_2, goal=npoints_goal) +adaptive.runner.simple(learner_h, npoints_goal=100) +adaptive.runner.simple(learner_1, npoints_goal=100) +adaptive.runner.simple(learner_2, npoints_goal=100) ( learner_h.plot().relabel("homogeneous") diff --git a/docs/source/tutorial/tutorial.Learner2D.md b/docs/source/tutorial/tutorial.Learner2D.md index 05107de85..d15446fe4 100644 --- a/docs/source/tutorial/tutorial.Learner2D.md +++ b/docs/source/tutorial/tutorial.Learner2D.md @@ -46,7 +46,7 @@ learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) ``` ```{code-cell} ipython3 -runner = adaptive.Runner(learner, goal=0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.LearnerND.md b/docs/source/tutorial/tutorial.LearnerND.md index cb72f5a23..e525fd9b8 100644 --- a/docs/source/tutorial/tutorial.LearnerND.md +++ b/docs/source/tutorial/tutorial.LearnerND.md @@ -50,7 +50,7 @@ def sphere(xyz): learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)]) -runner = adaptive.Runner(learner, goal=1e-3) +runner = adaptive.Runner(learner, loss_goal=1e-3) ``` ```{code-cell} ipython3 @@ -123,7 +123,7 @@ b = [(-1, -1, -1), (-1, 1, -1), (-1, -1, 1), (-1, 1, 1), (1, 1, -1), (1, -1, -1) hull = scipy.spatial.ConvexHull(b) learner = adaptive.LearnerND(f, hull) -adaptive.BlockingRunner(learner, goal=2000) +adaptive.BlockingRunner(learner, npoints_goal=2000) learner.plot_isosurface(-0.5) ``` diff --git a/docs/source/tutorial/tutorial.SKOptLearner.md b/docs/source/tutorial/tutorial.SKOptLearner.md index 742af5cd5..49a5340a8 100644 --- a/docs/source/tutorial/tutorial.SKOptLearner.md +++ b/docs/source/tutorial/tutorial.SKOptLearner.md @@ -47,7 +47,7 @@ learner = adaptive.SKOptLearner( acq_func="gp_hedge", acq_optimizer="lbfgs", ) -runner = adaptive.Runner(learner, ntasks=1, goal=40) 
+runner = adaptive.Runner(learner, ntasks=1, npoints_goal=40) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.SequenceLearner.md b/docs/source/tutorial/tutorial.SequenceLearner.md index 0d6bb71cc..5a60fbcdd 100644 --- a/docs/source/tutorial/tutorial.SequenceLearner.md +++ b/docs/source/tutorial/tutorial.SequenceLearner.md @@ -42,8 +42,8 @@ def f(x): seq = np.linspace(-15, 15, 1000) learner = SequenceLearner(f, seq) -runner = adaptive.Runner(learner, SequenceLearner.done) -# that goal is same as `lambda learner: learner.done()` +runner = adaptive.Runner(learner) +# not providing a goal is same as `lambda learner: learner.done()` ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.advanced-topics.md b/docs/source/tutorial/tutorial.advanced-topics.md index cb6a2c828..e6b6c55a3 100644 --- a/docs/source/tutorial/tutorial.advanced-topics.md +++ b/docs/source/tutorial/tutorial.advanced-topics.md @@ -51,7 +51,7 @@ learner = adaptive.Learner1D(f, bounds=(-1, 1)) control = adaptive.Learner1D(f, bounds=(-1, 1)) # Let's only run the learner -runner = adaptive.Runner(learner, goal=0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) ``` ```{code-cell} ipython3 @@ -90,7 +90,7 @@ def slow_f(x): learner = adaptive.Learner1D(slow_f, bounds=[0, 1]) -runner = adaptive.Runner(learner, goal=100) +runner = adaptive.Runner(learner, npoints_goal=100) runner.start_periodic_saving( save_kwargs=dict(fname="data/periodic_example.p"), interval=6 ) @@ -134,7 +134,7 @@ The simplest way to accomplish this is to use {class}`adaptive.BlockingRunner`: ```{code-cell} ipython3 learner = adaptive.Learner1D(f, bounds=(-1, 1)) -adaptive.BlockingRunner(learner, goal=0.01) +adaptive.BlockingRunner(learner, loss_goal=0.01) # This will only get run after the runner has finished learner.plot() ``` @@ -155,7 +155,7 @@ The simplest way is to use {class}`adaptive.runner.simple` to run your learner: learner = adaptive.Learner1D(f, bounds=(-1, 1)) # blocks until 
completion -adaptive.runner.simple(learner, goal=0.01) +adaptive.runner.simple(learner, loss_goal=0.01) learner.plot() ``` @@ -169,7 +169,7 @@ from adaptive.runner import SequentialExecutor learner = adaptive.Learner1D(f, bounds=(-1, 1)) runner = adaptive.Runner( - learner, executor=SequentialExecutor(), goal=0.01 + learner, executor=SequentialExecutor(), loss_goal=0.01 ) ``` @@ -292,7 +292,7 @@ One way to inspect runners is to instantiate one with `log=True`: ```{code-cell} ipython3 learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=0.01, log=True) +runner = adaptive.Runner(learner, loss_goal=0.01, log=True) ``` ```{code-cell} ipython3 @@ -351,7 +351,7 @@ async def time(runner): ioloop = asyncio.get_event_loop() learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) timer = ioloop.create_task(time(runner)) ``` @@ -436,7 +436,7 @@ To run the adaptive evaluation we provide the asynchronous function to the `lear ```{code-cell} ipython3 learner = adaptive.Learner1D(f_parallel, bounds=(-3.5, 3.5)) -runner = adaptive.AsyncRunner(learner, goal=lambda l: l.loss() < 0.01, ntasks=20) +runner = adaptive.AsyncRunner(learner, loss_goal=0.01, ntasks=20) ``` Finally we await for the runner to finish, and then plot the result. @@ -462,7 +462,7 @@ def f(x): learner = adaptive.Learner1D(f, (-1, 1)) -adaptive.BlockingRunner(learner, goal=0.1) +adaptive.BlockingRunner(learner, loss_goal=0.1) ``` If you use `asyncio` already in your script and want to integrate `adaptive` into it, then you can use the default {class}`~adaptive.Runner` as you would from a notebook. 
diff --git a/docs/source/tutorial/tutorial.custom_loss.md b/docs/source/tutorial/tutorial.custom_loss.md index 73ff707b7..f76af484d 100644 --- a/docs/source/tutorial/tutorial.custom_loss.md +++ b/docs/source/tutorial/tutorial.custom_loss.md @@ -74,7 +74,7 @@ def f_divergent_1d(x): learner = adaptive.Learner1D( f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d ) -runner = adaptive.BlockingRunner(learner, goal=0.01) +runner = adaptive.BlockingRunner(learner, loss_goal=0.01) learner.plot().select(y=(0, 10000)) ``` @@ -99,7 +99,7 @@ learner = adaptive.Learner2D( ) # this takes a while, so use the async Runner so we know *something* is happening -runner = adaptive.Runner(learner, goal= l.loss() < 0.03 or l.npoints > 1000) +runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.03 or l.npoints > 1000) ``` ```{code-cell} ipython3 @@ -154,7 +154,7 @@ def resolution_loss_function(min_distance=0, max_distance=1): loss = resolution_loss_function(min_distance=0.01) learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss) -runner = adaptive.BlockingRunner(learner, goal=0.02) +runner = adaptive.BlockingRunner(learner, loss_goal=0.02) learner.plot(tri_alpha=0.3).relabel("1 / (x^2 + y^2) in log scale").opts( hv.opts.EdgePaths(color="w"), hv.opts.Image(logz=True, colorbar=True) ) diff --git a/docs/source/tutorial/tutorial.parallelism.md b/docs/source/tutorial/tutorial.parallelism.md index bc71d909c..ef0963a3a 100644 --- a/docs/source/tutorial/tutorial.parallelism.md +++ b/docs/source/tutorial/tutorial.parallelism.md @@ -24,7 +24,7 @@ from concurrent.futures import ProcessPoolExecutor executor = ProcessPoolExecutor(max_workers=4) learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, executor=executor, goal=0.05) +runner = adaptive.Runner(learner, executor=executor, loss_goal=0.05) runner.live_info() runner.live_plot(update_interval=0.1) ``` @@ -37,7 +37,7 @@ import ipyparallel client = 
ipyparallel.Client() # You will need to start an `ipcluster` to make this work learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, executor=client, goal=0.01) +runner = adaptive.Runner(learner, executor=client, loss_goal=0.01) runner.live_info() runner.live_plot() ``` @@ -52,7 +52,7 @@ import distributed client = distributed.Client() learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, executor=client, goal=0.01) +runner = adaptive.Runner(learner, executor=client, loss_goal=0.01) runner.live_info() runner.live_plot(update_interval=0.1) ``` @@ -80,7 +80,7 @@ if __name__ == "__main__": learner, executor=MPIPoolExecutor(), shutdown_executor=True, - goal=0.01, + loss_goal=0.01, ) # periodically save the data (in case the job dies) @@ -132,6 +132,6 @@ ex = get_reusable_executor() f = lambda x: x learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=0.01, executor=ex) +runner = adaptive.Runner(learner, loss_goal=0.01, executor=ex) runner.live_info() ``` diff --git a/example-notebook.ipynb b/example-notebook.ipynb index e910874cc..d3a739056 100644 --- a/example-notebook.ipynb +++ b/example-notebook.ipynb @@ -106,7 +106,7 @@ "source": [ "# The end condition is when the \"loss\" is less than 0.01. 
In the context of the\n", "# 1D learner this means that we will resolve features in 'func' with width 0.01 or wider.\n", - "runner = adaptive.Runner(learner, goal=0.01)\n", + "runner = adaptive.Runner(learner, loss_goal=0.01)\n", "runner.live_info()" ] }, @@ -194,7 +194,7 @@ "metadata": {}, "outputs": [], "source": [ - "runner = adaptive.Runner(learner, goal=0.01)\n", + "runner = adaptive.Runner(learner, loss_goal=0.01)\n", "runner.live_info()" ] }, @@ -279,7 +279,7 @@ "outputs": [], "source": [ "learner = adaptive.AverageLearner(g, atol=None, rtol=0.01)\n", - "runner = adaptive.Runner(learner, goal=2.0)\n", + "runner = adaptive.Runner(learner, loss_goal=2.0)\n", "runner.live_info()" ] }, @@ -535,7 +535,7 @@ "outputs": [], "source": [ "learner = adaptive.Learner1D(f_levels, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, goal=0.01)\n", + "runner = adaptive.Runner(learner, loss_goal=0.01)\n", "runner.live_info()" ] }, @@ -571,7 +571,7 @@ "\n", "\n", "learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])\n", - "runner = adaptive.Runner(learner, goal=2000)\n", + "runner = adaptive.Runner(learner, npoints_goal=2000)\n", "runner.live_info()" ] }, @@ -682,7 +682,7 @@ "learner = adaptive.Learner1D(\n", " f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d\n", ")\n", - "runner = adaptive.BlockingRunner(learner, goal=0.01)\n", + "runner = adaptive.BlockingRunner(learner, loss_goal=0.01)\n", "learner.plot().select(y=(0, 10000))" ] }, @@ -719,7 +719,7 @@ ")\n", "\n", "# this takes a while, so use the async Runner so we know *something* is happening\n", - "runner = adaptive.Runner(learner, goal=0.02)\n", + "runner = adaptive.Runner(learner, loss_goal=0.02)\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.2, plotter=plot_logz)" ] @@ -781,7 +781,7 @@ "loss = partial(resolution_loss, min_distance=0.01)\n", "\n", "learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)\n", - "runner = 
adaptive.BlockingRunner(learner, goal=0.02)\n", + "runner = adaptive.BlockingRunner(learner, loss_goal=0.02)\n", "plot_logz(learner)" ] }, @@ -827,7 +827,7 @@ "]\n", "\n", "bal_learner = adaptive.BalancingLearner(learners)\n", - "runner = adaptive.Runner(bal_learner, goal=0.01)\n", + "runner = adaptive.Runner(bal_learner, loss_goal=0.01)\n", "runner.live_info()" ] }, @@ -871,7 +871,7 @@ " jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos\n", ")\n", "\n", - "runner = adaptive.BlockingRunner(learner, goal=0.01)\n", + "runner = adaptive.BlockingRunner(learner, loss_goal=0.01)\n", "\n", "# The `cdims` will automatically be set when using `from_product`, so\n", "# `plot()` will return a HoloMap with correctly labeled sliders.\n", @@ -936,7 +936,7 @@ "metadata": {}, "outputs": [], "source": [ - "runner = adaptive.Runner(learner, goal=0.05)\n", + "runner = adaptive.Runner(learner, loss_goal=0.05)\n", "runner.live_info()" ] }, @@ -1006,7 +1006,7 @@ " acq_func=\"gp_hedge\",\n", " acq_optimizer=\"lbfgs\",\n", ")\n", - "runner = adaptive.Runner(learner, ntasks=1, goal=40)\n", + "runner = adaptive.Runner(learner, ntasks=1, npoints_goal=40)\n", "runner.live_info()" ] }, @@ -1064,7 +1064,7 @@ "executor = ProcessPoolExecutor(max_workers=4)\n", "\n", "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, executor=executor, goal=0.05)\n", + "runner = adaptive.Runner(learner, executor=executor, loss_goal=0.05)\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.1)" ] @@ -1087,7 +1087,7 @@ "client = ipyparallel.Client() # You will need to start an `ipcluster` to make this work\n", "\n", "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, executor=client, goal=0.01)\n", + "runner = adaptive.Runner(learner, executor=client, loss_goal=0.01)\n", "runner.live_info()\n", "runner.live_plot()" ] @@ -1112,7 +1112,7 @@ "client = distributed.Client()\n", "\n", "learner = adaptive.Learner1D(peak, 
bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, executor=client, goal=0.01)\n", + "runner = adaptive.Runner(learner, executor=client, loss_goal=0.01)\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.1)" ] @@ -1164,7 +1164,7 @@ "control = adaptive.Learner1D(partial(peak, wait=False), bounds=(-1, 1))\n", "\n", "# Let's only run the learner\n", - "runner = adaptive.Runner(learner, goal=0.01)\n", + "runner = adaptive.Runner(learner, loss_goal=0.01)\n", "runner.live_info()" ] }, @@ -1240,7 +1240,7 @@ "\n", "\n", "learner = adaptive.Learner1D(slow_f, bounds=[0, 1])\n", - "runner = adaptive.Runner(learner, goal=100)\n", + "runner = adaptive.Runner(learner, npoints_goal=100)\n", "\n", "runner.start_periodic_saving(\n", " save_kwargs=dict(fname=\"data/periodic_example.p\"), interval=6\n", @@ -1336,7 +1336,7 @@ "outputs": [], "source": [ "learner = adaptive.Learner1D(partial(peak, wait=False), bounds=(-1, 1))\n", - "adaptive.BlockingRunner(learner, goal=0.005)\n", + "adaptive.BlockingRunner(learner, loss_goal=0.005)\n", "# This will only get run after the runner has finished\n", "learner.plot()" ] @@ -1370,7 +1370,7 @@ "learner = adaptive.Learner1D(partial(peak, wait=False), bounds=(-1, 1))\n", "\n", "# blocks until completion\n", - "adaptive.runner.simple(learner, goal=0.002)\n", + "adaptive.runner.simple(learner, loss_goal=0.002)\n", "\n", "learner.plot()" ] @@ -1395,7 +1395,7 @@ "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", "\n", "runner = adaptive.Runner(\n", - " learner, executor=SequentialExecutor(), goal=0.002\n", + " learner, executor=SequentialExecutor(), loss_goal=0.002\n", ")\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.1)" @@ -1544,7 +1544,7 @@ "outputs": [], "source": [ "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, goal=0.1, log=True)\n", + "runner = adaptive.Runner(learner, loss_goal=0.1, log=True)\n", "runner.live_info()" ] }, @@ -1629,7 +1629,7 @@ "ioloop = 
asyncio.get_event_loop()\n", "\n", "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, goal=0.1)\n", + "runner = adaptive.Runner(learner, loss_goal=0.1)\n", "\n", "timer = ioloop.create_task(time(runner))\n", "runner.live_info()" @@ -1668,7 +1668,7 @@ "\n", "learner = adaptive.Learner1D(peak, (-1, 1))\n", "\n", - "adaptive.BlockingRunner(learner, goal=0.1)\n", + "adaptive.BlockingRunner(learner, loss_goal=0.1)\n", "```\n", "\n", "If you use `asyncio` already in your script and want to integrate `adaptive` into it, then you can use the default `Runner` as you would from a notebook. If you want to wait for the runner to finish, then you can simply\n", From efe4a449a1d7c03917755410b250d52bb9a644aa Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Sun, 13 Nov 2022 17:38:16 -0800 Subject: [PATCH 13/33] DataSaver fix --- adaptive/runner.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 180ccd6f5..b31c92a6d 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -18,7 +18,13 @@ import loky -from adaptive import BalancingLearner, BaseLearner, IntegratorLearner, SequenceLearner +from adaptive import ( + BalancingLearner, + BaseLearner, + DataSaver, + IntegratorLearner, + SequenceLearner, +) from adaptive.notebook_integration import in_ipynb, live_info, live_plot try: @@ -955,6 +961,8 @@ def auto_goal( return lambda learner: learner.npoints >= goal if isinstance(goal, (timedelta, datetime)): return _TimeGoal(goal) + if isinstance(learner, DataSaver): + return auto_goal(goal, learner.learner, allow_running_forever) if goal is None: if isinstance(learner, SequenceLearner): return SequenceLearner.done From dbd81c310de0a2cb945ff4e7c7a80a9d7689e78b Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Sun, 13 Nov 2022 17:42:02 -0800 Subject: [PATCH 14/33] Use ImportError instead of ModuleNotFoundError --- adaptive/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index b31c92a6d..3adbb80f3 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -29,7 +29,7 @@ try: from typing import TypeAlias -except ModuleNotFoundError: +except ImportError: # Python <3.10 from typing_extensions import TypeAlias From 8fcaee9e12a7f612e00d7e837abd4f59b4604336 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Sun, 13 Nov 2022 17:42:35 -0800 Subject: [PATCH 15/33] Use flake8 from GitHub --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 24816a1fb..bb5218a81 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,7 +21,7 @@ repos: rev: 5.10.1 hooks: - id: isort - - repo: https://gitlab.com/pycqa/flake8 + - repo: https://github.com/pycqa/flake8 rev: 3.9.2 hooks: - id: flake8 From 873eb0f80ec93c9cb1b458ac4c7aa011301bda12 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Sun, 13 Nov 2022 17:54:11 -0800 Subject: [PATCH 16/33] Add doc-string entries for loss_goal and npoints_goal --- adaptive/runner.py | 49 ++++++++++++++++++---- docs/source/tutorial/tutorial.Learner1D.md | 2 +- 2 files changed, 42 insertions(+), 9 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 3adbb80f3..5ea0371d0 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -83,10 +83,18 @@ class BaseRunner(metaclass=abc.ABCMeta): Parameters ---------- learner : `~adaptive.BaseLearner` instance - goal : callable + goal : callable, optional The end condition for the calculation. This function must take the learner as its sole argument, and return True when we should - stop requesting more points. + stop requesting more points. (Advanced use) Instead of providing a + function, see `auto_goal` for other types that are accepted here. + loss_goal : float, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. 
Stop when the loss is smaller than this value. + npoints_goal : int, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the number of points is larger or + equal than this value. executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -340,7 +348,15 @@ class BlockingRunner(BaseRunner): goal : callable The end condition for the calculation. This function must take the learner as its sole argument, and return True when we should - stop requesting more points. + stop requesting more points. (Advanced use) Instead of providing a + function, see `auto_goal` for other types that are accepted here. + loss_goal : float + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the loss is smaller than this value. + npoints_goal : int + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the number of points is larger or + equal than this value. executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -465,8 +481,17 @@ class AsyncRunner(BaseRunner): goal : callable, optional The end condition for the calculation. This function must take the learner as its sole argument, and return True when we should - stop requesting more points. If not provided, the runner will run - forever, or until ``self.task.cancel()`` is called. + stop requesting more points. (Advanced use) Instead of providing a + function, see `auto_goal` for other types that are accepted here. + If not provided, the runner will run forever (or stop when no more + points can be added), or until ``self.task.cancel()`` is called. + loss_goal : float, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. 
Stop when the loss is smaller than this value. + npoints_goal : int, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the number of points is larger or + equal than this value. executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -765,8 +790,16 @@ def simple( ---------- learner : ~`adaptive.BaseLearner` instance goal : callable - The end condition for the calculation. This function must take the - learner as its sole argument, and return True if we should stop. + The end condition for the calculation. This function must take + the learner as its sole argument, and return True when we should + stop requesting more points. + loss_goal : float, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the loss is smaller than this value. + npoints_goal : int, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the number of points is larger or + equal than this value. """ goal = _goal(learner, goal, loss_goal, npoints_goal, allow_running_forever=False) while not goal(learner): @@ -912,7 +945,7 @@ def auto_goal( goal: GoalTypes, learner: BaseLearner, allow_running_forever: bool = True, -): +) -> Callable[[BaseLearner], bool]: """Extract a goal from the learners. 
Parameters diff --git a/docs/source/tutorial/tutorial.Learner1D.md b/docs/source/tutorial/tutorial.Learner1D.md index 5efb15e38..5d25061cb 100644 --- a/docs/source/tutorial/tutorial.Learner1D.md +++ b/docs/source/tutorial/tutorial.Learner1D.md @@ -124,7 +124,7 @@ The `Learner1D` can be used for such functions: ```{code-cell} ipython3 learner = adaptive.Learner1D(f_levels, bounds=(-1, 1)) -runner = adaptive.Runner(learner, loss_goal=0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) # continue until `learner.loss()<=1` ``` ```{code-cell} ipython3 From 470c58f5ab3903ce5ebeb09325bbbb95f7276d6d Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Sun, 13 Nov 2022 18:14:42 -0800 Subject: [PATCH 17/33] do not make goal keyword-only --- adaptive/runner.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 5ea0371d0..9d3a8e7f3 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -144,8 +144,8 @@ class BaseRunner(metaclass=abc.ABCMeta): def __init__( self, learner, - *, goal: GoalTypes = None, + *, loss_goal: float | None = None, npoints_goal: int | None = None, executor=None, @@ -410,8 +410,8 @@ class BlockingRunner(BaseRunner): def __init__( self, learner, - *, goal: GoalTypes = None, + *, loss_goal: float | None = None, npoints_goal: int | None = None, executor=None, @@ -555,8 +555,8 @@ class AsyncRunner(BaseRunner): def __init__( self, learner, - *, goal: GoalTypes = None, + *, loss_goal: float | None = None, npoints_goal: int | None = None, executor=None, @@ -770,8 +770,8 @@ async def _saver(): def simple( learner, - *, goal: GoalTypes = None, + *, loss_goal: float | None = None, npoints_goal: int | None = None, ): From 11e14c429e11f93831d6aaf1b213eccd7287dbd7 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Sun, 13 Nov 2022 18:40:12 -0800 Subject: [PATCH 18/33] add test_auto_goal --- adaptive/learner/data_saver.py | 14 +++++++++- adaptive/tests/test_runner.py | 48 
+++++++++++++++++++++++++++++++++- adaptive/utils.py | 3 ++- 3 files changed, 62 insertions(+), 3 deletions(-) diff --git a/adaptive/learner/data_saver.py b/adaptive/learner/data_saver.py index 074718e9b..0c7dd4c47 100644 --- a/adaptive/learner/data_saver.py +++ b/adaptive/learner/data_saver.py @@ -20,7 +20,7 @@ def _to_key(x): return tuple(x.values) if x.values.size > 1 else x.item() -class DataSaver: +class DataSaver(BaseLearner): """Save extra data associated with the values that need to be learned. Parameters @@ -50,6 +50,18 @@ def new(self) -> DataSaver: """Return a new `DataSaver` with the same `arg_picker` and `learner`.""" return DataSaver(self.learner.new(), self.arg_picker) + @copy_docstring_from(BaseLearner.ask) + def ask(self, *args, **kwargs): + return self.learner.ask(*args, **kwargs) + + @copy_docstring_from(BaseLearner.loss) + def loss(self, *args, **kwargs): + return self.learner.loss(*args, **kwargs) + + @copy_docstring_from(BaseLearner.remove_unfinished) + def remove_unfinished(self, *args, **kwargs): + return self.learner.remove_unfinished(*args, **kwargs) + def __getattr__(self, attr: str) -> Any: return getattr(self.learner, attr) diff --git a/adaptive/tests/test_runner.py b/adaptive/tests/test_runner.py index c6ecdf91e..a27bf5e7b 100644 --- a/adaptive/tests/test_runner.py +++ b/adaptive/tests/test_runner.py @@ -3,13 +3,22 @@ import sys import time +import numpy as np import pytest -from adaptive.learner import Learner1D, Learner2D +from adaptive.learner import ( + BalancingLearner, + DataSaver, + IntegratorLearner, + Learner1D, + Learner2D, + SequenceLearner, +) from adaptive.runner import ( AsyncRunner, BlockingRunner, SequentialExecutor, + auto_goal, simple, stop_after, with_distributed, @@ -150,3 +159,40 @@ def test_default_executor(): learner = Learner1D(linear, (-1, 1)) runner = AsyncRunner(learner, npoints_goal=10) asyncio.get_event_loop().run_until_complete(runner.task) + + +def test_auto_goal(): + learner = Learner1D(linear, (-1, 1)) 
+ simple(learner, auto_goal(4, learner)) + assert learner.npoints == 4 + + learner = Learner1D(linear, (-1, 1)) + simple(learner, auto_goal(0.5, learner)) + assert learner.loss() <= 0.5 + + learner = SequenceLearner(linear, np.linspace(-1, 1)) + simple(learner, auto_goal(None, learner)) + assert learner.done() + + learner = IntegratorLearner(linear, bounds=(0, 1), tol=0.1) + simple(learner, auto_goal(None, learner)) + assert learner.done() + + learner = Learner1D(linear, (-1, 1)) + learner = DataSaver(learner, lambda x: x) + simple(learner, auto_goal(4, learner)) + assert learner.npoints == 4 + + learner1 = Learner1D(linear, (-1, 1)) + learner2 = Learner1D(linear, (-2, 2)) + balancing_learner = BalancingLearner([learner1, learner2]) + simple(balancing_learner, auto_goal(4, balancing_learner)) + assert learner1.npoints == 4 and learner2.npoints == 4 + + learner1 = Learner1D(linear, bounds=(0, 1)) + learner1 = DataSaver(learner1, lambda x: x) + learner2 = Learner1D(linear, bounds=(0, 1)) + learner2 = DataSaver(learner2, lambda x: x) + balancing_learner = BalancingLearner([learner1, learner2]) + simple(balancing_learner, auto_goal(10, balancing_learner)) + assert learner1.npoints == 10 and learner2.npoints == 10 diff --git a/adaptive/utils.py b/adaptive/utils.py index ecacffc36..a87aeeb56 100644 --- a/adaptive/utils.py +++ b/adaptive/utils.py @@ -83,7 +83,8 @@ def load(fname: str, compress: bool = True) -> Any: def copy_docstring_from(other: Callable) -> Callable: def decorator(method): - return functools.wraps(other)(method) + method.__doc__ = other.__doc__ + return method return decorator From 2a77ca23aead7b75feb26288039052d685b92b36 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Mon, 14 Nov 2022 18:03:25 -0800 Subject: [PATCH 19/33] Implement losses explicitly --- adaptive/runner.py | 150 +++++++++++++++++------ adaptive/tests/test_balancing_learner.py | 14 +-- adaptive/tests/test_runner.py | 14 +-- 3 files changed, 127 insertions(+), 51 deletions(-) diff --git 
a/adaptive/runner.py b/adaptive/runner.py index 9d3a8e7f3..7a069fcaf 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -72,7 +72,7 @@ # and https://github.com/python-adaptive/adaptive/issues/301 _default_executor = loky.get_reusable_executor -GoalTypes: TypeAlias = Union[ +_GoalTypes: TypeAlias = Union[ Callable[[BaseLearner], bool], int, float, datetime, timedelta, None ] @@ -86,8 +86,7 @@ class BaseRunner(metaclass=abc.ABCMeta): goal : callable, optional The end condition for the calculation. This function must take the learner as its sole argument, and return True when we should - stop requesting more points. (Advanced use) Instead of providing a - function, see `auto_goal` for other types that are accepted here. + stop requesting more points. loss_goal : float, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the loss is smaller than this value. @@ -95,6 +94,14 @@ class BaseRunner(metaclass=abc.ABCMeta): Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the number of points is larger or equal than this value. + datetime_goal : datetime, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than this + value. + timedelta_goal : timedelta, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than + ``start_time + timedelta_goal``. 
executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -144,10 +151,12 @@ class BaseRunner(metaclass=abc.ABCMeta): def __init__( self, learner, - goal: GoalTypes = None, + goal: Callable[[BaseLearner], bool] | None = None, *, loss_goal: float | None = None, npoints_goal: int | None = None, + datetime_goal: datetime | None = None, + timedelta_goal: timedelta | None = None, executor=None, ntasks=None, log=False, @@ -158,7 +167,15 @@ def __init__( ): self.executor = _ensure_executor(executor) - self.goal = _goal(learner, goal, loss_goal, npoints_goal, allow_running_forever) + self.goal = _goal( + learner, + goal, + loss_goal, + npoints_goal, + datetime_goal, + timedelta_goal, + allow_running_forever, + ) self._max_tasks = ntasks @@ -348,8 +365,7 @@ class BlockingRunner(BaseRunner): goal : callable The end condition for the calculation. This function must take the learner as its sole argument, and return True when we should - stop requesting more points. (Advanced use) Instead of providing a - function, see `auto_goal` for other types that are accepted here. + stop requesting more points. loss_goal : float Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the loss is smaller than this value. @@ -410,10 +426,12 @@ class BlockingRunner(BaseRunner): def __init__( self, learner, - goal: GoalTypes = None, + goal: Callable[[BaseLearner], bool] | None = None, *, loss_goal: float | None = None, npoints_goal: int | None = None, + datetime_goal: datetime | None = None, + timedelta_goal: timedelta | None = None, executor=None, ntasks=None, log=False, @@ -481,8 +499,7 @@ class AsyncRunner(BaseRunner): goal : callable, optional The end condition for the calculation. This function must take the learner as its sole argument, and return True when we should - stop requesting more points. 
(Advanced use) Instead of providing a - function, see `auto_goal` for other types that are accepted here. + stop requesting more points. If not provided, the runner will run forever (or stop when no more points can be added), or until ``self.task.cancel()`` is called. loss_goal : float, optional @@ -492,6 +509,14 @@ class AsyncRunner(BaseRunner): Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the number of points is larger or equal than this value. + datetime_goal : datetime, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than this + value. + timedelta_goal : timedelta, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than + ``start_time + timedelta_goal``. executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -555,10 +580,12 @@ class AsyncRunner(BaseRunner): def __init__( self, learner, - goal: GoalTypes = None, + goal: Callable[[BaseLearner], bool] | None = None, *, loss_goal: float | None = None, npoints_goal: int | None = None, + datetime_goal: datetime | None = None, + timedelta_goal: timedelta | None = None, executor=None, ntasks=None, log=False, @@ -770,10 +797,12 @@ async def _saver(): def simple( learner, - goal: GoalTypes = None, + goal: Callable[[BaseLearner], bool] | None = None, *, loss_goal: float | None = None, npoints_goal: int | None = None, + datetime_goal: datetime | None = None, + timedelta_goal: timedelta | None = None, ): """Run the learner until the goal is reached. @@ -800,8 +829,24 @@ def simple( Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the number of points is larger or equal than this value. 
+ datetime_goal : datetime, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than this + value. + timedelta_goal : timedelta, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than + ``start_time + timedelta_goal``. """ - goal = _goal(learner, goal, loss_goal, npoints_goal, allow_running_forever=False) + goal = _goal( + learner, + goal, + loss_goal, + npoints_goal, + datetime_goal, + timedelta_goal, + allow_running_forever=False, + ) while not goal(learner): xs, _ = learner.ask(1) for x in xs: @@ -942,8 +987,12 @@ def __call__(self, _): def auto_goal( - goal: GoalTypes, - learner: BaseLearner, + *, + loss: float | None = None, + npoints: int | None = None, + datetime: datetime | None = None, + timedelta: timedelta | None = None, + learner: BaseLearner | None = None, allow_running_forever: bool = True, ) -> Callable[[BaseLearner], bool]: """Extract a goal from the learners. @@ -954,7 +1003,6 @@ def auto_goal( The goal to extract. Can be a callable, an integer, a float, a datetime, a timedelta or None. If the type of `goal` is: - * ``callable``, it is returned as is. * ``int``, the goal is reached after that many points have been added. * ``float``, the goal is reached when the learner has reached a loss @@ -980,23 +1028,36 @@ def auto_goal( ------- Callable[[adaptive.BaseLearner], bool] """ - if callable(goal): - return goal - if isinstance(goal, float): - return lambda learner: learner.loss() <= goal + kw = dict( + loss=loss, + npoints=npoints, + datetime=datetime, + timedelta=timedelta, + allow_running_forever=allow_running_forever, + ) + opts = (loss, npoints, datetime, timedelta) # all are mutually exclusive + if sum(v is not None for v in opts) > 1: + raise ValueError( + "Only one of loss, npoints, datetime, timedelta can be specified." 
+ ) + + if loss is not None: + return lambda learner: learner.loss() <= loss if isinstance(learner, BalancingLearner): # Note that the float loss goal is more efficiently implemented in the # BalancingLearner itself. That is why the previous if statement is # above this one. - goals = [auto_goal(goal, l, allow_running_forever) for l in learner.learners] + goals = [auto_goal(learner=l, **kw) for l in learner.learners] return lambda learner: all(goal(l) for l, goal in zip(learner.learners, goals)) - if isinstance(goal, int): - return lambda learner: learner.npoints >= goal - if isinstance(goal, (timedelta, datetime)): - return _TimeGoal(goal) + if npoints is not None: + return lambda learner: learner.npoints >= npoints + if datetime is not None: + return _TimeGoal(datetime) + if timedelta is not None: + return _TimeGoal(timedelta) if isinstance(learner, DataSaver): - return auto_goal(goal, learner.learner, allow_running_forever) - if goal is None: + return auto_goal(**kw, learner=learner.learner) + if all(v is None for v in opts): if isinstance(learner, SequenceLearner): return SequenceLearner.done if isinstance(learner, IntegratorLearner): @@ -1012,17 +1073,32 @@ def auto_goal( def _goal( - learner: BaseLearner, - goal: GoalTypes, + learner: BaseLearner | None, + goal: Callable[[BaseLearner], bool] | None, loss_goal: float | None, npoints_goal: int | None, + datetime_goal: datetime | None, + timedelta_goal: timedelta | None, allow_running_forever: bool, ): - # goal, loss_goal, npoints_goal are mutually exclusive, only one can be not None - if goal is not None and (loss_goal is not None or npoints_goal is not None): - raise ValueError("Either goal, loss_goal, or npoints_goal can be specified.") - if loss_goal is not None: - goal = float(loss_goal) - if npoints_goal is not None: - goal = int(npoints_goal) - return auto_goal(goal, learner, allow_running_forever) + if callable(goal): + return goal + + if goal is not None and ( + loss_goal is not None + or npoints_goal 
is not None + or datetime_goal is not None + or timedelta_goal is not None + ): + raise ValueError( + "Either goal, loss_goal, npoints_goal, datetime_goal or" + " timedelta_goal can be specified, not multiple." + ) + return auto_goal( + learner=learner, + loss=loss_goal, + npoints=npoints_goal, + datetime=datetime_goal, + timedelta=timedelta_goal, + allow_running_forever=allow_running_forever, + ) diff --git a/adaptive/tests/test_balancing_learner.py b/adaptive/tests/test_balancing_learner.py index 01e13cb53..b2e513698 100644 --- a/adaptive/tests/test_balancing_learner.py +++ b/adaptive/tests/test_balancing_learner.py @@ -50,15 +50,15 @@ def test_ask_0(strategy): @pytest.mark.parametrize( - "strategy, goal", + "strategy, goal_type, goal", [ - ("loss", 0.1), - ("loss_improvements", 0.1), - ("npoints", lambda bl: all(l.npoints > 10 for l in bl.learners)), - ("cycle", 0.1), + ("loss", "loss_goal", 0.1), + ("loss_improvements", "loss_goal", 0.1), + ("npoints", "goal", lambda bl: all(l.npoints > 10 for l in bl.learners)), + ("cycle", "loss_goal", 0.1), ], ) -def test_strategies(strategy, goal): +def test_strategies(strategy, goal_type, goal): learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for i in range(10)] learner = BalancingLearner(learners, strategy=strategy) - simple(learner, goal=goal) + simple(learner, **{goal_type: goal}) diff --git a/adaptive/tests/test_runner.py b/adaptive/tests/test_runner.py index a27bf5e7b..70334e11c 100644 --- a/adaptive/tests/test_runner.py +++ b/adaptive/tests/test_runner.py @@ -163,30 +163,30 @@ def test_default_executor(): def test_auto_goal(): learner = Learner1D(linear, (-1, 1)) - simple(learner, auto_goal(4, learner)) + simple(learner, auto_goal(npoints=4)) assert learner.npoints == 4 learner = Learner1D(linear, (-1, 1)) - simple(learner, auto_goal(0.5, learner)) + simple(learner, auto_goal(loss=0.5)) assert learner.loss() <= 0.5 learner = SequenceLearner(linear, np.linspace(-1, 1)) - simple(learner, auto_goal(None, learner)) + 
simple(learner, auto_goal(learner=learner)) assert learner.done() learner = IntegratorLearner(linear, bounds=(0, 1), tol=0.1) - simple(learner, auto_goal(None, learner)) + simple(learner, auto_goal(learner=learner)) assert learner.done() learner = Learner1D(linear, (-1, 1)) learner = DataSaver(learner, lambda x: x) - simple(learner, auto_goal(4, learner)) + simple(learner, auto_goal(npoints=4, learner=learner)) assert learner.npoints == 4 learner1 = Learner1D(linear, (-1, 1)) learner2 = Learner1D(linear, (-2, 2)) balancing_learner = BalancingLearner([learner1, learner2]) - simple(balancing_learner, auto_goal(4, balancing_learner)) + simple(balancing_learner, auto_goal(npoints=4, learner=balancing_learner)) assert learner1.npoints == 4 and learner2.npoints == 4 learner1 = Learner1D(linear, bounds=(0, 1)) @@ -194,5 +194,5 @@ def test_auto_goal(): learner2 = Learner1D(linear, bounds=(0, 1)) learner2 = DataSaver(learner2, lambda x: x) balancing_learner = BalancingLearner([learner1, learner2]) - simple(balancing_learner, auto_goal(10, balancing_learner)) + simple(balancing_learner, auto_goal(npoints=10, learner=balancing_learner)) assert learner1.npoints == 10 and learner2.npoints == 10 From 6f3437a9c1b14570c67ef089db627bab9195fed5 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Mon, 14 Nov 2022 18:09:00 -0800 Subject: [PATCH 20/33] Remove unused variable --- adaptive/runner.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 7a069fcaf..1fb730615 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -14,7 +14,7 @@ import warnings from contextlib import suppress from datetime import datetime, timedelta -from typing import Any, Callable, Union +from typing import Any, Callable import loky @@ -27,12 +27,6 @@ ) from adaptive.notebook_integration import in_ipynb, live_info, live_plot -try: - from typing import TypeAlias -except ImportError: - # Python <3.10 - from typing_extensions import 
TypeAlias - try: import ipyparallel @@ -72,10 +66,6 @@ # and https://github.com/python-adaptive/adaptive/issues/301 _default_executor = loky.get_reusable_executor -_GoalTypes: TypeAlias = Union[ - Callable[[BaseLearner], bool], int, float, datetime, timedelta, None -] - class BaseRunner(metaclass=abc.ABCMeta): r"""Base class for runners that use `concurrent.futures.Executors`. From be14576a843fe9878ce1a996a70cbb4318e11372 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Wed, 16 Nov 2022 21:41:29 -0800 Subject: [PATCH 21/33] pass parameters and add doc-strings --- adaptive/runner.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/adaptive/runner.py b/adaptive/runner.py index 1fb730615..449d8f579 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -113,6 +113,8 @@ class BaseRunner(metaclass=abc.ABCMeta): the point is present in ``runner.failed``. raise_if_retries_exceeded : bool, default: True Raise the error after a point ``x`` failed `retries`. + allow_running_forever : bool, default: False + Allow the runner to run forever when the goal is None. Attributes ---------- @@ -436,6 +438,8 @@ def __init__( goal=goal, loss_goal=loss_goal, npoints_goal=npoints_goal, + datetime_goal=datetime_goal, + timedelta_goal=timedelta_goal, executor=executor, ntasks=ntasks, log=log, @@ -531,6 +535,8 @@ class AsyncRunner(BaseRunner): the point is present in ``runner.failed``. raise_if_retries_exceeded : bool, default: True Raise the error after a point ``x`` failed `retries`. + allow_running_forever : bool, default: True + If True, the runner will run forever if the goal is not provided. 
Attributes ---------- @@ -606,6 +612,8 @@ def __init__( goal=goal, loss_goal=loss_goal, npoints_goal=npoints_goal, + datetime_goal=datetime_goal, + timedelta_goal=timedelta_goal, executor=executor, ntasks=ntasks, log=log, From 0f7d320574d50674b02ee4c208e2d1ce4bc27b62 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Wed, 16 Nov 2022 21:44:46 -0800 Subject: [PATCH 22/33] Allow timedelta_goal to be an int --- adaptive/runner.py | 39 +++++++++++++++------------------------ 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 449d8f579..eaf2d06fd 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -88,7 +88,7 @@ class BaseRunner(metaclass=abc.ABCMeta): Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than this value. - timedelta_goal : timedelta, optional + timedelta_goal : timedelta or int, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than ``start_time + timedelta_goal``. @@ -507,7 +507,7 @@ class AsyncRunner(BaseRunner): Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than this value. - timedelta_goal : timedelta, optional + timedelta_goal : timedelta or int, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than ``start_time + timedelta_goal``. @@ -831,7 +831,7 @@ def simple( Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than this value. - timedelta_goal : timedelta, optional + timedelta_goal : timedelta or int, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. 
Stop when the current time is larger or equal than ``start_time + timedelta_goal``. @@ -970,7 +970,9 @@ def _get_ncores(ex): class _TimeGoal: - def __init__(self, dt: timedelta | datetime): + def __init__(self, dt: timedelta | datetime | int): + if isinstance(dt, int): + self.dt = timedelta(seconds=dt) self.dt = dt self.start_time = None @@ -989,7 +991,7 @@ def auto_goal( loss: float | None = None, npoints: int | None = None, datetime: datetime | None = None, - timedelta: timedelta | None = None, + timedelta: timedelta | int | None = None, learner: BaseLearner | None = None, allow_running_forever: bool = True, ) -> Callable[[BaseLearner], bool]: @@ -997,25 +999,14 @@ def auto_goal( Parameters ---------- - goal - The goal to extract. Can be a callable, an integer, a float, a datetime, - a timedelta or None. - If the type of `goal` is: - * ``callable``, it is returned as is. - * ``int``, the goal is reached after that many points have been added. - * ``float``, the goal is reached when the learner has reached a loss - equal or less than that. - * `datetime.datetime`, the goal is reached when the current time is after the - datetime. - * `datetime.timedelta`, the goal is reached when the current time is after - the start time plus that timedelta. - * ``None`` and - * the learner type is `adaptive.SequenceLearner`, it continues until - it no more points to add - * the learner type is `adaptive.IntegratorLearner`, it continues until the - error is less than the tolerance specified in the learner. - * otherwise, it continues forever, unless ``allow_running_forever`` is - False, in which case it raises a ValueError. + loss + TODO + npoints + TODO + datetime + TODO + timedelta + TODO learner Learner for which to determine the goal. 
allow_running_forever From 8b300de10667236f1c7e0dcfcbc0c2098c906161 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Mon, 21 Nov 2022 15:50:13 -0800 Subject: [PATCH 23/33] Test timedelta goal --- adaptive/tests/test_runner.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/adaptive/tests/test_runner.py b/adaptive/tests/test_runner.py index 70334e11c..bf2c5b4b9 100644 --- a/adaptive/tests/test_runner.py +++ b/adaptive/tests/test_runner.py @@ -196,3 +196,9 @@ def test_auto_goal(): balancing_learner = BalancingLearner([learner1, learner2]) simple(balancing_learner, auto_goal(npoints=10, learner=balancing_learner)) assert learner1.npoints == 10 and learner2.npoints == 10 + + learner = Learner1D(linear, (-1, 1)) + t_start = time.time() + simple(learner, auto_goal(timedelta=1e-2, learner=learner)) + t_end = time.time() + assert t_end - t_start >= 1e-2 From ae6ab648cced3e14d988893d229905efb9df4979 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Mon, 21 Nov 2022 15:53:55 -0800 Subject: [PATCH 24/33] rename timedelta to duration --- adaptive/runner.py | 56 +++++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index eaf2d06fd..9bdae98e9 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -88,10 +88,10 @@ class BaseRunner(metaclass=abc.ABCMeta): Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than this value. - timedelta_goal : timedelta or int, optional + duration_goal : timedelta or number, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than - ``start_time + timedelta_goal``. + ``start_time + duration_goal``. 
executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -148,7 +148,7 @@ def __init__( loss_goal: float | None = None, npoints_goal: int | None = None, datetime_goal: datetime | None = None, - timedelta_goal: timedelta | None = None, + duration_goal: timedelta | None = None, executor=None, ntasks=None, log=False, @@ -165,7 +165,7 @@ def __init__( loss_goal, npoints_goal, datetime_goal, - timedelta_goal, + duration_goal, allow_running_forever, ) @@ -423,7 +423,7 @@ def __init__( loss_goal: float | None = None, npoints_goal: int | None = None, datetime_goal: datetime | None = None, - timedelta_goal: timedelta | None = None, + duration_goal: timedelta | None = None, executor=None, ntasks=None, log=False, @@ -439,7 +439,7 @@ def __init__( loss_goal=loss_goal, npoints_goal=npoints_goal, datetime_goal=datetime_goal, - timedelta_goal=timedelta_goal, + duration_goal=duration_goal, executor=executor, ntasks=ntasks, log=log, @@ -507,10 +507,10 @@ class AsyncRunner(BaseRunner): Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than this value. - timedelta_goal : timedelta or int, optional + duration_goal : timedelta or number, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than - ``start_time + timedelta_goal``. + ``start_time + duration_goal``. 
executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -581,7 +581,7 @@ def __init__( loss_goal: float | None = None, npoints_goal: int | None = None, datetime_goal: datetime | None = None, - timedelta_goal: timedelta | None = None, + duration_goal: timedelta | None = None, executor=None, ntasks=None, log=False, @@ -613,7 +613,7 @@ def __init__( loss_goal=loss_goal, npoints_goal=npoints_goal, datetime_goal=datetime_goal, - timedelta_goal=timedelta_goal, + duration_goal=duration_goal, executor=executor, ntasks=ntasks, log=log, @@ -800,7 +800,7 @@ def simple( loss_goal: float | None = None, npoints_goal: int | None = None, datetime_goal: datetime | None = None, - timedelta_goal: timedelta | None = None, + duration_goal: timedelta | None = None, ): """Run the learner until the goal is reached. @@ -831,10 +831,10 @@ def simple( Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than this value. - timedelta_goal : timedelta or int, optional + duration_goal : timedelta or number, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than - ``start_time + timedelta_goal``. + ``start_time + duration_goal``. 
""" goal = _goal( learner, @@ -842,7 +842,7 @@ def simple( loss_goal, npoints_goal, datetime_goal, - timedelta_goal, + duration_goal, allow_running_forever=False, ) while not goal(learner): @@ -970,8 +970,8 @@ def _get_ncores(ex): class _TimeGoal: - def __init__(self, dt: timedelta | datetime | int): - if isinstance(dt, int): + def __init__(self, dt: timedelta | datetime | int | float): + if not isinstance(dt, (timedelta, datetime)): self.dt = timedelta(seconds=dt) self.dt = dt self.start_time = None @@ -983,7 +983,7 @@ def __call__(self, _): return datetime.now() - self.start_time > self.dt if isinstance(self.dt, datetime): return datetime.now() > self.dt - raise TypeError(f"`dt={self.dt}` is not a datetime or timedelta.") + raise TypeError(f"`dt={self.dt}` is not a datetime, timedelta, or number.") def auto_goal( @@ -991,7 +991,7 @@ def auto_goal( loss: float | None = None, npoints: int | None = None, datetime: datetime | None = None, - timedelta: timedelta | int | None = None, + duration: timedelta | int | None = None, learner: BaseLearner | None = None, allow_running_forever: bool = True, ) -> Callable[[BaseLearner], bool]: @@ -1005,7 +1005,7 @@ def auto_goal( TODO datetime TODO - timedelta + duration TODO learner Learner for which to determine the goal. @@ -1021,13 +1021,13 @@ def auto_goal( loss=loss, npoints=npoints, datetime=datetime, - timedelta=timedelta, + duration=duration, allow_running_forever=allow_running_forever, ) - opts = (loss, npoints, datetime, timedelta) # all are mutually exclusive + opts = (loss, npoints, datetime, duration) # all are mutually exclusive if sum(v is not None for v in opts) > 1: raise ValueError( - "Only one of loss, npoints, datetime, timedelta can be specified." + "Only one of loss, npoints, datetime, duration can be specified." 
) if loss is not None: @@ -1042,8 +1042,8 @@ def auto_goal( return lambda learner: learner.npoints >= npoints if datetime is not None: return _TimeGoal(datetime) - if timedelta is not None: - return _TimeGoal(timedelta) + if duration is not None: + return _TimeGoal(duration) if isinstance(learner, DataSaver): return auto_goal(**kw, learner=learner.learner) if all(v is None for v in opts): @@ -1067,7 +1067,7 @@ def _goal( loss_goal: float | None, npoints_goal: int | None, datetime_goal: datetime | None, - timedelta_goal: timedelta | None, + duration_goal: timedelta | None, allow_running_forever: bool, ): if callable(goal): @@ -1077,17 +1077,17 @@ def _goal( loss_goal is not None or npoints_goal is not None or datetime_goal is not None - or timedelta_goal is not None + or duration_goal is not None ): raise ValueError( "Either goal, loss_goal, npoints_goal, datetime_goal or" - " timedelta_goal can be specified, not multiple." + " duration_goal can be specified, not multiple." ) return auto_goal( learner=learner, loss=loss_goal, npoints=npoints_goal, datetime=datetime_goal, - timedelta=timedelta_goal, + duration=duration_goal, allow_running_forever=allow_running_forever, ) From 761afa867e01686ffcfcac95b54fa37f11ef14ae Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Mon, 21 Nov 2022 15:57:04 -0800 Subject: [PATCH 25/33] rename datetime_goal to end_time_goal --- adaptive/runner.py | 47 ++++++++++++++++++----------------- adaptive/tests/test_runner.py | 2 +- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 9bdae98e9..d04352902 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -84,7 +84,7 @@ class BaseRunner(metaclass=abc.ABCMeta): Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the number of points is larger or equal than this value. - datetime_goal : datetime, optional + end_time_goal : datetime, optional Convenience argument, use instead of ``goal``. 
The end condition for the calculation. Stop when the current time is larger or equal than this value. @@ -147,7 +147,7 @@ def __init__( *, loss_goal: float | None = None, npoints_goal: int | None = None, - datetime_goal: datetime | None = None, + end_time_goal: datetime | None = None, duration_goal: timedelta | None = None, executor=None, ntasks=None, @@ -164,7 +164,7 @@ def __init__( goal, loss_goal, npoints_goal, - datetime_goal, + end_time_goal, duration_goal, allow_running_forever, ) @@ -422,7 +422,7 @@ def __init__( *, loss_goal: float | None = None, npoints_goal: int | None = None, - datetime_goal: datetime | None = None, + end_time_goal: datetime | None = None, duration_goal: timedelta | None = None, executor=None, ntasks=None, @@ -438,7 +438,7 @@ def __init__( goal=goal, loss_goal=loss_goal, npoints_goal=npoints_goal, - datetime_goal=datetime_goal, + end_time_goal=end_time_goal, duration_goal=duration_goal, executor=executor, ntasks=ntasks, @@ -503,7 +503,7 @@ class AsyncRunner(BaseRunner): Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the number of points is larger or equal than this value. - datetime_goal : datetime, optional + end_time_goal : datetime, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than this value. 
@@ -580,7 +580,7 @@ def __init__( *, loss_goal: float | None = None, npoints_goal: int | None = None, - datetime_goal: datetime | None = None, + end_time_goal: datetime | None = None, duration_goal: timedelta | None = None, executor=None, ntasks=None, @@ -612,7 +612,7 @@ def __init__( goal=goal, loss_goal=loss_goal, npoints_goal=npoints_goal, - datetime_goal=datetime_goal, + end_time_goal=end_time_goal, duration_goal=duration_goal, executor=executor, ntasks=ntasks, @@ -799,7 +799,7 @@ def simple( *, loss_goal: float | None = None, npoints_goal: int | None = None, - datetime_goal: datetime | None = None, + end_time_goal: datetime | None = None, duration_goal: timedelta | None = None, ): """Run the learner until the goal is reached. @@ -827,7 +827,7 @@ def simple( Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the number of points is larger or equal than this value. - datetime_goal : datetime, optional + end_time_goal : datetime, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than this value. 
@@ -841,7 +841,7 @@ def simple( goal, loss_goal, npoints_goal, - datetime_goal, + end_time_goal, duration_goal, allow_running_forever=False, ) @@ -973,7 +973,8 @@ class _TimeGoal: def __init__(self, dt: timedelta | datetime | int | float): if not isinstance(dt, (timedelta, datetime)): self.dt = timedelta(seconds=dt) - self.dt = dt + else: + self.dt = dt self.start_time = None def __call__(self, _): @@ -990,7 +991,7 @@ def auto_goal( *, loss: float | None = None, npoints: int | None = None, - datetime: datetime | None = None, + end_time: datetime | None = None, duration: timedelta | int | None = None, learner: BaseLearner | None = None, allow_running_forever: bool = True, @@ -1003,7 +1004,7 @@ def auto_goal( TODO npoints TODO - datetime + end_time TODO duration TODO @@ -1020,14 +1021,14 @@ def auto_goal( kw = dict( loss=loss, npoints=npoints, - datetime=datetime, + end_time=end_time, duration=duration, allow_running_forever=allow_running_forever, ) - opts = (loss, npoints, datetime, duration) # all are mutually exclusive + opts = (loss, npoints, end_time, duration) # all are mutually exclusive if sum(v is not None for v in opts) > 1: raise ValueError( - "Only one of loss, npoints, datetime, duration can be specified." + "Only one of loss, npoints, end_time, duration can be specified." 
) if loss is not None: @@ -1040,8 +1041,8 @@ def auto_goal( return lambda learner: all(goal(l) for l, goal in zip(learner.learners, goals)) if npoints is not None: return lambda learner: learner.npoints >= npoints - if datetime is not None: - return _TimeGoal(datetime) + if end_time is not None: + return _TimeGoal(end_time) if duration is not None: return _TimeGoal(duration) if isinstance(learner, DataSaver): @@ -1066,7 +1067,7 @@ def _goal( goal: Callable[[BaseLearner], bool] | None, loss_goal: float | None, npoints_goal: int | None, - datetime_goal: datetime | None, + end_time_goal: datetime | None, duration_goal: timedelta | None, allow_running_forever: bool, ): @@ -1076,18 +1077,18 @@ def _goal( if goal is not None and ( loss_goal is not None or npoints_goal is not None - or datetime_goal is not None + or end_time_goal is not None or duration_goal is not None ): raise ValueError( - "Either goal, loss_goal, npoints_goal, datetime_goal or" + "Either goal, loss_goal, npoints_goal, end_time_goal or" " duration_goal can be specified, not multiple." 
) return auto_goal( learner=learner, loss=loss_goal, npoints=npoints_goal, - datetime=datetime_goal, + end_time=end_time_goal, duration=duration_goal, allow_running_forever=allow_running_forever, ) diff --git a/adaptive/tests/test_runner.py b/adaptive/tests/test_runner.py index bf2c5b4b9..e36abcbe1 100644 --- a/adaptive/tests/test_runner.py +++ b/adaptive/tests/test_runner.py @@ -199,6 +199,6 @@ def test_auto_goal(): learner = Learner1D(linear, (-1, 1)) t_start = time.time() - simple(learner, auto_goal(timedelta=1e-2, learner=learner)) + simple(learner, auto_goal(duration=1e-2, learner=learner)) t_end = time.time() assert t_end - t_start >= 1e-2 From c96e5a42423e5b26fea4ebf2be3e5e68c121e56f Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Tue, 22 Nov 2022 10:03:56 -0800 Subject: [PATCH 26/33] complete doc-string of auto_goal --- adaptive/runner.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index d04352902..85247f5cf 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -91,7 +91,8 @@ class BaseRunner(metaclass=abc.ABCMeta): duration_goal : timedelta or number, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than - ``start_time + duration_goal``. + ``start_time + duration_goal``. ``duration_goal`` can be a number + indicating the number of seconds executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -510,7 +511,8 @@ class AsyncRunner(BaseRunner): duration_goal : timedelta or number, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than - ``start_time + duration_goal``. + ``start_time + duration_goal``. ``duration_goal`` can be a number + indicating the number of seconds. 
executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -834,7 +836,8 @@ def simple( duration_goal : timedelta or number, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than - ``start_time + duration_goal``. + ``start_time + duration_goal``. ``duration_goal`` can be a number + indicating the number of seconds. """ goal = _goal( learner, @@ -1000,14 +1003,16 @@ def auto_goal( Parameters ---------- - loss - TODO - npoints - TODO - end_time - TODO - duration - TODO + loss : float, optional + Stop when the loss is smaller than this value. + npoints : int, optional + Stop when the number of points is larger or equal than this value. + end_time : datetime, optional + Stop when the current time is larger or equal than this value. + duration : timedelta or number, optional + Stop when the current time is larger or equal than + ``start_time + duration``. ``duration`` can be a number + indicating the number of seconds. learner Learner for which to determine the goal. 
allow_running_forever From 84bd796e4733c955731cf02aac8b3e00ed3ffb38 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Tue, 22 Nov 2022 20:34:57 -0800 Subject: [PATCH 27/33] No longer restrict scikit-learn --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 08a7948f5..653e2e41b 100644 --- a/setup.py +++ b/setup.py @@ -60,7 +60,7 @@ def get_version_and_cmdclass(package_name): "distributed", "ipyparallel>=6.2.5", # because of https://github.com/ipython/ipyparallel/issues/404 "scikit-optimize>=0.8.1", # because of https://github.com/scikit-optimize/scikit-optimize/issues/931 - "scikit-learn<=0.24.2", # because of https://github.com/scikit-optimize/scikit-optimize/issues/1059 + "scikit-learn", "wexpect" if os.name == "nt" else "pexpect", ], } From 68b31439062a94ca2f4458f5395400aeb9068189 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Tue, 22 Nov 2022 21:09:22 -0800 Subject: [PATCH 28/33] fix type annotations --- adaptive/runner.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 85247f5cf..ca1268590 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -149,7 +149,7 @@ def __init__( loss_goal: float | None = None, npoints_goal: int | None = None, end_time_goal: datetime | None = None, - duration_goal: timedelta | None = None, + duration_goal: timedelta | int | float | None = None, executor=None, ntasks=None, log=False, @@ -424,7 +424,7 @@ def __init__( loss_goal: float | None = None, npoints_goal: int | None = None, end_time_goal: datetime | None = None, - duration_goal: timedelta | None = None, + duration_goal: timedelta | int | float | None = None, executor=None, ntasks=None, log=False, @@ -583,7 +583,7 @@ def __init__( loss_goal: float | None = None, npoints_goal: int | None = None, end_time_goal: datetime | None = None, - duration_goal: timedelta | None = None, + duration_goal: timedelta | int | float | None = None, executor=None, 
ntasks=None, log=False, @@ -802,7 +802,7 @@ def simple( loss_goal: float | None = None, npoints_goal: int | None = None, end_time_goal: datetime | None = None, - duration_goal: timedelta | None = None, + duration_goal: timedelta | int | float | None = None, ): """Run the learner until the goal is reached. @@ -995,7 +995,7 @@ def auto_goal( loss: float | None = None, npoints: int | None = None, end_time: datetime | None = None, - duration: timedelta | int | None = None, + duration: timedelta | int | float | None = None, learner: BaseLearner | None = None, allow_running_forever: bool = True, ) -> Callable[[BaseLearner], bool]: From 881a2d3a8c81383f0649a90ed88ef72eaaa1b45e Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Wed, 23 Nov 2022 11:09:50 -0800 Subject: [PATCH 29/33] Update "Stopping Criteria" Runner docs Co-authored-by: Joseph Weston --- docs/source/reference/adaptive.runner.extras.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/source/reference/adaptive.runner.extras.md b/docs/source/reference/adaptive.runner.extras.md index cb8cb1214..d2dfaa89c 100644 --- a/docs/source/reference/adaptive.runner.extras.md +++ b/docs/source/reference/adaptive.runner.extras.md @@ -6,10 +6,9 @@ Runners allow you to specify the stopping criterion by providing a `goal` as a function that takes the learner and returns a boolean: `False` for "continue running" and `True` for "stop". This gives you a lot of flexibility for defining your own stopping conditions, however we also provide some common -stopping conditions as a convenience. The `adaptive.runner.auto_goal` will -automatically create a goal based on simple input types, e.g., an int means -at least that many points are required and a float means that the loss has -to become lower or equal to that float. +stopping conditions as a convenience. For example, to continue until the loss is below a threshold `x`, +you may specify `loss_goal=x`. 
Similarly, to continue until `n` points have been sampled, you may +specify `npoints_goal=n`. See the Runner docstring for details. ```{eval-rst} .. autofunction:: adaptive.runner.auto_goal From efb46f15ec5b6597595b6fe829ea8f61f209fd12 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Wed, 23 Nov 2022 11:10:04 -0800 Subject: [PATCH 30/33] Fix loss in tutorial Co-authored-by: Joseph Weston --- docs/source/tutorial/tutorial.Learner1D.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/tutorial/tutorial.Learner1D.md b/docs/source/tutorial/tutorial.Learner1D.md index 5d25061cb..db80e03ec 100644 --- a/docs/source/tutorial/tutorial.Learner1D.md +++ b/docs/source/tutorial/tutorial.Learner1D.md @@ -124,7 +124,7 @@ The `Learner1D` can be used for such functions: ```{code-cell} ipython3 learner = adaptive.Learner1D(f_levels, bounds=(-1, 1)) -runner = adaptive.Runner(learner, loss_goal=0.01) # continue until `learner.loss()<=1` +runner = adaptive.Runner(learner, loss_goal=0.01) # continue until `learner.loss()<=0.01` ``` ```{code-cell} ipython3 From 563a633fa5f06cc91681f67757912527e9b44d1b Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Wed, 23 Nov 2022 11:11:01 -0800 Subject: [PATCH 31/33] Add missing end_time_goal and duration_goal to doc-string --- adaptive/runner.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/adaptive/runner.py b/adaptive/runner.py index ca1268590..f35017786 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -366,6 +366,15 @@ class BlockingRunner(BaseRunner): Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the number of points is larger or equal than this value. + end_time_goal : datetime, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than this + value. + duration_goal : timedelta or number, optional + Convenience argument, use instead of ``goal``. 
The end condition for the + calculation. Stop when the current time is larger or equal than + ``start_time + duration_goal``. ``duration_goal`` can be a number + indicating the number of seconds executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional From 2186c025689b295eb0d7a5d123daa2ab9509a547 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Wed, 23 Nov 2022 11:18:24 -0800 Subject: [PATCH 32/33] Use exact same text in different runners --- adaptive/runner.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index f35017786..5db5d0b9c 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -92,7 +92,7 @@ class BaseRunner(metaclass=abc.ABCMeta): Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than ``start_time + duration_goal``. ``duration_goal`` can be a number - indicating the number of seconds + indicating the number of seconds. executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -355,14 +355,14 @@ class BlockingRunner(BaseRunner): Parameters ---------- learner : `~adaptive.BaseLearner` instance - goal : callable + goal : callable, optional The end condition for the calculation. This function must take the learner as its sole argument, and return True when we should stop requesting more points. - loss_goal : float + loss_goal : float, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the loss is smaller than this value. - npoints_goal : int + npoints_goal : int, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the number of points is larger or equal than this value. 
@@ -374,7 +374,7 @@ class BlockingRunner(BaseRunner): Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the current time is larger or equal than ``start_time + duration_goal``. ``duration_goal`` can be a number - indicating the number of seconds + indicating the number of seconds. executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -505,7 +505,7 @@ class AsyncRunner(BaseRunner): the learner as its sole argument, and return True when we should stop requesting more points. If not provided, the runner will run forever (or stop when no more - points can be added), or until ``self.task.cancel()`` is called. + points can be added), or until ``runner.task.cancel()`` is called. loss_goal : float, optional Convenience argument, use instead of ``goal``. The end condition for the calculation. Stop when the loss is smaller than this value. @@ -827,7 +827,7 @@ def simple( Parameters ---------- learner : ~`adaptive.BaseLearner` instance - goal : callable + goal : callable, optional The end condition for the calculation. This function must take the learner as its sole argument, and return True when we should stop requesting more points. From b5f6f26541854a75b75624f18f9abdb17e38b2e9 Mon Sep 17 00:00:00 2001 From: Bas Nijholt Date: Wed, 23 Nov 2022 11:20:06 -0800 Subject: [PATCH 33/33] Specify when learner is used in auto_goal --- adaptive/runner.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/adaptive/runner.py b/adaptive/runner.py index 5db5d0b9c..9a8ff0fd1 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -1023,7 +1023,8 @@ def auto_goal( ``start_time + duration``. ``duration`` can be a number indicating the number of seconds. learner - Learner for which to determine the goal. + Learner for which to determine the goal. 
Only used if the learner type + is `BalancingLearner`, `DataSaver`, `SequenceLearner`, or `IntegratorLearner`. allow_running_forever If True, and the goal is None and the learner is not a SequenceLearner, then a goal that never stops is returned, otherwise an exception is raised.