diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 24816a1fb..bb5218a81 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,7 +21,7 @@ repos: rev: 5.10.1 hooks: - id: isort - - repo: https://gitlab.com/pycqa/flake8 + - repo: https://github.com/pycqa/flake8 rev: 3.9.2 hooks: - id: flake8 diff --git a/README.md b/README.md index f5ad0b922..e414e1e1c 100644 --- a/README.md +++ b/README.md @@ -75,7 +75,7 @@ def peak(x, a=0.01): learner = Learner1D(peak, bounds=(-1, 1)) -runner = Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = Runner(learner, loss_goal=0.01) runner.live_info() runner.live_plot() ``` diff --git a/adaptive/__init__.py b/adaptive/__init__.py index 98a42181a..c28e43fcb 100644 --- a/adaptive/__init__.py +++ b/adaptive/__init__.py @@ -1,6 +1,5 @@ from contextlib import suppress -from adaptive import learner, runner, utils from adaptive._version import __version__ from adaptive.learner import ( AverageLearner, @@ -22,6 +21,8 @@ ) from adaptive.runner import AsyncRunner, BlockingRunner, Runner +from adaptive import learner, runner, utils # isort:skip + __all__ = [ "learner", "runner", diff --git a/adaptive/learner/data_saver.py b/adaptive/learner/data_saver.py index 074718e9b..0c7dd4c47 100644 --- a/adaptive/learner/data_saver.py +++ b/adaptive/learner/data_saver.py @@ -20,7 +20,7 @@ def _to_key(x): return tuple(x.values) if x.values.size > 1 else x.item() -class DataSaver: +class DataSaver(BaseLearner): """Save extra data associated with the values that need to be learned. 
Parameters @@ -50,6 +50,18 @@ def new(self) -> DataSaver: """Return a new `DataSaver` with the same `arg_picker` and `learner`.""" return DataSaver(self.learner.new(), self.arg_picker) + @copy_docstring_from(BaseLearner.ask) + def ask(self, *args, **kwargs): + return self.learner.ask(*args, **kwargs) + + @copy_docstring_from(BaseLearner.loss) + def loss(self, *args, **kwargs): + return self.learner.loss(*args, **kwargs) + + @copy_docstring_from(BaseLearner.remove_unfinished) + def remove_unfinished(self, *args, **kwargs): + return self.learner.remove_unfinished(*args, **kwargs) + def __getattr__(self, attr: str) -> Any: return getattr(self.learner, attr) diff --git a/adaptive/runner.py b/adaptive/runner.py index ecc7fe14b..9a8ff0fd1 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -13,15 +13,20 @@ import traceback import warnings from contextlib import suppress -from typing import TYPE_CHECKING, Any, Callable +from datetime import datetime, timedelta +from typing import Any, Callable import loky +from adaptive import ( + BalancingLearner, + BaseLearner, + DataSaver, + IntegratorLearner, + SequenceLearner, +) from adaptive.notebook_integration import in_ipynb, live_info, live_plot -if TYPE_CHECKING: - from adaptive import BaseLearner - try: import ipyparallel @@ -68,10 +73,26 @@ class BaseRunner(metaclass=abc.ABCMeta): Parameters ---------- learner : `~adaptive.BaseLearner` instance - goal : callable + goal : callable, optional The end condition for the calculation. This function must take the learner as its sole argument, and return True when we should stop requesting more points. + loss_goal : float, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the loss is smaller than this value. + npoints_goal : int, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the number of points is larger or + equal than this value. 
+ end_time_goal : datetime, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than this + value. + duration_goal : timedelta or number, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than + ``start_time + duration_goal``. ``duration_goal`` can be a number + indicating the number of seconds. executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -93,6 +114,8 @@ class BaseRunner(metaclass=abc.ABCMeta): the point is present in ``runner.failed``. raise_if_retries_exceeded : bool, default: True Raise the error after a point ``x`` failed `retries`. + allow_running_forever : bool, default: False + Allow the runner to run forever when the goal is None. Attributes ---------- @@ -121,18 +144,31 @@ class BaseRunner(metaclass=abc.ABCMeta): def __init__( self, learner, - goal, + goal: Callable[[BaseLearner], bool] | None = None, *, + loss_goal: float | None = None, + npoints_goal: int | None = None, + end_time_goal: datetime | None = None, + duration_goal: timedelta | int | float | None = None, executor=None, ntasks=None, log=False, shutdown_executor=False, retries=0, raise_if_retries_exceeded=True, + allow_running_forever=False, ): self.executor = _ensure_executor(executor) - self.goal = goal + self.goal = _goal( + learner, + goal, + loss_goal, + npoints_goal, + end_time_goal, + duration_goal, + allow_running_forever, + ) self._max_tasks = ntasks @@ -319,10 +355,26 @@ class BlockingRunner(BaseRunner): Parameters ---------- learner : `~adaptive.BaseLearner` instance - goal : callable + goal : callable, optional The end condition for the calculation. This function must take the learner as its sole argument, and return True when we should stop requesting more points. 
+ loss_goal : float, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the loss is smaller than this value. + npoints_goal : int, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the number of points is larger or + equal than this value. + end_time_goal : datetime, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than this + value. + duration_goal : timedelta or number, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than + ``start_time + duration_goal``. ``duration_goal`` can be a number + indicating the number of seconds. executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -376,8 +428,12 @@ class BlockingRunner(BaseRunner): def __init__( self, learner, - goal, + goal: Callable[[BaseLearner], bool] | None = None, *, + loss_goal: float | None = None, + npoints_goal: int | None = None, + end_time_goal: datetime | None = None, + duration_goal: timedelta | int | float | None = None, executor=None, ntasks=None, log=False, @@ -389,13 +445,18 @@ def __init__( raise ValueError("Coroutine functions can only be used with 'AsyncRunner'.") super().__init__( learner, - goal, + goal=goal, + loss_goal=loss_goal, + npoints_goal=npoints_goal, + end_time_goal=end_time_goal, + duration_goal=duration_goal, executor=executor, ntasks=ntasks, log=log, shutdown_executor=shutdown_executor, retries=retries, raise_if_retries_exceeded=raise_if_retries_exceeded, + allow_running_forever=False, ) self._run() @@ -442,8 +503,25 @@ class AsyncRunner(BaseRunner): goal : callable, optional The end condition for the calculation. 
This function must take the learner as its sole argument, and return True when we should - stop requesting more points. If not provided, the runner will run - forever, or until ``self.task.cancel()`` is called. + stop requesting more points. + If not provided, the runner will run forever (or stop when no more + points can be added), or until ``runner.task.cancel()`` is called. + loss_goal : float, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the loss is smaller than this value. + npoints_goal : int, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the number of points is larger or + equal than this value. + end_time_goal : datetime, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than this + value. + duration_goal : timedelta or number, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than + ``start_time + duration_goal``. ``duration_goal`` can be a number + indicating the number of seconds. executor : `concurrent.futures.Executor`, `distributed.Client`,\ `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\ `loky.get_reusable_executor`, optional @@ -468,6 +546,8 @@ class AsyncRunner(BaseRunner): the point is present in ``runner.failed``. raise_if_retries_exceeded : bool, default: True Raise the error after a point ``x`` failed `retries`. + allow_running_forever : bool, default: True + If True, the runner will run forever if the goal is not provided. 
Attributes ---------- @@ -507,8 +587,12 @@ class AsyncRunner(BaseRunner): def __init__( self, learner, - goal=None, + goal: Callable[[BaseLearner], bool] | None = None, *, + loss_goal: float | None = None, + npoints_goal: int | None = None, + end_time_goal: datetime | None = None, + duration_goal: timedelta | int | float | None = None, executor=None, ntasks=None, log=False, @@ -518,11 +602,6 @@ def __init__( raise_if_retries_exceeded=True, ): - if goal is None: - - def goal(_): - return False - if ( executor is None and _default_executor is concurrent.ProcessPoolExecutor @@ -541,13 +620,18 @@ def goal(_): super().__init__( learner, - goal, + goal=goal, + loss_goal=loss_goal, + npoints_goal=npoints_goal, + end_time_goal=end_time_goal, + duration_goal=duration_goal, executor=executor, ntasks=ntasks, log=log, shutdown_executor=shutdown_executor, retries=retries, raise_if_retries_exceeded=raise_if_retries_exceeded, + allow_running_forever=True, ) self.ioloop = ioloop or asyncio.get_event_loop() self.task = None @@ -720,7 +804,15 @@ async def _saver(): Runner = AsyncRunner -def simple(learner, goal): +def simple( + learner, + goal: Callable[[BaseLearner], bool] | None = None, + *, + loss_goal: float | None = None, + npoints_goal: int | None = None, + end_time_goal: datetime | None = None, + duration_goal: timedelta | int | float | None = None, +): """Run the learner until the goal is reached. Requests a single point from the learner, evaluates @@ -735,10 +827,36 @@ def simple(learner, goal): Parameters ---------- learner : ~`adaptive.BaseLearner` instance - goal : callable - The end condition for the calculation. This function must take the - learner as its sole argument, and return True if we should stop. + goal : callable, optional + The end condition for the calculation. This function must take + the learner as its sole argument, and return True when we should + stop requesting more points. 
+ loss_goal : float, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the loss is smaller than this value. + npoints_goal : int, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the number of points is larger or + equal than this value. + end_time_goal : datetime, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than this + value. + duration_goal : timedelta or number, optional + Convenience argument, use instead of ``goal``. The end condition for the + calculation. Stop when the current time is larger or equal than + ``start_time + duration_goal``. ``duration_goal`` can be a number + indicating the number of seconds. """ + goal = _goal( + learner, + goal, + loss_goal, + npoints_goal, + end_time_goal, + duration_goal, + allow_running_forever=False, + ) while not goal(learner): xs, _ = learner.ask(1) for x in xs: @@ -861,3 +979,131 @@ def _get_ncores(ex): return ex._pool.size # not public API! 
else: raise TypeError(f"Cannot get number of cores for {ex.__class__}") + + +class _TimeGoal: + def __init__(self, dt: timedelta | datetime | int | float): + if not isinstance(dt, (timedelta, datetime)): + self.dt = timedelta(seconds=dt) + else: + self.dt = dt + self.start_time = None + + def __call__(self, _): + if isinstance(self.dt, timedelta): + if self.start_time is None: + self.start_time = datetime.now() + return datetime.now() - self.start_time > self.dt + if isinstance(self.dt, datetime): + return datetime.now() > self.dt + raise TypeError(f"`dt={self.dt}` is not a datetime, timedelta, or number.") + + +def auto_goal( + *, + loss: float | None = None, + npoints: int | None = None, + end_time: datetime | None = None, + duration: timedelta | int | float | None = None, + learner: BaseLearner | None = None, + allow_running_forever: bool = True, +) -> Callable[[BaseLearner], bool]: + """Extract a goal from the learners. + + Parameters + ---------- + loss : float, optional + Stop when the loss is smaller than this value. + npoints : int, optional + Stop when the number of points is larger or equal than this value. + end_time : datetime, optional + Stop when the current time is larger or equal than this value. + duration : timedelta or number, optional + Stop when the current time is larger or equal than + ``start_time + duration``. ``duration`` can be a number + indicating the number of seconds. + learner + Learner for which to determine the goal. Only used if the learner type + is `BalancingLearner`, `DataSaver`, `SequenceLearner`, or `IntegratorLearner`. + allow_running_forever + If True, and the goal is None and the learner is not a SequenceLearner, + then a goal that never stops is returned, otherwise an exception is raised. 
+ + Returns + ------- + Callable[[adaptive.BaseLearner], bool] + """ + kw = dict( + loss=loss, + npoints=npoints, + end_time=end_time, + duration=duration, + allow_running_forever=allow_running_forever, + ) + opts = (loss, npoints, end_time, duration) # all are mutually exclusive + if sum(v is not None for v in opts) > 1: + raise ValueError( + "Only one of loss, npoints, end_time, duration can be specified." + ) + + if loss is not None: + return lambda learner: learner.loss() <= loss + if isinstance(learner, BalancingLearner): + # Note that the float loss goal is more efficiently implemented in the + # BalancingLearner itself. That is why the previous if statement is + # above this one. + goals = [auto_goal(learner=l, **kw) for l in learner.learners] + return lambda learner: all(goal(l) for l, goal in zip(learner.learners, goals)) + if npoints is not None: + return lambda learner: learner.npoints >= npoints + if end_time is not None: + return _TimeGoal(end_time) + if duration is not None: + return _TimeGoal(duration) + if isinstance(learner, DataSaver): + return auto_goal(**kw, learner=learner.learner) + if all(v is None for v in opts): + if isinstance(learner, SequenceLearner): + return SequenceLearner.done + if isinstance(learner, IntegratorLearner): + return IntegratorLearner.done + if not allow_running_forever: + raise ValueError( + "Goal is None which means the learners" + " continue forever and this is not allowed." 
+ ) + warnings.warn("Goal is None which means the learners continue forever!") + return lambda _: False + raise ValueError(f"Cannot determine goal from {kw}.") + + + def _goal( + learner: BaseLearner | None, + goal: Callable[[BaseLearner], bool] | None, + loss_goal: float | None, + npoints_goal: int | None, + end_time_goal: datetime | None, + duration_goal: timedelta | None, + allow_running_forever: bool, + ): + if callable(goal): + return goal + + if goal is not None and ( + loss_goal is not None + or npoints_goal is not None + or end_time_goal is not None + or duration_goal is not None + ): + raise ValueError( + "Either goal, loss_goal, npoints_goal, end_time_goal or" + " duration_goal can be specified, not multiple." + ) + return auto_goal( + learner=learner, + loss=loss_goal, + npoints=npoints_goal, + end_time=end_time_goal, + duration=duration_goal, + allow_running_forever=allow_running_forever, + ) diff --git a/adaptive/tests/test_average_learner.py b/adaptive/tests/test_average_learner.py index 5de3ced45..d0176858e 100644 --- a/adaptive/tests/test_average_learner.py +++ b/adaptive/tests/test_average_learner.py @@ -61,7 +61,7 @@ def constant_function(seed): learner = AverageLearner( constant_function, atol=0.01, rtol=0.01, min_npoints=min_npoints ) - simple(learner, lambda l: l.loss() < 1) + simple(learner, loss_goal=1.0) assert learner.npoints >= max(2, min_npoints) diff --git a/adaptive/tests/test_balancing_learner.py b/adaptive/tests/test_balancing_learner.py index 0cf3dcd6d..b2e513698 100644 --- a/adaptive/tests/test_balancing_learner.py +++ b/adaptive/tests/test_balancing_learner.py @@ -50,15 +50,15 @@ def test_ask_0(strategy): @pytest.mark.parametrize( - "strategy, goal", + "strategy, goal_type, goal", [ - ("loss", lambda l: l.loss() < 0.1), - ("loss_improvements", lambda l: l.loss() < 0.1), - ("npoints", lambda bl: all(l.npoints > 10 for l in bl.learners)), - ("cycle", lambda l: l.loss() < 0.1), + ("loss", "loss_goal", 0.1), + ("loss_improvements",
"loss_goal", 0.1), + ("npoints", "goal", lambda bl: all(l.npoints > 10 for l in bl.learners)), + ("cycle", "loss_goal", 0.1), ], ) -def test_strategies(strategy, goal): +def test_strategies(strategy, goal_type, goal): learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for i in range(10)] learner = BalancingLearner(learners, strategy=strategy) - simple(learner, goal=goal) + simple(learner, **{goal_type: goal}) diff --git a/adaptive/tests/test_learner1d.py b/adaptive/tests/test_learner1d.py index 7e990bd7b..f3c555a04 100644 --- a/adaptive/tests/test_learner1d.py +++ b/adaptive/tests/test_learner1d.py @@ -298,7 +298,7 @@ def test_equal(l1, l2): for function in [f, f_vec]: learner = Learner1D(function, bounds=(-1, 1)) learner2 = Learner1D(function, bounds=(-1, 1)) - simple(learner, goal=lambda l: l.npoints > 200) + simple(learner, npoints_goal=200) xs, ys = zip(*learner.data.items()) # Make the scale huge to no get a scale doubling @@ -374,8 +374,8 @@ def f(x): loss = curvature_loss_function() assert loss.nth_neighbors == 1 learner = Learner1D(f, (-1, 1), loss_per_interval=loss) - simple(learner, goal=lambda l: l.npoints > 100) - assert learner.npoints > 100 + simple(learner, npoints_goal=100) + assert learner.npoints >= 100 def test_curvature_loss_vectors(): @@ -385,8 +385,8 @@ def f(x): loss = curvature_loss_function() assert loss.nth_neighbors == 1 learner = Learner1D(f, (-1, 1), loss_per_interval=loss) - simple(learner, goal=lambda l: l.npoints > 100) - assert learner.npoints > 100 + simple(learner, npoints_goal=100) + assert learner.npoints >= 100 def test_NaN_loss(): @@ -398,7 +398,7 @@ def f(x): return x + a**2 / (a**2 + x**2) learner = Learner1D(f, bounds=(-1, 1)) - simple(learner, lambda l: l.npoints > 100) + simple(learner, npoints_goal=100) def test_inf_loss_with_missing_bounds(): @@ -408,6 +408,6 @@ def test_inf_loss_with_missing_bounds(): loss_per_interval=curvature_loss_function(), ) # must be done in parallel because otherwise the bounds will be 
evaluated first - BlockingRunner(learner, goal=lambda learner: learner.loss() < 0.01) + BlockingRunner(learner, loss_goal=0.01) learner.npoints > 20 diff --git a/adaptive/tests/test_learnernd.py b/adaptive/tests/test_learnernd.py index 9ae359a8f..0884b7eeb 100644 --- a/adaptive/tests/test_learnernd.py +++ b/adaptive/tests/test_learnernd.py @@ -33,8 +33,8 @@ def test_interior_vs_bbox_gives_same_result(): hull = scipy.spatial.ConvexHull(control._bounds_points) learner = LearnerND(f, bounds=hull) - simple(control, goal=lambda l: l.loss() < 0.1) - simple(learner, goal=lambda l: l.loss() < 0.1) + simple(control, loss_goal=0.1) + simple(learner, loss_goal=0.1) assert learner.data == control.data @@ -47,4 +47,4 @@ def test_vector_return_with_a_flat_layer(): h3 = lambda xy: np.array([0 * f(xy), g(xy)]) # noqa: E731 for function in [h1, h2, h3]: learner = LearnerND(function, bounds=[(-1, 1), (-1, 1)]) - simple(learner, goal=lambda l: l.loss() < 0.1) + simple(learner, loss_goal=0.1) diff --git a/adaptive/tests/test_learners.py b/adaptive/tests/test_learners.py index d393511fb..e800b6d39 100644 --- a/adaptive/tests/test_learners.py +++ b/adaptive/tests/test_learners.py @@ -103,7 +103,7 @@ def goal(): return get_goal(learner.learner) return get_goal(learner) - simple(learner, goal()) + simple(learner, goal=goal()) # Library of functions and associated learners. 
diff --git a/adaptive/tests/test_pickling.py b/adaptive/tests/test_pickling.py index c0d515320..baf5b1146 100644 --- a/adaptive/tests/test_pickling.py +++ b/adaptive/tests/test_pickling.py @@ -94,7 +94,7 @@ def test_serialization_for(learner_type, learner_kwargs, serializer, f): learner = learner_type(f, **learner_kwargs) - simple(learner, goal_1) + simple(learner, goal=goal_1) learner_bytes = serializer.dumps(learner) loss = learner.loss() asked = learner.ask(10) @@ -113,5 +113,5 @@ def test_serialization_for(learner_type, learner_kwargs, serializer, f): # load again to undo the ask learner_loaded = serializer.loads(learner_bytes) - simple(learner_loaded, goal_2) + simple(learner_loaded, goal=goal_2) assert learner_loaded.npoints == 20 diff --git a/adaptive/tests/test_runner.py b/adaptive/tests/test_runner.py index 169c38431..e36abcbe1 100644 --- a/adaptive/tests/test_runner.py +++ b/adaptive/tests/test_runner.py @@ -3,13 +3,22 @@ import sys import time +import numpy as np import pytest -from adaptive.learner import Learner1D, Learner2D +from adaptive.learner import ( + BalancingLearner, + DataSaver, + IntegratorLearner, + Learner1D, + Learner2D, + SequenceLearner, +) from adaptive.runner import ( AsyncRunner, BlockingRunner, SequentialExecutor, + auto_goal, simple, stop_after, with_distributed, @@ -19,22 +28,18 @@ OPERATING_SYSTEM = platform.system() -def blocking_runner(learner, goal): - BlockingRunner(learner, goal, executor=SequentialExecutor()) +def blocking_runner(learner, **kw): + BlockingRunner(learner, executor=SequentialExecutor(), **kw) -def async_runner(learner, goal): - runner = AsyncRunner(learner, goal, executor=SequentialExecutor()) +def async_runner(learner, **kw): + runner = AsyncRunner(learner, executor=SequentialExecutor(), **kw) asyncio.get_event_loop().run_until_complete(runner.task) runners = [simple, blocking_runner, async_runner] -def trivial_goal(learner): - return learner.npoints > 10 - - @pytest.mark.parametrize("runner", runners) def 
test_simple(runner): """Test that the runners actually run.""" @@ -43,8 +48,8 @@ def f(x): return x learner = Learner1D(f, (-1, 1)) - runner(learner, lambda l: l.npoints > 10) - assert len(learner.data) > 10 + runner(learner, npoints_goal=10) + assert len(learner.data) >= 10 @pytest.mark.parametrize("runner", runners) @@ -57,7 +62,7 @@ def test_nonconforming_output(runner): def f(x): return [0] - runner(Learner2D(f, ((-1, 1), (-1, 1))), trivial_goal) + runner(Learner2D(f, ((-1, 1), (-1, 1))), npoints_goal=10) def test_aync_def_function(): @@ -65,7 +70,7 @@ async def f(x): return x learner = Learner1D(f, (-1, 1)) - runner = AsyncRunner(learner, trivial_goal) + runner = AsyncRunner(learner, npoints_goal=10) asyncio.get_event_loop().run_until_complete(runner.task) @@ -88,7 +93,7 @@ def test_concurrent_futures_executor(): BlockingRunner( Learner1D(linear, (-1, 1)), - trivial_goal, + npoints_goal=10, executor=ProcessPoolExecutor(max_workers=1), ) @@ -96,7 +101,7 @@ def test_concurrent_futures_executor(): def test_stop_after_goal(): seconds_to_wait = 0.2 # don't make this too large or the test will take ages start_time = time.time() - BlockingRunner(Learner1D(linear, (-1, 1)), stop_after(seconds=seconds_to_wait)) + BlockingRunner(Learner1D(linear, (-1, 1)), goal=stop_after(seconds=seconds_to_wait)) stop_time = time.time() assert stop_time - start_time > seconds_to_wait @@ -119,7 +124,7 @@ def test_ipyparallel_executor(): child.expect("Engines appear to have started successfully", timeout=35) ipyparallel_executor = Client() learner = Learner1D(linear, (-1, 1)) - BlockingRunner(learner, trivial_goal, executor=ipyparallel_executor) + BlockingRunner(learner, npoints_goal=10, executor=ipyparallel_executor) assert learner.npoints > 0 @@ -137,7 +142,7 @@ def test_distributed_executor(): learner = Learner1D(linear, (-1, 1)) client = Client(n_workers=1) - BlockingRunner(learner, trivial_goal, executor=client) + BlockingRunner(learner, npoints_goal=10, executor=client) 
client.shutdown() assert learner.npoints > 0 @@ -145,12 +150,55 @@ def test_distributed_executor(): def test_loky_executor(loky_executor): learner = Learner1D(lambda x: x, (-1, 1)) BlockingRunner( - learner, trivial_goal, executor=loky_executor, shutdown_executor=True + learner, npoints_goal=10, executor=loky_executor, shutdown_executor=True ) assert learner.npoints > 0 def test_default_executor(): learner = Learner1D(linear, (-1, 1)) - runner = AsyncRunner(learner, goal=lambda l: l.npoints > 10) + runner = AsyncRunner(learner, npoints_goal=10) asyncio.get_event_loop().run_until_complete(runner.task) + + +def test_auto_goal(): + learner = Learner1D(linear, (-1, 1)) + simple(learner, auto_goal(npoints=4)) + assert learner.npoints == 4 + + learner = Learner1D(linear, (-1, 1)) + simple(learner, auto_goal(loss=0.5)) + assert learner.loss() <= 0.5 + + learner = SequenceLearner(linear, np.linspace(-1, 1)) + simple(learner, auto_goal(learner=learner)) + assert learner.done() + + learner = IntegratorLearner(linear, bounds=(0, 1), tol=0.1) + simple(learner, auto_goal(learner=learner)) + assert learner.done() + + learner = Learner1D(linear, (-1, 1)) + learner = DataSaver(learner, lambda x: x) + simple(learner, auto_goal(npoints=4, learner=learner)) + assert learner.npoints == 4 + + learner1 = Learner1D(linear, (-1, 1)) + learner2 = Learner1D(linear, (-2, 2)) + balancing_learner = BalancingLearner([learner1, learner2]) + simple(balancing_learner, auto_goal(npoints=4, learner=balancing_learner)) + assert learner1.npoints == 4 and learner2.npoints == 4 + + learner1 = Learner1D(linear, bounds=(0, 1)) + learner1 = DataSaver(learner1, lambda x: x) + learner2 = Learner1D(linear, bounds=(0, 1)) + learner2 = DataSaver(learner2, lambda x: x) + balancing_learner = BalancingLearner([learner1, learner2]) + simple(balancing_learner, auto_goal(npoints=10, learner=balancing_learner)) + assert learner1.npoints == 10 and learner2.npoints == 10 + + learner = Learner1D(linear, (-1, 1)) + 
t_start = time.time() + simple(learner, auto_goal(duration=1e-2, learner=learner)) + t_end = time.time() + assert t_end - t_start >= 1e-2 diff --git a/adaptive/tests/test_sequence_learner.py b/adaptive/tests/test_sequence_learner.py index 68ca956ca..fdd3dcb10 100644 --- a/adaptive/tests/test_sequence_learner.py +++ b/adaptive/tests/test_sequence_learner.py @@ -19,8 +19,6 @@ def test_fail_with_sequence_of_unhashable(): # https://github.com/python-adaptive/adaptive/issues/265 seq = [{1: 1}] # unhashable learner = SequenceLearner(FailOnce(), sequence=seq) - runner = Runner( - learner, goal=SequenceLearner.done, retries=1, executor=SequentialExecutor() - ) + runner = Runner(learner, retries=1, executor=SequentialExecutor()) asyncio.get_event_loop().run_until_complete(runner.task) assert runner.status() == "finished" diff --git a/adaptive/tests/unit/test_learnernd_integration.py b/adaptive/tests/unit/test_learnernd_integration.py index ce3482e9c..939108377 100644 --- a/adaptive/tests/unit/test_learnernd_integration.py +++ b/adaptive/tests/unit/test_learnernd_integration.py @@ -16,21 +16,21 @@ def ring_of_fire(xy, d=0.75): def test_learnerND_runs_to_10_points(): learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)]) - SimpleRunner(learner, goal=lambda l: l.npoints >= 10) + SimpleRunner(learner, npoints_goal=10) assert learner.npoints == 10 @pytest.mark.parametrize("execution_number", range(5)) def test_learnerND_runs_to_10_points_Blocking(execution_number): learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)]) - BlockingRunner(learner, goal=lambda l: l.npoints >= 10) + BlockingRunner(learner, npoints_goal=10) assert learner.npoints >= 10 def test_learnerND_curvature_runs_to_10_points(): loss = curvature_loss_function() learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss) - SimpleRunner(learner, goal=lambda l: l.npoints >= 10) + SimpleRunner(learner, npoints_goal=10) assert learner.npoints == 10 @@ -38,7 +38,7 @@ def 
test_learnerND_curvature_runs_to_10_points(): def test_learnerND_curvature_runs_to_10_points_Blocking(execution_number): loss = curvature_loss_function() learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss) - BlockingRunner(learner, goal=lambda l: l.npoints >= 10) + BlockingRunner(learner, npoints_goal=10) assert learner.npoints >= 10 diff --git a/adaptive/utils.py b/adaptive/utils.py index ecacffc36..a87aeeb56 100644 --- a/adaptive/utils.py +++ b/adaptive/utils.py @@ -83,7 +83,8 @@ def load(fname: str, compress: bool = True) -> Any: def copy_docstring_from(other: Callable) -> Callable: def decorator(method): - return functools.wraps(other)(method) + method.__doc__ = other.__doc__ + return method return decorator diff --git a/docs/logo.py b/docs/logo.py index 5ba52ed67..595728db6 100644 --- a/docs/logo.py +++ b/docs/logo.py @@ -22,7 +22,7 @@ def ring(xy): return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4) learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) - adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01) + adaptive.runner.simple(learner, loss_goal=0.01) return learner diff --git a/docs/source/algorithms_and_examples.md b/docs/source/algorithms_and_examples.md index da674fe3c..48e4cb61e 100644 --- a/docs/source/algorithms_and_examples.md +++ b/docs/source/algorithms_and_examples.md @@ -102,7 +102,7 @@ def plot_loss_interval(learner): def plot(learner, npoints): - adaptive.runner.simple(learner, lambda l: l.npoints == npoints) + adaptive.runner.simple(learner, npoints_goal=npoints) return (learner.plot() * plot_loss_interval(learner))[:, -1.1:1.1] @@ -132,7 +132,7 @@ def ring(xy): def plot(learner, npoints): - adaptive.runner.simple(learner, lambda l: l.npoints == npoints) + adaptive.runner.simple(learner, npoints_goal=npoints) learner2 = adaptive.Learner2D(ring, bounds=learner.bounds) xs = ys = np.linspace(*learner.bounds[0], int(learner.npoints**0.5)) xys = list(itertools.product(xs, ys)) @@ -168,7
+168,7 @@ learner = adaptive.AverageLearner(g, atol=None, rtol=0.01) def plot(learner, npoints): - adaptive.runner.simple(learner, lambda l: l.npoints == npoints) + adaptive.runner.simple(learner, npoints_goal=npoints) return learner.plot().relabel(f"loss={learner.loss():.2f}") @@ -191,7 +191,7 @@ def sphere(xyz): learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)]) -adaptive.runner.simple(learner, lambda l: l.npoints == 5000) +adaptive.runner.simple(learner, npoints_goal=5000) fig = learner.plot_3D(return_fig=True) diff --git a/docs/source/logo.md b/docs/source/logo.md index c70a8f98b..c0baf5ddd 100644 --- a/docs/source/logo.md +++ b/docs/source/logo.md @@ -110,7 +110,7 @@ def create_and_run_learner(): return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4) learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) - adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.005) + adaptive.runner.simple(learner, loss_goal=0.005) return learner diff --git a/docs/source/reference/adaptive.runner.extras.md b/docs/source/reference/adaptive.runner.extras.md index 510c17ec5..d2dfaa89c 100644 --- a/docs/source/reference/adaptive.runner.extras.md +++ b/docs/source/reference/adaptive.runner.extras.md @@ -6,7 +6,13 @@ Runners allow you to specify the stopping criterion by providing a `goal` as a function that takes the learner and returns a boolean: `False` for "continue running" and `True` for "stop". This gives you a lot of flexibility for defining your own stopping conditions, however we also provide some common -stopping conditions as a convenience. +stopping conditions as a convenience. For example, to continue until the loss is below a threshold `x`, +you may specify `loss_goal=x`. Similarly, to continue until `n` points have been sampled, you may +specify `npoints_goal=n`. See the Runner docstring for details. + +```{eval-rst} +.. autofunction:: adaptive.runner.auto_goal +``` ```{eval-rst} .. 
autofunction:: adaptive.runner.stop_after diff --git a/docs/source/tutorial/tutorial.AverageLearner.md b/docs/source/tutorial/tutorial.AverageLearner.md index 5bdd5ae27..2f53d396b 100644 --- a/docs/source/tutorial/tutorial.AverageLearner.md +++ b/docs/source/tutorial/tutorial.AverageLearner.md @@ -45,8 +45,8 @@ def g(n): ```{code-cell} ipython3 learner = adaptive.AverageLearner(g, atol=None, rtol=0.01) -# `loss < 1` means that we reached the `rtol` or `atol` -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 1) +# `loss < 1.0` means that we reached the `rtol` or `atol` +runner = adaptive.Runner(learner, loss_goal=1.0) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.BalancingLearner.md b/docs/source/tutorial/tutorial.BalancingLearner.md index 4276dd0d4..5f43bdd64 100644 --- a/docs/source/tutorial/tutorial.BalancingLearner.md +++ b/docs/source/tutorial/tutorial.BalancingLearner.md @@ -46,7 +46,7 @@ learners = [ ] bal_learner = adaptive.BalancingLearner(learners) -runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(bal_learner, loss_goal=0.01) ``` ```{code-cell} ipython3 @@ -86,7 +86,7 @@ learner = adaptive.BalancingLearner.from_product( jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos ) -runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.BlockingRunner(learner, loss_goal=0.01) # The `cdims` will automatically be set when using `from_product`, so # `plot()` will return a HoloMap with correctly labeled sliders. diff --git a/docs/source/tutorial/tutorial.DataSaver.md b/docs/source/tutorial/tutorial.DataSaver.md index 13a6666ce..4d4e0efc4 100644 --- a/docs/source/tutorial/tutorial.DataSaver.md +++ b/docs/source/tutorial/tutorial.DataSaver.md @@ -55,7 +55,7 @@ learner = adaptive.DataSaver(_learner, arg_picker=itemgetter("y")) `learner.learner` is the original learner, so `learner.learner.loss()` will call the correct loss method. 
```{code-cell} ipython3 -runner = adaptive.Runner(learner, goal=lambda l: l.learner.loss() < 0.1) +runner = adaptive.Runner(learner, loss_goal=0.1) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.IntegratorLearner.md b/docs/source/tutorial/tutorial.IntegratorLearner.md index 0686344c9..8110512a9 100644 --- a/docs/source/tutorial/tutorial.IntegratorLearner.md +++ b/docs/source/tutorial/tutorial.IntegratorLearner.md @@ -61,7 +61,7 @@ learner = adaptive.IntegratorLearner(f24, bounds=(0, 3), tol=1e-8) # *this* process only. This means we don't pay # the overhead of evaluating the function in another process. runner = adaptive.Runner( - learner, executor=SequentialExecutor(), goal=lambda l: l.done() + learner, executor=SequentialExecutor() ) ``` @@ -75,7 +75,7 @@ await runner.task # This is not needed in a notebook environment! runner.live_info() ``` -Now we could do the live plotting again, but lets just wait untill the +Now we could do the live plotting again, but let's just wait until the runner is done. ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.Learner1D.md b/docs/source/tutorial/tutorial.Learner1D.md index 60cb8aac5..db80e03ec 100644 --- a/docs/source/tutorial/tutorial.Learner1D.md +++ b/docs/source/tutorial/tutorial.Learner1D.md @@ -63,9 +63,9 @@ On Windows systems the runner will use a {class}`loky.get_reusable_executor`. A {class}`~concurrent.futures.ProcessPoolExecutor` cannot be used on Windows for reasons. ```{code-cell} ipython3 -# The end condition is when the "loss" is less than 0.1. In the context of the -# 1D learner this means that we will resolve features in 'func' with width 0.1 or wider. -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +# The end condition is when the "loss" is less than 0.01. In the context of the +# 1D learner this means that we will resolve features in 'func' with width 0.01 or wider. 
+runner = adaptive.Runner(learner, loss_goal=0.01) ``` ```{code-cell} ipython3 @@ -124,7 +124,7 @@ The `Learner1D` can be used for such functions: ```{code-cell} ipython3 learner = adaptive.Learner1D(f_levels, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) # continue until `learner.loss()<=0.01` ``` ```{code-cell} ipython3 @@ -156,7 +156,7 @@ from adaptive.learner.learner1D import ( curvature_loss = curvature_loss_function() learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=curvature_loss) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) ``` ```{code-cell} ipython3 @@ -187,11 +187,10 @@ learner_h = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=uniform_loss) learner_1 = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=default_loss) learner_2 = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=curvature_loss) -npoints_goal = lambda l: l.npoints >= 100 # adaptive.runner.simple is a non parallel blocking runner. 
-adaptive.runner.simple(learner_h, goal=npoints_goal) -adaptive.runner.simple(learner_1, goal=npoints_goal) -adaptive.runner.simple(learner_2, goal=npoints_goal) +adaptive.runner.simple(learner_h, npoints_goal=100) +adaptive.runner.simple(learner_1, npoints_goal=100) +adaptive.runner.simple(learner_2, npoints_goal=100) ( learner_h.plot().relabel("homogeneous") diff --git a/docs/source/tutorial/tutorial.Learner2D.md b/docs/source/tutorial/tutorial.Learner2D.md index c2f6ddba5..d15446fe4 100644 --- a/docs/source/tutorial/tutorial.Learner2D.md +++ b/docs/source/tutorial/tutorial.Learner2D.md @@ -46,7 +46,7 @@ learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) ``` ```{code-cell} ipython3 -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.LearnerND.md b/docs/source/tutorial/tutorial.LearnerND.md index aca8f187e..e525fd9b8 100644 --- a/docs/source/tutorial/tutorial.LearnerND.md +++ b/docs/source/tutorial/tutorial.LearnerND.md @@ -50,7 +50,7 @@ def sphere(xyz): learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)]) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 1e-3) +runner = adaptive.Runner(learner, loss_goal=1e-3) ``` ```{code-cell} ipython3 @@ -123,7 +123,7 @@ b = [(-1, -1, -1), (-1, 1, -1), (-1, -1, 1), (-1, 1, 1), (1, 1, -1), (1, -1, -1) hull = scipy.spatial.ConvexHull(b) learner = adaptive.LearnerND(f, hull) -adaptive.BlockingRunner(learner, goal=lambda l: l.npoints > 2000) +adaptive.BlockingRunner(learner, npoints_goal=2000) learner.plot_isosurface(-0.5) ``` diff --git a/docs/source/tutorial/tutorial.SKOptLearner.md b/docs/source/tutorial/tutorial.SKOptLearner.md index fb82bca17..49a5340a8 100644 --- a/docs/source/tutorial/tutorial.SKOptLearner.md +++ b/docs/source/tutorial/tutorial.SKOptLearner.md @@ -47,7 +47,7 @@ learner = adaptive.SKOptLearner( acq_func="gp_hedge", 
acq_optimizer="lbfgs", ) -runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 40) +runner = adaptive.Runner(learner, ntasks=1, npoints_goal=40) ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.SequenceLearner.md b/docs/source/tutorial/tutorial.SequenceLearner.md index 0d6bb71cc..5a60fbcdd 100644 --- a/docs/source/tutorial/tutorial.SequenceLearner.md +++ b/docs/source/tutorial/tutorial.SequenceLearner.md @@ -42,8 +42,8 @@ def f(x): seq = np.linspace(-15, 15, 1000) learner = SequenceLearner(f, seq) -runner = adaptive.Runner(learner, SequenceLearner.done) -# that goal is same as `lambda learner: learner.done()` +runner = adaptive.Runner(learner) +# not providing a goal is same as `lambda learner: learner.done()` ``` ```{code-cell} ipython3 diff --git a/docs/source/tutorial/tutorial.advanced-topics.md b/docs/source/tutorial/tutorial.advanced-topics.md index bcea65792..e6b6c55a3 100644 --- a/docs/source/tutorial/tutorial.advanced-topics.md +++ b/docs/source/tutorial/tutorial.advanced-topics.md @@ -51,7 +51,7 @@ learner = adaptive.Learner1D(f, bounds=(-1, 1)) control = adaptive.Learner1D(f, bounds=(-1, 1)) # Let's only run the learner -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) ``` ```{code-cell} ipython3 @@ -90,7 +90,7 @@ def slow_f(x): learner = adaptive.Learner1D(slow_f, bounds=[0, 1]) -runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 100) +runner = adaptive.Runner(learner, npoints_goal=100) runner.start_periodic_saving( save_kwargs=dict(fname="data/periodic_example.p"), interval=6 ) @@ -134,7 +134,7 @@ The simplest way to accomplish this is to use {class}`adaptive.BlockingRunner`: ```{code-cell} ipython3 learner = adaptive.Learner1D(f, bounds=(-1, 1)) -adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) +adaptive.BlockingRunner(learner, loss_goal=0.01) # This will only get run after the runner has finished learner.plot() ``` @@ 
-155,7 +155,7 @@ The simplest way is to use {class}`adaptive.runner.simple` to run your learner: learner = adaptive.Learner1D(f, bounds=(-1, 1)) # blocks until completion -adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01) +adaptive.runner.simple(learner, loss_goal=0.01) learner.plot() ``` @@ -169,7 +169,7 @@ from adaptive.runner import SequentialExecutor learner = adaptive.Learner1D(f, bounds=(-1, 1)) runner = adaptive.Runner( - learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.01 + learner, executor=SequentialExecutor(), loss_goal=0.01 ) ``` @@ -292,7 +292,7 @@ One way to inspect runners is to instantiate one with `log=True`: ```{code-cell} ipython3 learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01, log=True) +runner = adaptive.Runner(learner, loss_goal=0.01, log=True) ``` ```{code-cell} ipython3 @@ -351,7 +351,7 @@ async def time(runner): ioloop = asyncio.get_event_loop() learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, loss_goal=0.01) timer = ioloop.create_task(time(runner)) ``` @@ -436,7 +436,7 @@ To run the adaptive evaluation we provide the asynchronous function to the `lear ```{code-cell} ipython3 learner = adaptive.Learner1D(f_parallel, bounds=(-3.5, 3.5)) -runner = adaptive.AsyncRunner(learner, goal=lambda l: l.loss() < 0.01, ntasks=20) +runner = adaptive.AsyncRunner(learner, loss_goal=0.01, ntasks=20) ``` Finally we await for the runner to finish, and then plot the result. @@ -462,7 +462,7 @@ def f(x): learner = adaptive.Learner1D(f, (-1, 1)) -adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.1) +adaptive.BlockingRunner(learner, loss_goal=0.1) ``` If you use `asyncio` already in your script and want to integrate `adaptive` into it, then you can use the default {class}`~adaptive.Runner` as you would from a notebook. 
diff --git a/docs/source/tutorial/tutorial.custom_loss.md b/docs/source/tutorial/tutorial.custom_loss.md index be1f78669..f76af484d 100644 --- a/docs/source/tutorial/tutorial.custom_loss.md +++ b/docs/source/tutorial/tutorial.custom_loss.md @@ -74,7 +74,7 @@ def f_divergent_1d(x): learner = adaptive.Learner1D( f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d ) -runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) +runner = adaptive.BlockingRunner(learner, loss_goal=0.01) learner.plot().select(y=(0, 10000)) ``` @@ -154,7 +154,7 @@ def resolution_loss_function(min_distance=0, max_distance=1): loss = resolution_loss_function(min_distance=0.01) learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss) -runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02) +runner = adaptive.BlockingRunner(learner, loss_goal=0.02) learner.plot(tri_alpha=0.3).relabel("1 / (x^2 + y^2) in log scale").opts( hv.opts.EdgePaths(color="w"), hv.opts.Image(logz=True, colorbar=True) ) diff --git a/docs/source/tutorial/tutorial.parallelism.md b/docs/source/tutorial/tutorial.parallelism.md index f3c1985f6..ef0963a3a 100644 --- a/docs/source/tutorial/tutorial.parallelism.md +++ b/docs/source/tutorial/tutorial.parallelism.md @@ -24,7 +24,7 @@ from concurrent.futures import ProcessPoolExecutor executor = ProcessPoolExecutor(max_workers=4) learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, executor=executor, goal=lambda l: l.loss() < 0.05) +runner = adaptive.Runner(learner, executor=executor, loss_goal=0.05) runner.live_info() runner.live_plot(update_interval=0.1) ``` @@ -37,7 +37,7 @@ import ipyparallel client = ipyparallel.Client() # You will need to start an `ipcluster` to make this work learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, executor=client, loss_goal=0.01) 
runner.live_info() runner.live_plot() ``` @@ -52,7 +52,7 @@ import distributed client = distributed.Client() learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01) +runner = adaptive.Runner(learner, executor=client, loss_goal=0.01) runner.live_info() runner.live_plot(update_interval=0.1) ``` @@ -80,7 +80,7 @@ if __name__ == "__main__": learner, executor=MPIPoolExecutor(), shutdown_executor=True, - goal=lambda l: l.loss() < 0.01, + loss_goal=0.01, ) # periodically save the data (in case the job dies) @@ -132,6 +132,6 @@ ex = get_reusable_executor() f = lambda x: x learner = adaptive.Learner1D(f, bounds=(-1, 1)) -runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01, executor=ex) +runner = adaptive.Runner(learner, loss_goal=0.01, executor=ex) runner.live_info() ``` diff --git a/example-notebook.ipynb b/example-notebook.ipynb index b00ac622c..d3a739056 100644 --- a/example-notebook.ipynb +++ b/example-notebook.ipynb @@ -106,7 +106,7 @@ "source": [ "# The end condition is when the \"loss\" is less than 0.01. 
In the context of the\n", "# 1D learner this means that we will resolve features in 'func' with width 0.01 or wider.\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, loss_goal=0.01)\n", "runner.live_info()" ] }, @@ -194,7 +194,7 @@ "metadata": {}, "outputs": [], "source": [ - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, loss_goal=0.01)\n", "runner.live_info()" ] }, @@ -279,7 +279,7 @@ "outputs": [], "source": [ "learner = adaptive.AverageLearner(g, atol=None, rtol=0.01)\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 2)\n", + "runner = adaptive.Runner(learner, loss_goal=2.0)\n", "runner.live_info()" ] }, @@ -465,7 +465,7 @@ "# We use a SequentialExecutor, which runs the function to be learned in *this* process only.\n", "# This means we don't pay the overhead of evaluating the function in another process.\n", "executor = SequentialExecutor()\n", - "runner = adaptive.Runner(learner, executor=executor, goal=lambda l: l.done())\n", + "runner = adaptive.Runner(learner, executor=executor)\n", "runner.live_info()" ] }, @@ -535,7 +535,7 @@ "outputs": [], "source": [ "learner = adaptive.Learner1D(f_levels, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, loss_goal=0.01)\n", "runner.live_info()" ] }, @@ -564,7 +564,6 @@ "metadata": {}, "outputs": [], "source": [ - "# this step takes a lot of time, it will finish at about 3300 points, which can take up to 6 minutes\n", "def sphere(xyz):\n", " x, y, z = xyz\n", " a = 0.4\n", @@ -572,7 +571,7 @@ "\n", "\n", "learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 2000)\n", + "runner = adaptive.Runner(learner, npoints_goal=2000)\n", "runner.live_info()" ] }, @@ -675,13 +674,15 @@ "\n", "\n", "def f_divergent_1d(x):\n", + 
" if x == 0:\n", + " return np.inf\n", " return 1 / x**2\n", "\n", "\n", "learner = adaptive.Learner1D(\n", " f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d\n", ")\n", - "runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.BlockingRunner(learner, loss_goal=0.01)\n", "learner.plot().select(y=(0, 10000))" ] }, @@ -718,7 +719,7 @@ ")\n", "\n", "# this takes a while, so use the async Runner so we know *something* is happening\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.02)\n", + "runner = adaptive.Runner(learner, loss_goal=0.02)\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.2, plotter=plot_logz)" ] @@ -780,7 +781,7 @@ "loss = partial(resolution_loss, min_distance=0.01)\n", "\n", "learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)\n", - "runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02)\n", + "runner = adaptive.BlockingRunner(learner, loss_goal=0.02)\n", "plot_logz(learner)" ] }, @@ -826,7 +827,7 @@ "]\n", "\n", "bal_learner = adaptive.BalancingLearner(learners)\n", - "runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(bal_learner, loss_goal=0.01)\n", "runner.live_info()" ] }, @@ -870,7 +871,7 @@ " jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos\n", ")\n", "\n", - "runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.BlockingRunner(learner, loss_goal=0.01)\n", "\n", "# The `cdims` will automatically be set when using `from_product`, so\n", "# `plot()` will return a HoloMap with correctly labeled sliders.\n", @@ -935,7 +936,7 @@ "metadata": {}, "outputs": [], "source": [ - "runner = adaptive.Runner(learner, goal=lambda l: l.learner.loss() < 0.05)\n", + "runner = adaptive.Runner(learner, loss_goal=0.05)\n", "runner.live_info()" ] }, @@ -1005,7 +1006,7 @@ " acq_func=\"gp_hedge\",\n", " 
acq_optimizer=\"lbfgs\",\n", ")\n", - "runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 40)\n", + "runner = adaptive.Runner(learner, ntasks=1, npoints_goal=40)\n", "runner.live_info()" ] }, @@ -1063,7 +1064,7 @@ "executor = ProcessPoolExecutor(max_workers=4)\n", "\n", "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, executor=executor, goal=lambda l: l.loss() < 0.05)\n", + "runner = adaptive.Runner(learner, executor=executor, loss_goal=0.05)\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.1)" ] @@ -1086,7 +1087,7 @@ "client = ipyparallel.Client() # You will need to start an `ipcluster` to make this work\n", "\n", "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, executor=client, loss_goal=0.01)\n", "runner.live_info()\n", "runner.live_plot()" ] @@ -1111,7 +1112,7 @@ "client = distributed.Client()\n", "\n", "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, executor=client, loss_goal=0.01)\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.1)" ] @@ -1163,7 +1164,7 @@ "control = adaptive.Learner1D(partial(peak, wait=False), bounds=(-1, 1))\n", "\n", "# Let's only run the learner\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)\n", + "runner = adaptive.Runner(learner, loss_goal=0.01)\n", "runner.live_info()" ] }, @@ -1239,7 +1240,7 @@ "\n", "\n", "learner = adaptive.Learner1D(slow_f, bounds=[0, 1])\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 100)\n", + "runner = adaptive.Runner(learner, npoints_goal=100)\n", "\n", "runner.start_periodic_saving(\n", " save_kwargs=dict(fname=\"data/periodic_example.p\"), interval=6\n", @@ -1335,7 +1336,7 @@ "outputs": [], "source": [ 
"learner = adaptive.Learner1D(partial(peak, wait=False), bounds=(-1, 1))\n", - "adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.005)\n", + "adaptive.BlockingRunner(learner, loss_goal=0.005)\n", "# This will only get run after the runner has finished\n", "learner.plot()" ] @@ -1369,7 +1370,7 @@ "learner = adaptive.Learner1D(partial(peak, wait=False), bounds=(-1, 1))\n", "\n", "# blocks until completion\n", - "adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.002)\n", + "adaptive.runner.simple(learner, loss_goal=0.002)\n", "\n", "learner.plot()" ] @@ -1394,7 +1395,7 @@ "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", "\n", "runner = adaptive.Runner(\n", - " learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.002\n", + " learner, executor=SequentialExecutor(), loss_goal=0.002\n", ")\n", "runner.live_info()\n", "runner.live_plot(update_interval=0.1)" @@ -1543,7 +1544,7 @@ "outputs": [], "source": [ "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.1, log=True)\n", + "runner = adaptive.Runner(learner, loss_goal=0.1, log=True)\n", "runner.live_info()" ] }, @@ -1628,7 +1629,7 @@ "ioloop = asyncio.get_event_loop()\n", "\n", "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n", - "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.1)\n", + "runner = adaptive.Runner(learner, loss_goal=0.1)\n", "\n", "timer = ioloop.create_task(time(runner))\n", "runner.live_info()" @@ -1667,7 +1668,7 @@ "\n", "learner = adaptive.Learner1D(peak, (-1, 1))\n", "\n", - "adaptive.BlockingRunner(learner, goal=lambda: l: l.loss() < 0.1)\n", + "adaptive.BlockingRunner(learner, loss_goal=0.1)\n", "```\n", "\n", "If you use `asyncio` already in your script and want to integrate `adaptive` into it, then you can use the default `Runner` as you would from a notebook. 
If you want to wait for the runner to finish, then you can simply\n", diff --git a/setup.py b/setup.py index 08a7948f5..653e2e41b 100644 --- a/setup.py +++ b/setup.py @@ -60,7 +60,7 @@ def get_version_and_cmdclass(package_name): "distributed", "ipyparallel>=6.2.5", # because of https://github.com/ipython/ipyparallel/issues/404 "scikit-optimize>=0.8.1", # because of https://github.com/scikit-optimize/scikit-optimize/issues/931 - "scikit-learn<=0.24.2", # because of https://github.com/scikit-optimize/scikit-optimize/issues/1059 + "scikit-learn", "wexpect" if os.name == "nt" else "pexpect", ], }