From c3e314770307d87c750762cbf876af801aced13e Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Tue, 23 Feb 2021 14:34:13 +0630 Subject: [PATCH 1/3] docs: rm type hints in ignite.engine --- docs/source/conf.py | 2 + ignite/engine/__init__.py | 102 ++++++++++++++++----------------- ignite/engine/deterministic.py | 28 ++++----- ignite/engine/engine.py | 40 +++++++------ ignite/engine/events.py | 15 ++--- 5 files changed, 97 insertions(+), 90 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index a69170d460c4..76598d1aac9c 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -206,6 +206,8 @@ # -- Type hints configs ------------------------------------------------------ +autodoc_inherit_docstrings = False +napoleon_use_ivar = True autoclass_content = "both" autodoc_typehints = "description" diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py index ba8aae0465d1..0c000f1aa920 100644 --- a/ignite/engine/__init__.py +++ b/ignite/engine/__init__.py @@ -53,17 +53,17 @@ def supervised_training_step( """Factory function for supervised training. Args: - model (torch.nn.Module): the model to train. - optimizer (torch.optim.Optimizer): the optimizer to use. - loss_fn (torch.nn loss function): the loss function to use. - device (str, optional): device type specification (default: None). + model: the model to train. + optimizer: the optimizer to use. + loss_fn: the loss function to use. + device: device type specification (default: None). Applies to batches after starting the engine. Model *will not* be moved. Device can be CPU, GPU. - non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously + non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect. - prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs + prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`. - output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value + output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`. Returns: @@ -109,19 +109,19 @@ def supervised_training_step_amp( """Factory function for supervised training using ``torch.cuda.amp``. Args: - model (torch.nn.Module): the model to train. - optimizer (torch.optim.Optimizer): the optimizer to use. - loss_fn (torch.nn loss function): the loss function to use. - device (str, optional): device type specification (default: None). + model: the model to train. + optimizer: the optimizer to use. + loss_fn: the loss function to use. + device: device type specification (default: None). Applies to batches after starting the engine. Model *will not* be moved. Device can be CPU, GPU. - non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously + non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect. - prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs + prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`. 
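For reference, a minimal sketch of wrapping the update function returned by ``supervised_training_step`` in an engine; the model, optimizer, loss and data names below are illustrative placeholders, not part of the patch:

.. code-block:: python

    import torch
    from torch import nn

    from ignite.engine import Engine, supervised_training_step

    # placeholder model/optimizer/loss for illustration only
    model = nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    loss_fn = nn.CrossEntropyLoss()

    update_fn = supervised_training_step(model, optimizer, loss_fn)
    trainer = Engine(update_fn)
    # trainer.run(train_loader, max_epochs=2)  # train_loader is hypothetical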
- output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value + output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`. - scaler (torch.cuda.amp.GradScaler, optional): GradScaler instance for gradient scaling. (default: None) + scaler: GradScaler instance for gradient scaling. (default: None) Returns: Callable: update function @@ -177,17 +177,17 @@ def supervised_training_step_apex( """Factory function for supervised training using apex. Args: - model (torch.nn.Module): the model to train. - optimizer (torch.optim.Optimizer): the optimizer to use. - loss_fn (torch.nn loss function): the loss function to use. - device (str, optional): device type specification (default: None). + model: the model to train. + optimizer: the optimizer to use. + loss_fn: the loss function to use. + device: device type specification (default: None). Applies to batches after starting the engine. Model *will not* be moved. Device can be CPU, GPU. - non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously + non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect. - prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs + prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`. - output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value + output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`. Returns: @@ -238,17 +238,17 @@ def supervised_training_step_tpu( """Factory function for supervised training using ``torch_xla``. Args: - model (torch.nn.Module): the model to train. - optimizer (torch.optim.Optimizer): the optimizer to use. - loss_fn (torch.nn loss function): the loss function to use. - device (str, optional): device type specification (default: None). + model: the model to train. + optimizer: the optimizer to use. + loss_fn: the loss function to use. + device: device type specification (default: None). Applies to batches after starting the engine. Model *will not* be moved. Device can be CPU, TPU. - non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously + non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect. - prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs + prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`. - output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value + output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`. Returns: @@ -328,29 +328,32 @@ def create_supervised_trainer( """Factory function for creating a trainer for supervised models. Args: - model (torch.nn.Module): the model to train. 
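A sketch of the ``amp`` variant with explicit gradient scaling, assuming `torch>=1.6` and a CUDA device; ``model``, ``optimizer`` and ``loss_fn`` are placeholders as above:

.. code-block:: python

    from torch.cuda.amp import GradScaler

    from ignite.engine import Engine, supervised_training_step_amp

    scaler = GradScaler()
    update_fn = supervised_training_step_amp(
        model, optimizer, loss_fn, device="cuda", scaler=scaler
    )
    trainer = Engine(update_fn)  # forward pass runs under torch.cuda.amp.autocast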
- optimizer (torch.optim.Optimizer): the optimizer to use.
- loss_fn (torch.nn loss function): the loss function to use.
- device (str, optional): device type specification (default: None).
+ model: the model to train.
+ optimizer: the optimizer to use.
+ loss_fn: the loss function to use.
+ device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU or TPU.
- non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
+ non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
- prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
+ prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
- output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value
+ output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
- deterministic (bool, optional): if True, returns deterministic engine of type
+ deterministic: if True, returns deterministic engine of type
:class:`~ignite.engine.deterministic.DeterministicEngine`, otherwise :class:`~ignite.engine.engine.Engine`
(default: False).
- amp_mode (str, optional): can be ``amp`` or ``apex``, model and optimizer will be casted to float16 using
+ amp_mode: can be ``amp`` or ``apex``, model and optimizer will be cast to float16 using
`torch.cuda.amp `_ for ``amp`` and
using `apex `_ for ``apex``. (default: None)
- scaler (torch.cuda.amp.GradScaler, bool, optional): GradScaler instance for gradient scaling if `torch>=1.6.0`
+ scaler: GradScaler instance for gradient scaling if `torch>=1.6.0`
and ``amp_mode`` is ``amp``. If ``amp_mode`` is ``apex``, this argument will be ignored.
If True, will create default GradScaler. If GradScaler instance is passed, it will be used instead.
(default: False)
+ Returns:
+ a trainer engine with supervised update function.
+
Note:
If ``scaler`` is True, GradScaler instance will be created internally and trainer state has attribute named
``scaler`` for that instance and can be used for saving and loading.
@@ -375,9 +378,6 @@ def create_supervised_trainer(
See more: https://nvidia.github.io/apex/amp.html#module-apex.amp

- Returns:
- Engine: a trainer engine with supervised update function.

.. versionchanged:: 0.5.0
- Added ``amp_mode`` argument for automatic mixed precision.
@@ -424,18 +424,21 @@ def create_supervised_evaluator(
Factory function for creating an evaluator for supervised models.

Args:
- model (`torch.nn.Module`): the model to train.
- metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.
- device (str, optional): device type specification (default: None).
+ model: the model to train.
+ metrics: a map of metric names to Metrics.
+ device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
- non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
+ non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
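Putting the trainer factory together, a sketch with mixed precision enabled (assumes CUDA and `torch>=1.6`; names are placeholders):

.. code-block:: python

    from ignite.engine import create_supervised_trainer

    trainer = create_supervised_trainer(
        model, optimizer, loss_fn, device="cuda", amp_mode="amp", scaler=True
    )
    # with scaler=True a GradScaler is created internally and exposed
    # as trainer.state.scaler, which can be saved and loaded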
- prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs + prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`. - output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value + output_transform: function that receives 'x', 'y', 'y_pred' and returns value to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits output expected by metrics. If you change it you should use `output_transform` in metrics. + Returns: + an evaluator engine with supervised inference function. + Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is a tuple of `(batch_pred, batch_y)` by default. @@ -451,9 +454,6 @@ def create_supervised_evaluator( - `PyTorch Documentation `_ - `PyTorch's Explanation `_ - - Returns: - Engine: an evaluator engine with supervised inference function. """ metrics = metrics or {} diff --git a/ignite/engine/deterministic.py b/ignite/engine/deterministic.py index 3688f9a14bda..6c95f0a61aae 100644 --- a/ignite/engine/deterministic.py +++ b/ignite/engine/deterministic.py @@ -20,8 +20,8 @@ def update_dataloader(dataloader: DataLoader, new_batch_sampler: BatchSampler) - dataloader with new batch sampler. Args: - dataloader (torch.utils.data.DataLoader): input dataloader - new_batch_sampler (torch.utils.data.sampler.BatchSampler): new batch sampler to use + dataloader: input dataloader + new_batch_sampler: new batch sampler to use Returns: DataLoader @@ -39,22 +39,21 @@ class ReproducibleBatchSampler(BatchSampler): """Reproducible batch sampler. This class internally iterates and stores indices of the input batch sampler. This helps to start providing data batches from an iteration in a deterministic way. - Usage: + Example: - Setup dataloader with `ReproducibleBatchSampler` and start providing data batches from an iteration: + Setup dataloader with `ReproducibleBatchSampler` and start providing data batches from an iteration - .. code-block:: python + .. code-block:: python - from ignite.engine.deterministic import update_dataloader + from ignite.engine.deterministic import update_dataloader - dataloader = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler)) - # rewind dataloader to a specific iteration: - dataloader.batch_sampler.start_iteration = start_iteration + dataloader = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler)) + # rewind dataloader to a specific iteration: + dataloader.batch_sampler.start_iteration = start_iteration Args: - batch_sampler (torch.utils.data.sampler.BatchSampler): batch sampler same as used with - `torch.utils.data.DataLoader` - start_iteration (int, optional): optional start iteration + batch_sampler: batch sampler same as used with `torch.utils.data.DataLoader`. + start_iteration: optional start iteration. """ def __init__(self, batch_sampler: BatchSampler, start_iteration: Optional[int] = None): @@ -119,7 +118,7 @@ def keep_random_state(func: Callable) -> Callable: while executing a function. For more details on usage, please see :ref:`Dataflow synchronization`. 
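For the evaluator factory documented above, a sketch pairing it with a metric; ``model`` and ``val_loader`` are placeholders:

.. code-block:: python

    from ignite.engine import create_supervised_evaluator
    from ignite.metrics import Accuracy

    evaluator = create_supervised_evaluator(model, metrics={"accuracy": Accuracy()})
    # state = evaluator.run(val_loader)
    # state.metrics["accuracy"] is computed from the default (y_pred, y) output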
Args: - func (callable): function to decorate + func: function to decorate """ @wraps(func) @@ -167,6 +166,9 @@ class DeterministicEngine(Engine): This class can produce exactly the same dataflow when resuming the run from an epoch (or more precisely from dataflow restart) and using torch `DataLoader` with `num_workers > 1` as data provider. + Args: + process_function: A function receiving a handle to the engine and the current batch + in each iteration, and returns data to be stored in the engine's state. """ def __init__(self, process_function: Callable): diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index da666b9ddc4b..0406156618c5 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -21,14 +21,14 @@ class Engine(Serializable): """Runs a given ``process_function`` over each batch of a dataset, emitting events as it goes. Args: - process_function (callable): A function receiving a handle to the engine and the current batch + process_function: A function receiving a handle to the engine and the current batch in each iteration, and returns data to be stored in the engine's state. Attributes: - state (State): object that is used to pass internal and user-defined state between event handlers. + state: object that is used to pass internal and user-defined state between event handlers. It is created with the engine and its attributes (e.g. ``state.iteration``, ``state.epoch`` etc) are reset on every :meth:`~ignite.engine.engine.Engine.run`. - last_event_name (Events): last event name triggered by the engine. + last_event_name: last event name triggered by the engine. Examples: @@ -118,6 +118,8 @@ def compute_mean_std(engine, batch): """ + state: State + last_event_name: Optional[Events] _state_dict_all_req_keys = ("epoch_length", "max_epochs") _state_dict_one_of_opt_keys = ("iteration", "epoch") @@ -154,9 +156,9 @@ def register_events( By default, the events from :class:`~ignite.engine.events.Events` are registered. Args: - *event_names (iterable): Defines the name of the event being supported. New events can be a str + event_names: Defines the name of the event being supported. New events can be a str or an object derived from :class:`~ignite.engine.events.EventEnum`. See example below. - event_to_attr (dict, optional): A dictionary to map an event to a state attribute. + event_to_attr: A dictionary to map an event to a state attribute. Example usage: @@ -253,19 +255,19 @@ def add_event_handler(self, event_name: Any, handler: Callable, *args: Any, **kw event_name: An event or a list of events to attach the handler. Valid events are from :class:`~ignite.engine.events.Events` or any ``event_name`` added by :meth:`~ignite.engine.engine.Engine.register_events`. - handler (callable): the callable event handler that should be invoked. No restrictions on its signature. + handler: the callable event handler that should be invoked. No restrictions on its signature. The first argument can be optionally `engine`, the :class:`~ignite.engine.engine.Engine` object, handler is bound to. *args: optional args to be passed to ``handler``. **kwargs: optional keyword args to be passed to ``handler``. + Returns: + :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler. + Note: Note that other arguments can be passed to the handler in addition to the `*args` and `**kwargs` passed here, for example during :attr:`~ignite.engine.events.Events.EXCEPTION_RAISED`. 
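A sketch of attaching and later detaching a handler via the returned handle; ``trainer`` is a placeholder engine and ``log_loss`` is hypothetical:

.. code-block:: python

    from ignite.engine import Events

    def log_loss(engine):
        print(f"iteration {engine.state.iteration}: loss = {engine.state.output}")

    handle = trainer.add_event_handler(Events.ITERATION_COMPLETED, log_loss)
    # the RemovableEventHandle detaches the handler when it is no longer needed
    handle.remove()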
- Returns: - :class:`~ignite.engine.RemovableEventHandle`, which can be used to remove the handler. - Example usage: .. code-block:: python @@ -328,7 +330,7 @@ def has_event_handler(self, handler: Callable, event_name: Optional[Any] = None) """Check if the specified event has the specified handler. Args: - handler (callable): the callable event handler. + handler: the callable event handler. event_name: The event the handler attached to. Set this to ``None`` to search all events. """ @@ -354,7 +356,7 @@ def remove_event_handler(self, handler: Callable, event_name: Any) -> None: """Remove event handler `handler` from registered handlers of the engine Args: - handler (callable): the callable event handler that should be removed + handler: the callable event handler that should be removed event_name: The event the handler attached to. """ @@ -376,8 +378,8 @@ def on(self, event_name: Any, *args: Any, **kwargs: Any) -> Callable: Args: event_name: An event to attach the handler to. Valid events are from :class:`~ignite.engine.events.Events` or any ``event_name`` added by :meth:`~ignite.engine.engine.Engine.register_events`. - *args: optional args to be passed to `handler`. - **kwargs: optional keyword args to be passed to `handler`. + args: optional args to be passed to `handler`. + kwargs: optional keyword args to be passed to `handler`. Example usage: @@ -514,7 +516,7 @@ def load_state_dict(self, state_dict: Mapping) -> None: This method does not remove any custom attributes added by user. Args: - state_dict (Mapping): a dict with parameters + state_dict: a dict with parameters .. code-block:: python @@ -570,7 +572,7 @@ def set_data(self, data: Union[Iterable, DataLoader]) -> None: from newly provided data. Please, note that epoch length is not modified. Args: - data (Iterable): Collection of batches allowing repeated iteration (e.g., list or `DataLoader`). + data: Collection of batches allowing repeated iteration (e.g., list or `DataLoader`). Example usage: User can switch data provider during the training: @@ -620,16 +622,16 @@ def run( - If state is defined, engine is NOT "done", then input arguments if provided override defined state. Args: - data (Iterable): Collection of batches allowing repeated iteration (e.g., list or `DataLoader`). - max_epochs (int, optional): Max epochs to run for (default: None). + data: Collection of batches allowing repeated iteration (e.g., list or `DataLoader`). + max_epochs: Max epochs to run for (default: None). If a new state should be created (first run or run again from ended engine), it's default value is 1. If run is resuming from a state, provided `max_epochs` will be taken into account and should be larger than `engine.state.max_epochs`. - epoch_length (int, optional): Number of iterations to count as one epoch. By default, it can be set as + epoch_length: Number of iterations to count as one epoch. By default, it can be set as `len(data)`. If `data` is an iterator and `epoch_length` is not set, then it will be automatically determined as the iteration on which data iterator raises `StopIteration`. This argument should not change if run is resuming from a state. - max_iters (int, optional): Number of iterations to run for. + max_iters: Number of iterations to run for. `max_iters` and `max_epochs` are mutually exclusive; only one of the two arguments should be provided. 
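A sketch of pausing and resuming a run through the state-dict API described above; ``trainer`` and ``train_loader`` are placeholders:

.. code-block:: python

    trainer.run(train_loader, max_epochs=5, epoch_length=100)

    checkpoint = trainer.state_dict()  # epoch_length, max_epochs and iteration/epoch
    # ... later, on an engine created with the same process function:
    trainer.load_state_dict(checkpoint)
    trainer.run(train_loader)  # continues from the restored iteration/epoch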
Returns: diff --git a/ignite/engine/events.py b/ignite/engine/events.py index 9afaff32dac8..d0800f0cc171 100644 --- a/ignite/engine/events.py +++ b/ignite/engine/events.py @@ -20,12 +20,11 @@ class CallableEventWithFilter: be run at the current event (if the event type is correct) Args: - value (str): The actual enum value. Only needed for internal use. Do not touch! - event_filter (callable): A function taking the engine and the current event value as input and returning a + value: The actual enum value. Only needed for internal use. Do not touch! + event_filter: A function taking the engine and the current event value as input and returning a boolean to indicate whether this event should be executed. Defaults to None, which will result to a function that always returns `True` - name (str, optional): The enum-name of the current object. Only needed for internal use. Do not touch! - + name: The enum-name of the current object. Only needed for internal use. Do not touch! """ def __init__(self, value: str, event_filter: Optional[Callable] = None, name: Optional[str] = None) -> None: @@ -58,10 +57,10 @@ def __call__( (which must take in the engine and current event value and return a boolean) or an every or once value Args: - event_filter (callable, optional): a filter function to check if the event should be executed when + event_filter: a filter function to check if the event should be executed when the event type was fired - every (int, optional): a value specifying how often the event should be fired - once (int, optional): a value specifying when the event should be fired (if only once) + every: a value specifying how often the event should be fired + once: a value specifying when the event should be fired (if only once) Returns: CallableEventWithFilter: A new event having the same value but a different filter function @@ -351,6 +350,8 @@ class State: state.times # dictionary with total and per-epoch times fetched on # keys: Events.EPOCH_COMPLETED.name and Events.COMPLETED.name + Args: + kwargs: keyword arguments to be defined as State attributes. """ event_to_attr = { From df8356ba22b9414054be44dfe2c216a0f8efc851 Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Tue, 23 Feb 2021 20:50:57 +0630 Subject: [PATCH 2/3] fix: remove unneeded class attribute --- docs/source/conf.py | 1 - ignite/engine/engine.py | 6 ++---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 76598d1aac9c..914fb40b8145 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -207,7 +207,6 @@ # -- Type hints configs ------------------------------------------------------ autodoc_inherit_docstrings = False -napoleon_use_ivar = True autoclass_content = "both" autodoc_typehints = "description" diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py index 0406156618c5..a34d5a3b51ee 100644 --- a/ignite/engine/engine.py +++ b/ignite/engine/engine.py @@ -118,8 +118,6 @@ def compute_mean_std(engine, batch): """ - state: State - last_event_name: Optional[Events] _state_dict_all_req_keys = ("epoch_length", "max_epochs") _state_dict_one_of_opt_keys = ("iteration", "epoch") @@ -258,8 +256,8 @@ def add_event_handler(self, event_name: Any, handler: Callable, *args: Any, **kw handler: the callable event handler that should be invoked. No restrictions on its signature. The first argument can be optionally `engine`, the :class:`~ignite.engine.engine.Engine` object, handler is bound to. - *args: optional args to be passed to ``handler``. 
- **kwargs: optional keyword args to be passed to ``handler``.
+ args: optional args to be passed to ``handler``.
+ kwargs: optional keyword args to be passed to ``handler``.
Returns:
:class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.

From f37960c5c48e39e2f1969c586df6a7ca5c85f450 Mon Sep 17 00:00:00 2001
From: ydcjeff
Date: Tue, 23 Feb 2021 21:02:53 +0630
Subject: [PATCH 3/3] fix: autodoc_inherit_docstrings=True in conf.py

---
docs/source/conf.py | 1 -
1 file changed, 1 deletion(-)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 914fb40b8145..a69170d460c4 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -206,7 +206,6 @@

# -- Type hints configs ------------------------------------------------------

-autodoc_inherit_docstrings = False
autoclass_content = "both"
autodoc_typehints = "description"
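For completeness, the filtered events documented in ``ignite/engine/events.py`` above can be exercised as follows (a sketch; ``trainer`` stands for any engine):

.. code-block:: python

    from ignite.engine import Events

    @trainer.on(Events.ITERATION_COMPLETED(every=100))
    def log_every_100(engine):
        print(engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED(once=3))
    def fire_once_at_epoch_3(engine):
        print("epoch 3 reached")

    # event_filter receives the engine and the current event value
    @trainer.on(Events.ITERATION_COMPLETED(event_filter=lambda e, i: i % 2 == 0))
    def log_even_iterations(engine):
        print(engine.state.iteration)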