From c37bfa3bfc74883403df623ae57f431b0f31fc74 Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Mon, 22 Feb 2021 17:09:05 +0630 Subject: [PATCH 01/11] docs: only show type hints in docstring --- docs/source/conf.py | 4 +- ignite/contrib/engines/common.py | 142 +++++++++++++++---------------- ignite/contrib/engines/tbptt.py | 23 +++-- 3 files changed, 85 insertions(+), 84 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 372a6fb2f1eb..a69170d460c4 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -51,6 +51,7 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ + "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.doctest", "sphinx.ext.intersphinx", @@ -205,7 +206,8 @@ # -- Type hints configs ------------------------------------------------------ -autodoc_typehints = "signature" +autoclass_content = "both" +autodoc_typehints = "description" # -- A patch that turns-off cross refs for type annotations ------------------ diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py index 7b769ea71bf7..dd6e15d622cb 100644 --- a/ignite/contrib/engines/common.py +++ b/ignite/contrib/engines/common.py @@ -59,35 +59,35 @@ def setup_common_training_handlers( - Two progress bars on epochs and optionally on iterations Args: - trainer (Engine): trainer engine. Output of trainer's `update_function` should be a dictionary + trainer: trainer engine. Output of trainer's `update_function` should be a dictionary or sequence or a single tensor. - train_sampler (torch.utils.data.DistributedSampler, optional): Optional distributed sampler used to call + train_sampler: Optional distributed sampler used to call `set_epoch` method on epoch started event. - to_save (dict, optional): dictionary with objects to save in the checkpoint. This argument is passed to + to_save: dictionary with objects to save in the checkpoint. This argument is passed to :class:`~ignite.handlers.Checkpoint` instance. - save_every_iters (int, optional): saving interval. By default, `to_save` objects are stored + save_every_iters: saving interval. By default, `to_save` objects are stored each 1000 iterations. - output_path (str, optional): output path to indicate where `to_save` objects are stored. Argument is mutually + output_path: output path to indicate where `to_save` objects are stored. Argument is mutually exclusive with ``save_handler``. - lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): learning rate scheduler + lr_scheduler: learning rate scheduler as native torch LRScheduler or ignite's parameter scheduler. - with_gpu_stats (bool, optional): if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the + with_gpu_stats: if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the trainer. This requires `pynvml` package to be installed. - output_names (list/tuple, optional): list of names associated with `update_function` output dictionary. - with_pbars (bool, optional): if True, two progress bars on epochs and optionally on iterations are attached. + output_names: list of names associated with `update_function` output dictionary. + with_pbars: if True, two progress bars on epochs and optionally on iterations are attached. Default, True. - with_pbar_on_iters (bool, optional): if True, a progress bar on iterations is attached to the trainer. + with_pbar_on_iters: if True, a progress bar on iterations is attached to the trainer. Default, True. 
- log_every_iters (int, optional): logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for + log_every_iters: logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for epoch-wise progress bar. Default, 100. - stop_on_nan (bool, optional): if True, :class:`~ignite.handlers.TerminateOnNan` handler is added to the trainer. + stop_on_nan: if True, :class:`~ignite.handlers.TerminateOnNan` handler is added to the trainer. Default, True. - clear_cuda_cache (bool, optional): if True, `torch.cuda.empty_cache()` is called every end of epoch. + clear_cuda_cache: if True, `torch.cuda.empty_cache()` is called every end of epoch. Default, True. - save_handler (callable or :class:`~ignite.handlers.checkpoint.BaseSaveHandler`, optional): Method or callable + save_handler: Method or callable class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Checkpoint` for more details. Argument is mutually exclusive with ``output_path``. - **kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`. + kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`. """ if idist.get_world_size() > 1: @@ -352,15 +352,15 @@ def setup_tb_logging( - Evaluation metrics Args: - output_path (str): logging directory path - trainer (Engine): trainer engine - optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of + output_path: logging directory path + trainer: trainer engine + optimizers: single or dictionary of torch optimizers. If a dictionary, keys are used as tags arguments for logging. - evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary, + evaluators: single or dictionary of evaluators. If a dictionary, keys are used as tags arguments for logging. - log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration, + log_every_iters: interval for loggers attached to iteration events. To log every iteration, value can be set to 1 or None. - **kwargs: optional keyword args to be passed to construct the logger. + kwargs: optional keyword args to be passed to construct the logger. Returns: :class:`~ignite.contrib.handlers.tensorboard_logger.TensorboardLogger` @@ -384,14 +384,14 @@ def setup_visdom_logging( - Evaluation metrics Args: - trainer (Engine): trainer engine - optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of + trainer: trainer engine + optimizers: single or dictionary of torch optimizers. If a dictionary, keys are used as tags arguments for logging. - evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary, + evaluators: single or dictionary of evaluators. If a dictionary, keys are used as tags arguments for logging. - log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration, + log_every_iters: interval for loggers attached to iteration events. To log every iteration, value can be set to 1 or None. - **kwargs: optional keyword args to be passed to construct the logger. + kwargs: optional keyword args to be passed to construct the logger. 
Returns: :class:`~ignite.contrib.handlers.visdom_logger.VisdomLogger` @@ -415,14 +415,14 @@ def setup_mlflow_logging( - Evaluation metrics Args: - trainer (Engine): trainer engine - optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of + trainer: trainer engine + optimizers: single or dictionary of torch optimizers. If a dictionary, keys are used as tags arguments for logging. - evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary, + evaluators: single or dictionary of evaluators. If a dictionary, keys are used as tags arguments for logging. - log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration, + log_every_iters: interval for loggers attached to iteration events. To log every iteration, value can be set to 1 or None. - **kwargs: optional keyword args to be passed to construct the logger. + kwargs: optional keyword args to be passed to construct the logger. Returns: :class:`~ignite.contrib.handlers.mlflow_logger.MLflowLogger` @@ -446,14 +446,14 @@ def setup_neptune_logging( - Evaluation metrics Args: - trainer (Engine): trainer engine - optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of + trainer: trainer engine + optimizers: single or dictionary of torch optimizers. If a dictionary, keys are used as tags arguments for logging. - evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary, + evaluators: single or dictionary of evaluators. If a dictionary, keys are used as tags arguments for logging. - log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration, + log_every_iters: interval for loggers attached to iteration events. To log every iteration, value can be set to 1 or None. - **kwargs: optional keyword args to be passed to construct the logger. + kwargs: optional keyword args to be passed to construct the logger. Returns: :class:`~ignite.contrib.handlers.neptune_logger.NeptuneLogger` @@ -477,14 +477,14 @@ def setup_wandb_logging( - Evaluation metrics Args: - trainer (Engine): trainer engine - optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of + trainer: trainer engine + optimizers: single or dictionary of torch optimizers. If a dictionary, keys are used as tags arguments for logging. - evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary, + evaluators: single or dictionary of evaluators. If a dictionary, keys are used as tags arguments for logging. - log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration, + log_every_iters: interval for loggers attached to iteration events. To log every iteration, value can be set to 1 or None. - **kwargs: optional keyword args to be passed to construct the logger. + kwargs: optional keyword args to be passed to construct the logger. Returns: :class:`~ignite.contrib.handlers.wandb_logger.WandBLogger` @@ -508,14 +508,14 @@ def setup_plx_logging( - Evaluation metrics Args: - trainer (Engine): trainer engine - optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of + trainer: trainer engine + optimizers: single or dictionary of torch optimizers. If a dictionary, keys are used as tags arguments for logging. 
- evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary, + evaluators: single or dictionary of evaluators. If a dictionary, keys are used as tags arguments for logging. - log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration, + log_every_iters: interval for loggers attached to iteration events. To log every iteration, value can be set to 1 or None. - **kwargs: optional keyword args to be passed to construct the logger. + kwargs: optional keyword args to be passed to construct the logger. Returns: :class:`~ignite.contrib.handlers.polyaxon_logger.PolyaxonLogger` @@ -539,14 +539,14 @@ def setup_clearml_logging( - Evaluation metrics Args: - trainer (Engine): trainer engine - optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of + trainer: trainer engine + optimizers: single or dictionary of torch optimizers. If a dictionary, keys are used as tags arguments for logging. - evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary, + evaluators: single or dictionary of evaluators. If a dictionary, keys are used as tags arguments for logging. - log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration, + log_every_iters: interval for loggers attached to iteration events. To log every iteration, value can be set to 1 or None. - **kwargs: optional keyword args to be passed to construct the logger. + kwargs: optional keyword args to be passed to construct the logger. Returns: :class:`~ignite.contrib.handlers.clearml_logger.ClearMLLogger` @@ -588,21 +588,21 @@ def gen_save_best_models_by_val_score( ``save_handler``. Args: - save_handler (callable or :class:`~ignite.handlers.checkpoint.BaseSaveHandler`): Method or callable class to + save_handler: Method or callable class to use to save engine and other provided objects. Function receives two objects: checkpoint as a dictionary and filename. If ``save_handler`` is callable class, it can inherit of :class:`~ignite.handlers.checkpoint.BaseSaveHandler` and optionally implement ``remove`` method to keep a fixed number of saved checkpoints. In case if user needs to save engine's checkpoint on a disk, ``save_handler`` can be defined with :class:`~ignite.handlers.DiskSaver`. - evaluator (Engine): evaluation engine used to provide the score - models (nn.Module or Mapping): model or dictionary with the object to save. Objects should have + evaluator: evaluation engine used to provide the score + models: model or dictionary with the object to save. Objects should have implemented ``state_dict`` and ``load_state_dict`` methods. - metric_name (str): metric name to use for score evaluation. This metric should be present in + metric_name: metric name to use for score evaluation. This metric should be present in `evaluator.state.metrics`. - n_saved (int, optional): number of best models to store - trainer (Engine, optional): trainer engine to fetch the epoch when saving the best model. - tag (str, optional): score name prefix: `{tag}_{metric_name}`. By default, tag is "val". - **kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`. + n_saved: number of best models to store + trainer: trainer engine to fetch the epoch when saving the best model. + tag: score name prefix: `{tag}_{metric_name}`. By default, tag is "val". 
+ kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`. Returns: A :class:`~ignite.handlers.Checkpoint` handler. @@ -646,15 +646,15 @@ def save_best_model_by_val_score( Models with highest metric value will be retained. Args: - output_path (str): output path to indicate where to save best models - evaluator (Engine): evaluation engine used to provide the score - model (nn.Module): model to store - metric_name (str): metric name to use for score evaluation. This metric should be present in + output_path: output path to indicate where to save best models + evaluator: evaluation engine used to provide the score + model: model to store + metric_name: metric name to use for score evaluation. This metric should be present in `evaluator.state.metrics`. - n_saved (int, optional): number of best models to store - trainer (Engine, optional): trainer engine to fetch the epoch when saving the best model. - tag (str, optional): score name prefix: `{tag}_{metric_name}`. By default, tag is "val". - **kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`. + n_saved: number of best models to store + trainer: trainer engine to fetch the epoch when saving the best model. + tag: score name prefix: `{tag}_{metric_name}`. By default, tag is "val". + kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`. Returns: A :class:`~ignite.handlers.Checkpoint` handler. @@ -678,10 +678,10 @@ def add_early_stopping_by_val_score( Metric value should increase in order to keep training and not early stop. Args: - patience (int): number of events to wait if no improvement and then stop the training. - evaluator (Engine): evaluation engine used to provide the score - trainer (Engine): trainer engine to stop the run if no improvement. - metric_name (str): metric name to use for score evaluation. This metric should be present in + patience: number of events to wait if no improvement and then stop the training. + evaluator: evaluation engine used to provide the score + trainer: trainer engine to stop the run if no improvement. + metric_name: metric name to use for score evaluation. This metric should be present in `evaluator.state.metrics`. Returns: diff --git a/ignite/contrib/engines/tbptt.py b/ignite/contrib/engines/tbptt.py index d3efba4accbf..1826532b2384 100644 --- a/ignite/contrib/engines/tbptt.py +++ b/ignite/contrib/engines/tbptt.py @@ -57,19 +57,22 @@ def create_supervised_tbptt_trainer( `tbtt_step` time steps. Args: - model (`torch.nn.Module`): the model to train. - optimizer (`torch.optim.Optimizer`): the optimizer to use. - loss_fn (torch.nn loss function): the loss function to use. - tbtt_step (int): the length of time chunks (last one may be smaller). - dim (int): axis representing the time dimension. - device (str, optional): device type specification (default: None). + model: the model to train. + optimizer: the optimizer to use. + loss_fn: the loss function to use. + tbtt_step: the length of time chunks (last one may be smaller). + dim: axis representing the time dimension. + device: device type specification (default: None). Applies to batches. - non_blocking (bool, optional): if True and this copy is between CPU and GPU, + non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect. 
- prepare_batch (callable, optional): function that receives `batch`, `device`, + prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`. + Returns: + a trainer engine with supervised update function. + .. warning:: The internal use of `device` has changed. @@ -80,10 +83,6 @@ def create_supervised_tbptt_trainer( * `PyTorch Documentation `_ * `PyTorch's Explanation `_ - - Returns: - Engine: a trainer engine with supervised update function. - """ def _update(engine: Engine, batch: Sequence[torch.Tensor]) -> float: From 87fa1d18d89338a447524f9f412f76ccaf1a4a50 Mon Sep 17 00:00:00 2001 From: Jeff Yang Date: Mon, 22 Feb 2021 20:38:24 +0630 Subject: [PATCH 02/11] Apply suggestions from code review Co-authored-by: vfdev --- ignite/contrib/engines/common.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py index dd6e15d622cb..24f12205565e 100644 --- a/ignite/contrib/engines/common.py +++ b/ignite/contrib/engines/common.py @@ -71,23 +71,23 @@ def setup_common_training_handlers( exclusive with ``save_handler``. lr_scheduler: learning rate scheduler as native torch LRScheduler or ignite's parameter scheduler. - with_gpu_stats: if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the + with_gpu_stats: if True, :class:`~ignite.contrib.metrics.GpuInfo` is attached to the trainer. This requires `pynvml` package to be installed. output_names: list of names associated with `update_function` output dictionary. with_pbars: if True, two progress bars on epochs and optionally on iterations are attached. Default, True. with_pbar_on_iters: if True, a progress bar on iterations is attached to the trainer. Default, True. - log_every_iters: logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for + log_every_iters: logging interval for :class:`~ignite.contrib.metrics.GpuInfo` and for epoch-wise progress bar. Default, 100. stop_on_nan: if True, :class:`~ignite.handlers.TerminateOnNan` handler is added to the trainer. Default, True. clear_cuda_cache: if True, `torch.cuda.empty_cache()` is called every end of epoch. Default, True. save_handler: Method or callable - class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Checkpoint` for more details. + class to use to store ``to_save``. See :class:`~ignite.handlers.Checkpoint` for more details. Argument is mutually exclusive with ``output_path``. - kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`. + kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.Checkpoint`. """ if idist.get_world_size() > 1: From afd2d6bd150d2d0f346d0d0cdd8ad324393cdfd9 Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Mon, 22 Feb 2021 22:00:36 +0630 Subject: [PATCH 03/11] fix(docs): correctly link to missing links --- ignite/contrib/engines/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py index 24f12205565e..2cf2712a9fdd 100644 --- a/ignite/contrib/engines/common.py +++ b/ignite/contrib/engines/common.py @@ -602,7 +602,7 @@ def gen_save_best_models_by_val_score( n_saved: number of best models to store trainer: trainer engine to fetch the epoch when saving the best model. tag: score name prefix: `{tag}_{metric_name}`. By default, tag is "val". 
- kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`. + kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.Checkpoint`. Returns: A :class:`~ignite.handlers.Checkpoint` handler. @@ -654,7 +654,7 @@ def save_best_model_by_val_score( n_saved: number of best models to store trainer: trainer engine to fetch the epoch when saving the best model. tag: score name prefix: `{tag}_{metric_name}`. By default, tag is "val". - kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`. + kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.Checkpoint`. Returns: A :class:`~ignite.handlers.Checkpoint` handler. From 34c650cc4c4b7135a1707cfb07d95591e70eeeff Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Mon, 22 Feb 2021 23:55:04 +0630 Subject: [PATCH 04/11] docs: rm type hints in docstring of ignite.contrib.handlers --- ignite/contrib/handlers/base_logger.py | 16 +-- ignite/contrib/handlers/clearml_logger.py | 48 +++---- ignite/contrib/handlers/lr_finder.py | 30 ++-- ignite/contrib/handlers/mlflow_logger.py | 16 +-- ignite/contrib/handlers/neptune_logger.py | 44 +++--- ignite/contrib/handlers/param_scheduler.py | 130 +++++++++--------- ignite/contrib/handlers/polyaxon_logger.py | 14 +- ignite/contrib/handlers/stores.py | 2 +- ignite/contrib/handlers/tensorboard_logger.py | 34 ++--- ignite/contrib/handlers/time_profilers.py | 17 +++ ignite/contrib/handlers/tqdm_logger.py | 18 +-- ignite/contrib/handlers/visdom_logger.py | 48 +++---- ignite/contrib/handlers/wandb_logger.py | 18 +-- 13 files changed, 226 insertions(+), 209 deletions(-) diff --git a/ignite/contrib/handlers/base_logger.py b/ignite/contrib/handlers/base_logger.py index 6fd9949b274d..637be7af2208 100644 --- a/ignite/contrib/handlers/base_logger.py +++ b/ignite/contrib/handlers/base_logger.py @@ -152,8 +152,8 @@ def attach( """Attach the logger to the engine and execute `log_handler` function at `event_name` events. Args: - engine (Engine): engine object. - log_handler (callable): a logging handler to execute + engine: engine object. + log_handler: a logging handler to execute event_name: event to attach the logging handler to. Valid events are from :class:`~ignite.engine.events.Events` or class:`~ignite.engine.events.EventsList` or any `event_name` added by :meth:`~ignite.engine.engine.Engine.register_events`. @@ -180,12 +180,12 @@ def attach_output_handler(self, engine: Engine, event_name: Any, *args: Any, **k """Shortcut method to attach `OutputHandler` to the logger. Args: - engine (Engine): engine object. + engine: engine object. event_name: event to attach the logging handler to. Valid events are from :class:`~ignite.engine.events.Events` or any `event_name` added by :meth:`~ignite.engine.engine.Engine.register_events`. - *args: args to initialize `OutputHandler` - **kwargs: kwargs to initialize `OutputHandler` + args: args to initialize `OutputHandler` + kwargs: kwargs to initialize `OutputHandler` Returns: :class:`~ignite.engine.RemovableEventHandle`, which can be used to remove the handler. @@ -198,12 +198,12 @@ def attach_opt_params_handler( """Shortcut method to attach `OptimizerParamsHandler` to the logger. Args: - engine (Engine): engine object. + engine: engine object. event_name: event to attach the logging handler to. Valid events are from :class:`~ignite.engine.events.Events` or any `event_name` added by :meth:`~ignite.engine.engine.Engine.register_events`. 
- *args: args to initialize `OptimizerParamsHandler` - **kwargs: kwargs to initialize `OptimizerParamsHandler` + args: args to initialize `OptimizerParamsHandler` + kwargs: kwargs to initialize `OptimizerParamsHandler` Returns: :class:`~ignite.engine.RemovableEventHandle`, which can be used to remove the handler. diff --git a/ignite/contrib/handlers/clearml_logger.py b/ignite/contrib/handlers/clearml_logger.py index e4d04f5f3dad..50965e5e8ae0 100644 --- a/ignite/contrib/handlers/clearml_logger.py +++ b/ignite/contrib/handlers/clearml_logger.py @@ -49,11 +49,11 @@ class ClearMLLogger(BaseLogger): clearml-init Args: - project_name (str): The name of the project in which the experiment will be created. If the project + project_name: The name of the project in which the experiment will be created. If the project does not exist, it is created. If ``project_name`` is ``None``, the repository name is used. (Optional) - task_name (str): The name of Task (experiment). If ``task_name`` is ``None``, the Python experiment + task_name: The name of Task (experiment). If ``task_name`` is ``None``, the Python experiment script's file name is used. (Optional) - task_type (str): Optional. The task type. Valid values are: + task_type: Optional. The task type. Valid values are: - ``TaskTypes.training`` (Default) - ``TaskTypes.train`` - ``TaskTypes.testing`` @@ -270,14 +270,14 @@ def global_step_transform(*args, **kwargs): ) Args: - tag (str): common title for all produced plots. For example, "training" - metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available + tag: common title for all produced plots. For example, "training" + metric_names: list of metric names to plot or a string "all" to plot all available metrics. - output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number. + output_transform: output transform function to prepare `engine.state.output` as a number. For example, `output_transform = lambda output: output` This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot with corresponding keys. - global_step_transform (callable, optional): global step transform function to output a desired global step. + global_step_transform: global step transform function to output a desired global step. Input of the function is `(engine, event_name)`. Output of function should be an integer. Default is None, global_step based on attached engine. If provided, uses function output as global_step. To setup global step from another engine, please use @@ -359,10 +359,10 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): ) Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): parameter name - tag (str, optional): common title for all produced plots. For example, "generator" + param_name: parameter name + tag: common title for all produced plots. For example, "generator" """ def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None: @@ -410,9 +410,9 @@ class WeightsScalarHandler(BaseWeightsScalarHandler): ) Args: - model (torch.nn.Module): model to log weights - reduction (callable): function to reduce parameters into scalar - tag (str, optional): common title for all produced plots. 
For example, "generator" + model: model to log weights + reduction: function to reduce parameters into scalar + tag: common title for all produced plots. For example, "generator" """ @@ -463,8 +463,8 @@ class WeightsHistHandler(BaseWeightsHistHandler): ) Args: - model (torch.nn.Module): model to log weights - tag (str, optional): common title for all produced plots. For example, 'generator' + model: model to log weights + tag: common title for all produced plots. For example, 'generator' """ @@ -517,9 +517,9 @@ class GradsScalarHandler(BaseWeightsScalarHandler): ) Args: - model (torch.nn.Module): model to log weights - reduction (callable): function to reduce parameters into scalar - tag (str, optional): common title for all produced plots. For example, "generator" + model: model to log weights + reduction: function to reduce parameters into scalar + tag: common title for all produced plots. For example, "generator" """ @@ -569,8 +569,8 @@ class GradsHistHandler(BaseWeightsHistHandler): ) Args: - model (torch.nn.Module): model to log weights - tag (str, optional): common title for all produced plots. For example, 'generator' + model: model to log weights + tag: common title for all produced plots. For example, 'generator' """ @@ -602,12 +602,12 @@ class ClearMLSaver(DiskSaver): Handler that saves input checkpoint as ClearML artifacts Args: - logger (ClearMLLogger, optional): An instance of :class:`~ignite.contrib.handlers.clearml_logger.ClearMLLogger`, + logger: An instance of :class:`~ignite.contrib.handlers.clearml_logger.ClearMLLogger`, ensuring a valid ClearML ``Task`` has been initialized. If not provided, and a ClearML Task has not been manually initialized, a runtime error will be raised. - output_uri (str, optional): The default location for output models and other artifacts uploaded by ClearML. For + output_uri: The default location for output models and other artifacts uploaded by ClearML. For more information, see ``clearml.Task.init``. - dirname (str, optional): Directory path where the checkpoint will be saved. If not provided, a temporary + dirname: Directory path where the checkpoint will be saved. If not provided, a temporary directory will be created. Examples: @@ -793,7 +793,7 @@ def get_local_copy(self, filename: str) -> Optional[str]: In distributed configuration this method should be called on rank 0 process. Args: - filename (str): artifact name. + filename: artifact name. Returns: a local path to a downloaded copy of the artifact diff --git a/ignite/contrib/handlers/lr_finder.py b/ignite/contrib/handlers/lr_finder.py index bc4401f821c3..fdcf8b4f6eab 100644 --- a/ignite/contrib/handlers/lr_finder.py +++ b/ignite/contrib/handlers/lr_finder.py @@ -196,11 +196,11 @@ def plot(self, skip_start: int = 10, skip_end: int = 5, log_lr: bool = True) -> pip install matplotlib Args: - skip_start (int, optional): number of batches to trim from the start. + skip_start: number of batches to trim from the start. Default: 10. - skip_end (int, optional): number of batches to trim from the start. + skip_end: number of batches to trim from the start. Default: 5. - log_lr (bool, optional): True to plot the learning rate in a logarithmic + log_lr: True to plot the learning rate in a logarithmic scale; otherwise, plotted in a linear scale. Default: True. """ try: @@ -273,20 +273,20 @@ def attach( trainer_with_lr_finder.run(dataloader)` Args: - trainer (Engine): lr_finder is attached to this trainer. 
Please, keep in mind that all attached handlers + trainer: lr_finder is attached to this trainer. Please, keep in mind that all attached handlers will be executed. - to_save (Mapping): dictionary with optimizer and other objects that needs to be restored after running + to_save: dictionary with optimizer and other objects that needs to be restored after running the LR finder. For example, `to_save={'optimizer': optimizer, 'model': model}`. All objects should implement `state_dict` and `load_state_dict` methods. - output_transform (callable, optional): function that transforms the trainer's `state.output` after each + output_transform: function that transforms the trainer's `state.output` after each iteration. It must return the loss of that iteration. - num_iter (int, optional): number of iterations for lr schedule between base lr and end_lr. Default, it will + num_iter: number of iterations for lr schedule between base lr and end_lr. Default, it will run for `trainer.state.epoch_length * trainer.state.max_epochs`. - end_lr (float, optional): upper bound for lr search. Default, 10.0. - step_mode (str, optional): "exp" or "linear", which way should the lr be increased from optimizer's initial + end_lr: upper bound for lr search. Default, 10.0. + step_mode: "exp" or "linear", which way should the lr be increased from optimizer's initial lr to `end_lr`. Default, "exp". - smooth_f (float, optional): loss smoothing factor in range `[0, 1)`. Default, 0.05 - diverge_th (float, optional): Used for stopping the search when `current loss > diverge_th * best_loss`. + smooth_f: loss smoothing factor in range `[0, 1)`. Default, 0.05 + diverge_th: Used for stopping the search when `current loss > diverge_th * best_loss`. Default, 5.0. Note: @@ -363,12 +363,12 @@ class _ExponentialLR(_LRScheduler): iterations. Args: - optimizer (torch.optim.Optimizer): wrapped optimizer. - end_lr (float, optional): the initial learning rate which is the lower + optimizer: wrapped optimizer. + end_lr: the initial learning rate which is the lower boundary of the test. Default: 10. - num_iter (int, optional): the number of iterations over which the test + num_iter: the number of iterations over which the test occurs. Default: 100. - last_epoch (int): the index of last epoch. Default: -1. + last_epoch: the index of last epoch. Default: -1. """ diff --git a/ignite/contrib/handlers/mlflow_logger.py b/ignite/contrib/handlers/mlflow_logger.py index d8a7819d8e6a..89a047b47a93 100644 --- a/ignite/contrib/handlers/mlflow_logger.py +++ b/ignite/contrib/handlers/mlflow_logger.py @@ -24,7 +24,7 @@ class MLflowLogger(BaseLogger): pip install mlflow Args: - tracking_uri (str): MLflow tracking uri. See MLflow docs for more details + tracking_uri: MLflow tracking uri. See MLflow docs for more details Examples: @@ -182,14 +182,14 @@ def global_step_transform(*args, **kwargs): ) Args: - tag (str): common title for all produced plots. For example, 'training' - metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available + tag: common title for all produced plots. For example, 'training' + metric_names: list of metric names to plot or a string "all" to plot all available metrics. - output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number. + output_transform: output transform function to prepare `engine.state.output` as a number. 
For example, `output_transform = lambda output: output` This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot with corresponding keys. - global_step_transform (callable, optional): global step transform function to output a desired global step. + global_step_transform: global step transform function to output a desired global step. Input of the function is `(engine, event_name)`. Output of function should be an integer. Default is None, global_step based on attached engine. If provided, uses function output as global_step. To setup global step from another engine, please use @@ -284,10 +284,10 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): ) Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): parameter name - tag (str, optional): common title for all produced plots. For example, 'generator' + param_name: parameter name + tag: common title for all produced plots. For example, 'generator' """ def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None: diff --git a/ignite/contrib/handlers/neptune_logger.py b/ignite/contrib/handlers/neptune_logger.py index 314873d97341..e44eb8fe3c31 100644 --- a/ignite/contrib/handlers/neptune_logger.py +++ b/ignite/contrib/handlers/neptune_logger.py @@ -40,30 +40,30 @@ class NeptuneLogger(BaseLogger): pip install neptune-client Args: - api_token (str | None): Required in online mode. Neputne API token, found on https://neptune.ai. + api_token: Required in online mode. Neputne API token, found on https://neptune.ai. Read how to get your API key https://docs.neptune.ai/python-api/tutorials/get-started.html#copy-api-token. - project_name (str): Required in online mode. Qualified name of a project in a form of + project_name: Required in online mode. Qualified name of a project in a form of "namespace/project_name" for example "tom/minst-classification". If None, the value of NEPTUNE_PROJECT environment variable will be taken. You need to create the project in https://neptune.ai first. - offline_mode (bool): Optional default False. If offline_mode=True no logs will be send to neptune. + offline_mode: Optional default False. If offline_mode=True no logs will be send to neptune. Usually used for debug purposes. - experiment_name (str, optional): Optional. Editable name of the experiment. + experiment_name: Optional. Editable name of the experiment. Name is displayed in the experiment’s Details (Metadata section) and in experiments view as a column. - upload_source_files (list, optional): Optional. List of source files to be uploaded. + upload_source_files: Optional. List of source files to be uploaded. Must be list of str or single str. Uploaded sources are displayed in the experiment’s Source code tab. If None is passed, Python file from which experiment was created will be uploaded. Pass empty list (`[]`) to upload no files. Unix style pathname pattern expansion is supported. For example, you can pass `*.py` to upload all python source files from the current directory. For recursion lookup use `**/*.py` (for Python 3.5 and later). For more information see glob library. - params (dict, optional): Optional. Parameters of the experiment. After experiment creation params are read-only. + params: Optional. Parameters of the experiment. After experiment creation params are read-only. 
Parameters are displayed in the experiment’s Parameters section and each key-value pair can be viewed in experiments view as a column. - properties (dict, optional): Optional default is `{}`. Properties of the experiment. + properties: Optional default is `{}`. Properties of the experiment. They are editable after experiment is created. Properties are displayed in the experiment’s Details and each key-value pair can be viewed in experiments view as a column. - tags (list, optional): Optional default `[]`. Must be list of str. Tags of the experiment. + tags: Optional default `[]`. Must be list of str. Tags of the experiment. Tags are displayed in the experiment’s Details and can be viewed in experiments view as a column. Examples: @@ -293,14 +293,14 @@ def global_step_transform(*args, **kwargs): ) Args: - tag (str): common title for all produced plots. For example, "training" - metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available + tag: common title for all produced plots. For example, "training" + metric_names: list of metric names to plot or a string "all" to plot all available metrics. - output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number. + output_transform: output transform function to prepare `engine.state.output` as a number. For example, `output_transform = lambda output: output` This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot with corresponding keys. - global_step_transform (callable, optional): global step transform function to output a desired global step. + global_step_transform: global step transform function to output a desired global step. Input of the function is `(engine, event_name)`. Output of function should be an integer. Default is None, global_step based on attached engine. If provided, uses function output as global_step. To setup global step from another engine, please use @@ -385,10 +385,10 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): ) Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): parameter name - tag (str, optional): common title for all produced plots. For example, "generator" + param_name: parameter name + tag: common title for all produced plots. For example, "generator" """ def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None: @@ -439,9 +439,9 @@ class WeightsScalarHandler(BaseWeightsScalarHandler): ) Args: - model (torch.nn.Module): model to log weights - reduction (callable): function to reduce parameters into scalar - tag (str, optional): common title for all produced plots. For example, "generator" + model: model to log weights + reduction: function to reduce parameters into scalar + tag: common title for all produced plots. For example, "generator" """ @@ -495,9 +495,9 @@ class GradsScalarHandler(BaseWeightsScalarHandler): ) Args: - model (torch.nn.Module): model to log weights - reduction (callable): function to reduce parameters into scalar - tag (str, optional): common title for all produced plots. For example, "generator" + model: model to log weights + reduction: function to reduce parameters into scalar + tag: common title for all produced plots. 
For example, "generator" """ @@ -524,7 +524,7 @@ class NeptuneSaver(BaseSaveHandler): """Handler that saves input checkpoint to the Neptune server. Args: - neptune_logger (ignite.contrib.handlers.neptune_logger.NeptuneLogger): an instance of + neptune_logger: an instance of NeptuneLogger class. Examples: diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py index 60f6d7fe0b63..8fc92f812a9f 100644 --- a/ignite/contrib/handlers/param_scheduler.py +++ b/ignite/contrib/handlers/param_scheduler.py @@ -20,12 +20,12 @@ class ParamScheduler(metaclass=ABCMeta): training. Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): name of optimizer's parameter to update. - save_history (bool, optional): whether to log the parameter values to + param_name: name of optimizer's parameter to update. + save_history: whether to log the parameter values to `engine.state.param_history`, (default=False). - param_group_index (int, optional): optimizer's parameters group to use + param_group_index: optimizer's parameters group to use Note: Parameter scheduler works independently of the internal state of the attached optimizer. @@ -120,7 +120,7 @@ def load_state_dict(self, state_dict: Mapping) -> None: """Copies parameters from :attr:`state_dict` into this ParamScheduler. Args: - state_dict (dict): a dict containing parameters. + state_dict: a dict containing parameters. """ if not isinstance(state_dict, Mapping): raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}") @@ -151,7 +151,7 @@ def simulate_values(cls, num_events: int, **scheduler_kwargs: Any) -> List[List[ """Method to simulate scheduled values during `num_events` events. Args: - num_events (int): number of events during the simulation. + num_events: number of events during the simulation. **scheduler_kwargs : parameter scheduler configuration kwargs. Returns: @@ -193,7 +193,7 @@ def plot_values(cls, num_events: int, **scheduler_kwargs: Mapping) -> Any: pip install matplotlib Args: - num_events (int): number of events during the simulation. + num_events: number of events during the simulation. **scheduler_kwargs : parameter scheduler configuration kwargs. Returns: @@ -230,21 +230,21 @@ class CyclicalScheduler(ParamScheduler): cycle of some size. Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): name of optimizer's parameter to update. - start_value (float): value at start of cycle. - end_value (float): value at the middle of the cycle. - cycle_size (int): length of cycle, value should be larger than 1. - cycle_mult (float, optional): ratio by which to change the cycle_size. + param_name: name of optimizer's parameter to update. + start_value: value at start of cycle. + end_value: value at the middle of the cycle. + cycle_size: length of cycle, value should be larger than 1. + cycle_mult: ratio by which to change the cycle_size. at the end of each cycle (default=1.0). - start_value_mult (float, optional): ratio by which to change the start value at the + start_value_mult: ratio by which to change the start value at the end of each cycle (default=1.0). 
- end_value_mult (float, optional): ratio by which to change the end value at the + end_value_mult: ratio by which to change the end value at the end of each cycle (default=1.0). - save_history (bool, optional): whether to log the parameter values to + save_history: whether to log the parameter values to `engine.state.param_history`, (default=False). - param_group_index (int, optional): optimizer's parameters group to use. + param_group_index: optimizer's parameters group to use. Note: If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should @@ -304,21 +304,21 @@ class LinearCyclicalScheduler(CyclicalScheduler): adjusts it back to 'start_value' for a half-cycle. Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): name of optimizer's parameter to update. - start_value (float): value at start of cycle. - end_value (float): value at the middle of the cycle. - cycle_size (int): length of cycle. - cycle_mult (float, optional): ratio by which to change the cycle_size + param_name: name of optimizer's parameter to update. + start_value: value at start of cycle. + end_value: value at the middle of the cycle. + cycle_size: length of cycle. + cycle_mult: ratio by which to change the cycle_size at the end of each cycle (default=1). - start_value_mult (float, optional): ratio by which to change the start value at the + start_value_mult: ratio by which to change the start value at the end of each cycle (default=1.0). - end_value_mult (float, optional): ratio by which to change the end value at the + end_value_mult: ratio by which to change the end value at the end of each cycle (default=1.0). - save_history (bool, optional): whether to log the parameter values to + save_history: whether to log the parameter values to `engine.state.param_history`, (default=False). - param_group_index (int, optional): optimizer's parameters group to use. + param_group_index: optimizer's parameters group to use. Note: If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should @@ -350,21 +350,21 @@ class CosineAnnealingScheduler(CyclicalScheduler): wave (as suggested in [Smith17]_). Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): name of optimizer's parameter to update. - start_value (float): value at start of cycle. - end_value (float): value at the end of the cycle. - cycle_size (int): length of cycle. - cycle_mult (float, optional): ratio by which to change the cycle_size + param_name: name of optimizer's parameter to update. + start_value: value at start of cycle. + end_value: value at the end of the cycle. + cycle_size: length of cycle. + cycle_mult: ratio by which to change the cycle_size at the end of each cycle (default=1). - start_value_mult (float, optional): ratio by which to change the start value at the + start_value_mult: ratio by which to change the start value at the end of each cycle (default=1.0). - end_value_mult (float, optional): ratio by which to change the end value at the + end_value_mult: ratio by which to change the end value at the end of each cycle (default=1.0). 
- save_history (bool, optional): whether to log the parameter values to + save_history: whether to log the parameter values to `engine.state.param_history`, (default=False). - param_group_index (int, optional): optimizer's parameters group to use. + param_group_index: optimizer's parameters group to use. Note: If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should @@ -418,9 +418,9 @@ class ConcatScheduler(ParamScheduler): scheduler is defined by `durations` list of integers. Args: - schedulers (list of ParamScheduler): list of parameter schedulers. - durations (list of int): list of number of events that lasts a parameter scheduler from schedulers. - save_history (bool, optional): whether to log the parameter values to + schedulers: list of parameter schedulers. + durations: list of number of events that lasts a parameter scheduler from schedulers. + save_history: whether to log the parameter values to `engine.state.param_history`, (default=False). Examples: @@ -520,7 +520,7 @@ def load_state_dict(self, state_dict: Mapping) -> None: """Copies parameters from :attr:`state_dict` into this ConcatScheduler. Args: - state_dict (dict): a dict containing parameters. + state_dict: a dict containing parameters. """ if not isinstance(state_dict, Mapping): raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}") @@ -584,10 +584,10 @@ def simulate_values( # type: ignore[override] """Method to simulate scheduled values during num_events events. Args: - num_events (int): number of events during the simulation. - schedulers (list of ParamScheduler): list of parameter schedulers. - durations (list of int): list of number of events that lasts a parameter scheduler from schedulers. - param_names (list or tuple of str, optional): parameter name or list of parameter names to simulate values. + num_events: number of events during the simulation. + schedulers: list of parameter schedulers. + durations: list of number of events that lasts a parameter scheduler from schedulers. + param_names: parameter name or list of parameter names to simulate values. By default, the first scheduler's parameter name is taken. Returns: @@ -651,8 +651,8 @@ class LRScheduler(ParamScheduler): """A wrapper class to call `torch.optim.lr_scheduler` objects as `ignite` handlers. Args: - lr_scheduler (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap. - save_history (bool, optional): whether to log the parameter values to + lr_scheduler: lr_scheduler object to wrap. + save_history: whether to log the parameter values to `engine.state.param_history`, (default=False). .. code-block:: python @@ -710,8 +710,8 @@ def simulate_values( # type: ignore[override] """Method to simulate scheduled values during num_events events. Args: - num_events (int): number of events during the simulation. - lr_scheduler (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap. + num_events: number of events during the simulation. + lr_scheduler: lr_scheduler object to wrap. Returns: list of pairs: [event_index, value] @@ -761,15 +761,15 @@ def create_lr_scheduler_with_warmup( Helper method to create a learning rate scheduler with a linear warm-up. Args: - lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): learning rate scheduler + lr_scheduler: learning rate scheduler after the warm-up. - warmup_start_value (float): learning rate start value of the warm-up phase. 
- warmup_duration (int): warm-up phase duration, number of events. - warmup_end_value (float, optional): learning rate end value of the warm-up phase, (default=None). If None, + warmup_start_value: learning rate start value of the warm-up phase. + warmup_duration: warm-up phase duration, number of events. + warmup_end_value: learning rate end value of the warm-up phase, (default=None). If None, warmup_end_value is set to optimizer initial lr. - save_history (bool, optional): whether to log the parameter values to + save_history: whether to log the parameter values to `engine.state.param_history`, (default=False). - output_simulated_values (list, optional): optional output of simulated learning rate values. + output_simulated_values: optional output of simulated learning rate values. If output_simulated_values is a list of None, e.g. `[None] * 100`, after the execution it will be filled by 100 simulated learning rate values. @@ -876,14 +876,14 @@ class PiecewiseLinear(ParamScheduler): Piecewise linear parameter scheduler Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): name of optimizer's parameter to update. - milestones_values (list of tuples (int, float)): list of tuples (event index, parameter value) + param_name: name of optimizer's parameter to update. + milestones_values: list of tuples (event index, parameter value) represents milestones and parameter. Milestones should be increasing integers. - save_history (bool, optional): whether to log the parameter values to + save_history: whether to log the parameter values to `engine.state.param_history`, (default=False). - param_group_index (int, optional): optimizer's parameters group to use. + param_group_index: optimizer's parameters group to use. Returns: PiecewiseLinear: piecewise linear scheduler @@ -967,8 +967,8 @@ class ParamGroupScheduler: Scheduler helper to group multiple schedulers into one. Args: - schedulers (list/tuple of ParamScheduler): list/tuple of parameter schedulers. - names (list of str): list of names of schedulers. + schedulers: list/tuple of parameter schedulers. + names: list of names of schedulers. .. code-block:: python @@ -1055,7 +1055,7 @@ def load_state_dict(self, state_dict: Mapping) -> None: """Copies parameters from :attr:`state_dict` into this ParamScheduler. Args: - state_dict (dict): a dict containing parameters. + state_dict: a dict containing parameters. """ if not isinstance(state_dict, Mapping): raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}") @@ -1083,8 +1083,8 @@ def simulate_values(cls, num_events: int, schedulers: List[_LRScheduler], **kwar """Method to simulate scheduled values during num_events events. Args: - num_events (int): number of events during the simulation. - schedulers (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap. + num_events: number of events during the simulation. + schedulers: lr_scheduler object to wrap. Returns: list of pairs: [event_index, value] diff --git a/ignite/contrib/handlers/polyaxon_logger.py b/ignite/contrib/handlers/polyaxon_logger.py index 62d3b4629c85..a57959b313bd 100644 --- a/ignite/contrib/handlers/polyaxon_logger.py +++ b/ignite/contrib/handlers/polyaxon_logger.py @@ -174,14 +174,14 @@ def global_step_transform(*args, **kwargs): ) Args: - tag (str): common title for all produced plots. 
For example, "training" - metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available + tag: common title for all produced plots. For example, "training" + metric_names: list of metric names to plot or a string "all" to plot all available metrics. - output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number. + output_transform: output transform function to prepare `engine.state.output` as a number. For example, `output_transform = lambda output: output` This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot with corresponding keys. - global_step_transform (callable, optional): global step transform function to output a desired global step. + global_step_transform: global step transform function to output a desired global step. Input of the function is `(engine, event_name)`. Output of function should be an integer. Default is None, global_step based on attached engine. If provided, uses function output as global_step. To setup global step from another engine, please use @@ -263,10 +263,10 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): ) Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): parameter name - tag (str, optional): common title for all produced plots. For example, "generator" + param_name: parameter name + tag: common title for all produced plots. For example, "generator" """ def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None: diff --git a/ignite/contrib/handlers/stores.py b/ignite/contrib/handlers/stores.py index 67e1706fe672..4dc2db7c9c74 100644 --- a/ignite/contrib/handlers/stores.py +++ b/ignite/contrib/handlers/stores.py @@ -12,7 +12,7 @@ class EpochOutputStore: larger than available RAM. Args: - output_transform (callable, optional): a callable that is used to + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output , e.g., lambda x: x[0] diff --git a/ignite/contrib/handlers/tensorboard_logger.py b/ignite/contrib/handlers/tensorboard_logger.py index bf0cf35cd4c6..4539463f1f98 100644 --- a/ignite/contrib/handlers/tensorboard_logger.py +++ b/ignite/contrib/handlers/tensorboard_logger.py @@ -236,14 +236,14 @@ def global_step_transform(*args, **kwargs): ) Args: - tag (str): common title for all produced plots. For example, "training" - metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available + tag: common title for all produced plots. For example, "training" + metric_names: list of metric names to plot or a string "all" to plot all available metrics. - output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number. + output_transform: output transform function to prepare `engine.state.output` as a number. For example, `output_transform = lambda output: output` This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot with corresponding keys. - global_step_transform (callable, optional): global step transform function to output a desired global step. + global_step_transform: global step transform function to output a desired global step. 
Input of the function is `(engine, event_name)`. Output of function should be an integer. Default is None, global_step based on attached engine. If provided, uses function output as global_step. To setup global step from another engine, please use @@ -319,10 +319,10 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): ) Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): parameter name - tag (str, optional): common title for all produced plots. For example, "generator" + param_name: parameter name + tag: common title for all produced plots. For example, "generator" """ def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None: @@ -365,9 +365,9 @@ class WeightsScalarHandler(BaseWeightsScalarHandler): ) Args: - model (torch.nn.Module): model to log weights - reduction (callable): function to reduce parameters into scalar - tag (str, optional): common title for all produced plots. For example, "generator" + model: model to log weights + reduction: function to reduce parameters into scalar + tag: common title for all produced plots. For example, "generator" """ @@ -411,8 +411,8 @@ class WeightsHistHandler(BaseWeightsHistHandler): ) Args: - model (torch.nn.Module): model to log weights - tag (str, optional): common title for all produced plots. For example, "generator" + model: model to log weights + tag: common title for all produced plots. For example, "generator" """ @@ -457,9 +457,9 @@ class GradsScalarHandler(BaseWeightsScalarHandler): ) Args: - model (torch.nn.Module): model to log weights - reduction (callable): function to reduce parameters into scalar - tag (str, optional): common title for all produced plots. For example, "generator" + model: model to log weights + reduction: function to reduce parameters into scalar + tag: common title for all produced plots. For example, "generator" """ @@ -502,8 +502,8 @@ class GradsHistHandler(BaseWeightsHistHandler): ) Args: - model (torch.nn.Module): model to log weights - tag (str, optional): common title for all produced plots. For example, "generator" + model: model to log weights + tag: common title for all produced plots. For example, "generator" """ diff --git a/ignite/contrib/handlers/time_profilers.py b/ignite/contrib/handlers/time_profilers.py index c081fcefb803..4cec5eba2ff5 100644 --- a/ignite/contrib/handlers/time_profilers.py +++ b/ignite/contrib/handlers/time_profilers.py @@ -268,6 +268,9 @@ def write_results(self, output_path: str) -> None: """ Method to store the unaggregated profiling results to a csv file + Args: + output_path: file output path containing a filename + .. code-block:: python profiler.write_results('path_to_dir/awesome_filename.csv') @@ -347,6 +350,9 @@ def print_results(results: Dict) -> str: """ Method to print the aggregated results from the profiler + Args: + results: the aggregated results from the profiler + .. code-block:: python profiler.print_results(results) @@ -566,6 +572,11 @@ def _as_first_started(self, engine: Engine) -> None: engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers) def attach(self, engine: Engine) -> None: + """Attach HandlersTimeProfiler to the given engine. 
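Alongside the ``attach`` / ``write_results`` / ``print_results`` docstrings touched here, a rough usage sketch of the profiler may help; the run sizes and the csv path are made up, and ``write_results`` additionally expects ``pandas`` to be installed.

.. code-block:: python

    from ignite.contrib.handlers.time_profilers import HandlersTimeProfiler
    from ignite.engine import Engine

    trainer = Engine(lambda engine, batch: None)

    profiler = HandlersTimeProfiler()
    profiler.attach(trainer)  # raises TypeError if trainer is not an ignite Engine
    trainer.run(range(16), max_epochs=2)

    results = profiler.get_results()
    profiler.print_results(results)               # prints the aggregated table
    profiler.write_results("./time_profile.csv")  # output_path contains the file name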
+ + Args: + engine: the instance of Engine to attach + """ if not isinstance(engine, Engine): raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}") @@ -628,6 +639,9 @@ def write_results(self, output_path: str) -> None: """ Method to store the unaggregated profiling results to a csv file + Args: + output_path: file output path containing a filename + .. code-block:: python profiler.write_results('path_to_dir/awesome_filename.csv') @@ -677,6 +691,9 @@ def print_results(results: List[List[Union[str, float]]]) -> None: """ Method to print the aggregated results from the profiler + Args: + results: the aggregated results from the profiler + .. code-block:: python profiler.print_results(results) diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py index 781b4a1c3cc0..206ee68edc0e 100644 --- a/ignite/contrib/handlers/tqdm_logger.py +++ b/ignite/contrib/handlers/tqdm_logger.py @@ -14,8 +14,8 @@ class ProgressBar(BaseLogger): TQDM progress bar handler to log training progress and computed metrics. Args: - persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``) - bar_format (str, optional): Specify a custom bar string formatting. May impact performance. + persist: set to ``True`` to persist the progress bar after completion (default = ``False``) + bar_format : Specify a custom bar string formatting. May impact performance. [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]']. Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where l_bar='{desc}: {percentage:3.0f}%|' and @@ -149,7 +149,7 @@ def log_message(self, message: str) -> None: Logs a message, preserving the progress bar correct output format. Args: - message (str): string you wish to log. + message: string you wish to log. """ from tqdm import tqdm @@ -167,10 +167,10 @@ def attach( # type: ignore[override] Attaches the progress bar to an engine object. Args: - engine (Engine): engine object. - metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available + engine: engine object. + metric_names: list of metric names to plot or a string "all" to plot all available metrics. - output_transform (callable, optional): a function to select what you want to print from the engine's + output_transform: a function to select what you want to print from the engine's output. This function may return either a dictionary with entries in the format of ``{name: value}``, or a single scalar, which will be displayed with the default name `output`. event_name: event's name on which the progress bar advances. Valid events are from @@ -217,10 +217,10 @@ class _OutputHandler(BaseOutputHandler): """Helper handler to log engine's output and/or metrics Args: - description (str): progress bar description. - metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available + description: progress bar description. + metric_names: list of metric names to plot or a string "all" to plot all available metrics. - output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number. + output_transform: output transform function to prepare `engine.state.output` as a number. 
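For the ``ProgressBar`` arguments documented in this ``tqdm_logger`` hunk, a short sketch; it assumes ``tqdm`` is installed, and the dummy engine output stands in for a real loss.

.. code-block:: python

    from ignite.contrib.handlers.tqdm_logger import ProgressBar
    from ignite.engine import Engine

    trainer = Engine(lambda engine, batch: {"loss": 0.1})

    pbar = ProgressBar(persist=True)  # keep the bar after each epoch
    pbar.attach(trainer, output_transform=lambda out: {"loss": out["loss"]})
    pbar.log_message("Starting training")  # logged without breaking the bar layout
    trainer.run(range(20), max_epochs=2)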
For example, `output_transform = lambda output: output` This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot with corresponding keys. diff --git a/ignite/contrib/handlers/visdom_logger.py b/ignite/contrib/handlers/visdom_logger.py index bb114bed76ea..542a2141db46 100644 --- a/ignite/contrib/handlers/visdom_logger.py +++ b/ignite/contrib/handlers/visdom_logger.py @@ -38,9 +38,9 @@ class VisdomLogger(BaseLogger): pip install git+https://github.com/facebookresearch/visdom.git Args: - server (str, optional): visdom server URL. It can be also specified by environment variable `VISDOM_SERVER_URL` - port (int, optional): visdom server's port. It can be also specified by environment variable `VISDOM_PORT` - num_workers (int, optional): number of workers to use in `concurrent.futures.ThreadPoolExecutor` to post data to + server: visdom server URL. It can be also specified by environment variable `VISDOM_SERVER_URL` + port: visdom server's port. It can be also specified by environment variable `VISDOM_PORT` + num_workers: number of workers to use in `concurrent.futures.ThreadPoolExecutor` to post data to visdom server. Default, `num_workers=1`. If `num_workers=0` and logger uses the main thread. If using Python 2.7 and `num_workers>0` the package `futures` should be installed: `pip install futures` **kwargs: kwargs to pass into @@ -220,13 +220,13 @@ def add_scalar( Helper method to log a scalar with VisdomLogger. Args: - logger (VisdomLogger): visdom logger - k (str): scalar name which is used to set window title and y-axis label - v (int or float): scalar value, y-axis value + logger: visdom logger + k: scalar name which is used to set window title and y-axis label + v: scalar value, y-axis value event_name: Event name which is used to setup x-axis label. Valid events are from :class:`~ignite.engine.events.Events` or any `event_name` added by :meth:`~ignite.engine.engine.Engine.register_events`. - global_step (int): global step, x-axis value + global_step: global step, x-axis value """ if k not in self.windows: @@ -314,19 +314,19 @@ def global_step_transform(*args, **kwargs): ) Args: - tag (str): common title for all produced plots. For example, "training" - metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available + tag: common title for all produced plots. For example, "training" + metric_names: list of metric names to plot or a string "all" to plot all available metrics. - output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number. + output_transform: output transform function to prepare `engine.state.output` as a number. For example, `output_transform = lambda output: output` This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot with corresponding keys. - global_step_transform (callable, optional): global step transform function to output a desired global step. + global_step_transform: global step transform function to output a desired global step. Input of the function is `(engine, event_name)`. Output of function should be an integer. Default is None, global_step based on attached engine. If provided, uses function output as global_step. To setup global step from another engine, please use :meth:`~ignite.contrib.handlers.visdom_logger.global_step_from_engine`. 
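To make the ``global_step_transform`` wording concrete for the Visdom handlers, a sketch follows. It assumes a visdom server is already running; the address and tag are placeholders, and the lambda could equally be the ``global_step_from_engine`` helper referenced above.

.. code-block:: python

    from ignite.contrib.handlers.visdom_logger import VisdomLogger
    from ignite.engine import Engine, Events

    trainer = Engine(lambda engine, batch: None)
    evaluator = Engine(lambda engine, batch: None)

    vd_logger = VisdomLogger(server="localhost", port=8097)  # or VISDOM_SERVER_URL / VISDOM_PORT
    vd_logger.attach_output_handler(
        evaluator,
        event_name=Events.EPOCH_COMPLETED,
        tag="validation",
        metric_names="all",
        # takes (engine, event_name) and returns an int; here the trainer drives the x-axis
        global_step_transform=lambda engine, event_name: trainer.state.iteration,
    )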
- show_legend (bool, optional): flag to show legend in the window + show_legend: flag to show legend in the window Note: @@ -411,11 +411,11 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler, _BaseVisDrawer): ) Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): parameter name - tag (str, optional): common title for all produced plots. For example, "generator" - show_legend (bool, optional): flag to show legend in the window + param_name: parameter name + tag: common title for all produced plots. For example, "generator" + show_legend: flag to show legend in the window """ def __init__( @@ -463,10 +463,10 @@ class WeightsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer): ) Args: - model (torch.nn.Module): model to log weights - reduction (callable): function to reduce parameters into scalar - tag (str, optional): common title for all produced plots. For example, "generator" - show_legend (bool, optional): flag to show legend in the window + model: model to log weights + reduction: function to reduce parameters into scalar + tag: common title for all produced plots. For example, "generator" + show_legend: flag to show legend in the window """ def __init__( @@ -513,10 +513,10 @@ class GradsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer): ) Args: - model (torch.nn.Module): model to log weights - reduction (callable): function to reduce parameters into scalar - tag (str, optional): common title for all produced plots. For example, "generator" - show_legend (bool, optional): flag to show legend in the window + model: model to log weights + reduction: function to reduce parameters into scalar + tag: common title for all produced plots. For example, "generator" + show_legend: flag to show legend in the window """ diff --git a/ignite/contrib/handlers/wandb_logger.py b/ignite/contrib/handlers/wandb_logger.py index 4880523ee089..23d30f6f24e4 100644 --- a/ignite/contrib/handlers/wandb_logger.py +++ b/ignite/contrib/handlers/wandb_logger.py @@ -218,19 +218,19 @@ def global_step_transform(*args, **kwargs): ) Args: - tag (str): common title for all produced plots. For example, "training" - metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available + tag: common title for all produced plots. For example, "training" + metric_names: list of metric names to plot or a string "all" to plot all available metrics. - output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number. + output_transform: output transform function to prepare `engine.state.output` as a number. For example, `output_transform = lambda output: output` This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot with corresponding keys. - global_step_transform (callable, optional): global step transform function to output a desired global step. + global_step_transform: global step transform function to output a desired global step. Input of the function is `(engine, event_name)`. Output of function should be an integer. Default is None, global_step based on attached engine. If provided, uses function output as global_step. To setup global step from another engine, please use :meth:`~ignite.contrib.handlers.wandb_logger.global_step_from_engine`. 
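A matching sketch for the W&B output handler described above; it assumes ``wandb`` is installed and the user is logged in, and the project name is purely illustrative.

.. code-block:: python

    from ignite.contrib.handlers.wandb_logger import WandBLogger
    from ignite.engine import Engine, Events

    trainer = Engine(lambda engine, batch: {"loss": 0.0})

    wandb_logger = WandBLogger(project="ignite-demo")  # args/kwargs are forwarded to wandb.init
    wandb_logger.attach_output_handler(
        trainer,
        event_name=Events.ITERATION_COMPLETED,
        tag="training",
        output_transform=lambda out: out,
        global_step_transform=lambda engine, event_name: trainer.state.iteration,
    )
    trainer.run(range(10), max_epochs=1)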
- sync (bool, optional): If set to False, process calls to log in a seperate thread. Default (None) uses whatever + sync: If set to False, process calls to log in a seperate thread. Default (None) uses whatever the default value of wandb.log. Note: @@ -308,11 +308,11 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): ) Args: - optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups`` + optimizer: torch optimizer or any object with attribute ``param_groups`` as a sequence. - param_name (str): parameter name - tag (str, optional): common title for all produced plots. For example, "generator" - sync (bool, optional): If set to False, process calls to log in a seperate thread. Default (None) uses whatever + param_name: parameter name + tag: common title for all produced plots. For example, "generator" + sync: If set to False, process calls to log in a seperate thread. Default (None) uses whatever the default value of wandb.log. """ From 302069ee27c21116892ecd3cbe09e3086fcf88d7 Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Tue, 23 Feb 2021 00:59:37 +0630 Subject: [PATCH 05/11] docs: rm type hints in docstring of ignite.contrib.handlers --- ignite/contrib/handlers/base_logger.py | 2 +- ignite/contrib/handlers/lr_finder.py | 6 +++--- ignite/contrib/handlers/param_scheduler.py | 19 +++++++++++-------- ignite/contrib/handlers/polyaxon_logger.py | 4 ++-- ignite/contrib/handlers/tensorboard_logger.py | 4 ++-- ignite/contrib/handlers/tqdm_logger.py | 2 +- ignite/contrib/handlers/visdom_logger.py | 2 +- ignite/contrib/handlers/wandb_logger.py | 4 ++-- 8 files changed, 23 insertions(+), 20 deletions(-) diff --git a/ignite/contrib/handlers/base_logger.py b/ignite/contrib/handlers/base_logger.py index 637be7af2208..a356948ef8ef 100644 --- a/ignite/contrib/handlers/base_logger.py +++ b/ignite/contrib/handlers/base_logger.py @@ -155,7 +155,7 @@ def attach( engine: engine object. log_handler: a logging handler to execute event_name: event to attach the logging handler to. Valid events are from - :class:`~ignite.engine.events.Events` or class:`~ignite.engine.events.EventsList` or any `event_name` + :class:`~ignite.engine.events.Events` or :class:`~ignite.engine.events.EventsList` or any `event_name` added by :meth:`~ignite.engine.engine.Engine.register_events`. Returns: diff --git a/ignite/contrib/handlers/lr_finder.py b/ignite/contrib/handlers/lr_finder.py index fdcf8b4f6eab..95a17bdbb781 100644 --- a/ignite/contrib/handlers/lr_finder.py +++ b/ignite/contrib/handlers/lr_finder.py @@ -289,11 +289,11 @@ def attach( diverge_th: Used for stopping the search when `current loss > diverge_th * best_loss`. Default, 5.0. + Returns: + trainer_with_lr_finder (trainer used for finding the lr) + Note: lr_finder cannot be attached to more than one trainer at a time. - - Returns: - trainer_with_lr_finder: trainer used for finding the lr """ if not isinstance(to_save, Mapping): raise TypeError(f"Argument to_save should be a mapping, but given {type(to_save)}") diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py index 8fc92f812a9f..2cdd127d9b93 100644 --- a/ignite/contrib/handlers/param_scheduler.py +++ b/ignite/contrib/handlers/param_scheduler.py @@ -152,10 +152,10 @@ def simulate_values(cls, num_events: int, **scheduler_kwargs: Any) -> List[List[ Args: num_events: number of events during the simulation. - **scheduler_kwargs : parameter scheduler configuration kwargs. 
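The ``scheduler_kwargs`` line above belongs to ``ParamScheduler.simulate_values``; a quick sketch of the call and of the ``[event_index, value]`` pairs it returns (numbers chosen arbitrarily):

.. code-block:: python

    from ignite.contrib.handlers.param_scheduler import LinearCyclicalScheduler

    # simulate 50 events without touching a real optimizer
    values = LinearCyclicalScheduler.simulate_values(
        num_events=50, param_name="lr", start_value=1e-1, end_value=1e-3, cycle_size=10,
    )
    print(values[:3])  # pairs of [event_index, value], starting at event 0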
+ scheduler_kwargs: parameter scheduler configuration kwargs. Returns: - list of pairs: [event_index, value] + event_index, value Examples: @@ -194,7 +194,7 @@ def plot_values(cls, num_events: int, **scheduler_kwargs: Mapping) -> Any: Args: num_events: number of events during the simulation. - **scheduler_kwargs : parameter scheduler configuration kwargs. + scheduler_kwargs : parameter scheduler configuration kwargs. Returns: matplotlib.lines.Line2D @@ -589,6 +589,7 @@ def simulate_values( # type: ignore[override] durations: list of number of events that lasts a parameter scheduler from schedulers. param_names: parameter name or list of parameter names to simulate values. By default, the first scheduler's parameter name is taken. + kwargs: Returns: list of [event_index, value_0, value_1, ...], where values correspond to `param_names`. @@ -714,7 +715,7 @@ def simulate_values( # type: ignore[override] lr_scheduler: lr_scheduler object to wrap. Returns: - list of pairs: [event_index, value] + event_index, value """ @@ -774,7 +775,7 @@ def create_lr_scheduler_with_warmup( by 100 simulated learning rate values. Returns: - ConcatScheduler: learning rate scheduler with linear warm-up. + learning rate scheduler with linear warm-up. Note: If the first learning rate value provided by `lr_scheduler` is different from `warmup_end_value`, an additional @@ -886,7 +887,7 @@ class PiecewiseLinear(ParamScheduler): param_group_index: optimizer's parameters group to use. Returns: - PiecewiseLinear: piecewise linear scheduler + piecewise linear scheduler instance .. code-block:: python @@ -909,7 +910,7 @@ def __init__( milestones_values: List[Tuple[int, float]], save_history: bool = False, param_group_index: Optional[int] = None, - ): + ) -> None: super(PiecewiseLinear, self).__init__(optimizer, param_name, save_history, param_group_index=param_group_index) if not isinstance(milestones_values, Sequence): @@ -969,6 +970,7 @@ class ParamGroupScheduler: Args: schedulers: list/tuple of parameter schedulers. names: list of names of schedulers. + save_history: whether to save history or not. .. code-block:: python @@ -1085,9 +1087,10 @@ def simulate_values(cls, num_events: int, schedulers: List[_LRScheduler], **kwar Args: num_events: number of events during the simulation. schedulers: lr_scheduler object to wrap. + kwargs: Returns: - list of pairs: [event_index, value] + event_index, value """ diff --git a/ignite/contrib/handlers/polyaxon_logger.py b/ignite/contrib/handlers/polyaxon_logger.py index a57959b313bd..2c6699edf468 100644 --- a/ignite/contrib/handlers/polyaxon_logger.py +++ b/ignite/contrib/handlers/polyaxon_logger.py @@ -84,9 +84,9 @@ class PolyaxonLogger(BaseLogger): ) Args: - *args: Positional arguments accepted from + args: Positional arguments accepted from `Experiment `_. - **kwargs: Keyword arguments accepted from + kwargs: Keyword arguments accepted from `Experiment `_. """ diff --git a/ignite/contrib/handlers/tensorboard_logger.py b/ignite/contrib/handlers/tensorboard_logger.py index 4539463f1f98..2a907f8dc4c4 100644 --- a/ignite/contrib/handlers/tensorboard_logger.py +++ b/ignite/contrib/handlers/tensorboard_logger.py @@ -44,10 +44,10 @@ class TensorboardLogger(BaseLogger): (>=v1.2.0). Args: - *args: Positional arguments accepted from + args: Positional arguments accepted from `SummaryWriter `_. - **kwargs: Keyword arguments accepted from + kwargs: Keyword arguments accepted from `SummaryWriter `_. For example, `log_dir` to setup path to the directory where to log. 
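Since the ``TensorboardLogger`` hunk above only reworks the ``args`` / ``kwargs`` wording, here is a brief sketch of the constructor and both attach helpers; the log directory is a placeholder, and either ``tensorboardX`` or PyTorch's bundled tensorboard must be available.

.. code-block:: python

    import torch
    import torch.nn as nn
    from ignite.engine import Engine, Events
    from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OptimizerParamsHandler

    model = nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    trainer = Engine(lambda engine, batch: {"loss": 0.0})

    tb_logger = TensorboardLogger(log_dir="./tb-logs")  # kwargs go to SummaryWriter
    tb_logger.attach_output_handler(
        trainer,
        event_name=Events.ITERATION_COMPLETED,
        tag="training",
        output_transform=lambda out: out,
    )
    tb_logger.attach(
        trainer,
        log_handler=OptimizerParamsHandler(optimizer, param_name="lr"),
        event_name=Events.ITERATION_STARTED,
    )
    trainer.run(range(10), max_epochs=1)
    tb_logger.close()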
diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py index 206ee68edc0e..b4c71be2c669 100644 --- a/ignite/contrib/handlers/tqdm_logger.py +++ b/ignite/contrib/handlers/tqdm_logger.py @@ -21,7 +21,7 @@ class ProgressBar(BaseLogger): l_bar='{desc}: {percentage:3.0f}%|' and r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the formatting, see `tqdm docs `_. - **tqdm_kwargs: kwargs passed to tqdm progress bar. + tqdm_kwargs: kwargs passed to tqdm progress bar. By default, progress bar description displays "Epoch [5/10]" where 5 is the current epoch and 10 is the number of epochs; however, if ``max_epochs`` are set to 1, the progress bar instead displays "Iteration: [5/10]". If tqdm_kwargs defines `desc`, e.g. "Predictions", than the description is diff --git a/ignite/contrib/handlers/visdom_logger.py b/ignite/contrib/handlers/visdom_logger.py index 542a2141db46..f8ae3dd56907 100644 --- a/ignite/contrib/handlers/visdom_logger.py +++ b/ignite/contrib/handlers/visdom_logger.py @@ -43,7 +43,7 @@ class VisdomLogger(BaseLogger): num_workers: number of workers to use in `concurrent.futures.ThreadPoolExecutor` to post data to visdom server. Default, `num_workers=1`. If `num_workers=0` and logger uses the main thread. If using Python 2.7 and `num_workers>0` the package `futures` should be installed: `pip install futures` - **kwargs: kwargs to pass into + kwargs: kwargs to pass into `visdom.Visdom `_. Note: diff --git a/ignite/contrib/handlers/wandb_logger.py b/ignite/contrib/handlers/wandb_logger.py index 23d30f6f24e4..1d757bee16bf 100644 --- a/ignite/contrib/handlers/wandb_logger.py +++ b/ignite/contrib/handlers/wandb_logger.py @@ -21,8 +21,8 @@ class WandBLogger(BaseLogger): this wrapper. See examples on how to save model parameters and gradients. Args: - *args: Positional arguments accepted by `wandb.init`. - **kwargs: Keyword arguments accepted by `wandb.init`. + args: Positional arguments accepted by `wandb.init`. + kwargs: Keyword arguments accepted by `wandb.init`. Please see `wandb.init `_ for documentation of possible parameters. Examples: From 8e6f8f54329a460555034d0f4aeb989dcc7b8cb4 Mon Sep 17 00:00:00 2001 From: Jeff Yang Date: Tue, 23 Feb 2021 11:52:02 +0630 Subject: [PATCH 06/11] Apply suggestions from code review Co-authored-by: vfdev --- ignite/contrib/handlers/param_scheduler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py index 2cdd127d9b93..879f0dd10098 100644 --- a/ignite/contrib/handlers/param_scheduler.py +++ b/ignite/contrib/handlers/param_scheduler.py @@ -194,7 +194,7 @@ def plot_values(cls, num_events: int, **scheduler_kwargs: Mapping) -> Any: Args: num_events: number of events during the simulation. - scheduler_kwargs : parameter scheduler configuration kwargs. + scheduler_kwargs: parameter scheduler configuration kwargs. Returns: matplotlib.lines.Line2D @@ -1087,7 +1087,7 @@ def simulate_values(cls, num_events: int, schedulers: List[_LRScheduler], **kwar Args: num_events: number of events during the simulation. schedulers: lr_scheduler object to wrap. - kwargs: + kwargs: kwargs passed to construct an instance of :class:`ignite.contrib.handlers.ParamGroupScheduler`. 
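The new ``save_history`` entry for ``ParamGroupScheduler`` is easiest to place with a sketch of the class itself; the two-group split and all values below are invented for illustration.

.. code-block:: python

    import torch
    import torch.nn as nn
    from ignite.engine import Engine, Events
    from ignite.contrib.handlers.param_scheduler import LinearCyclicalScheduler, ParamGroupScheduler

    model = nn.Linear(10, 2)
    optimizer = torch.optim.SGD(
        [{"params": model.weight, "lr": 1e-2}, {"params": model.bias, "lr": 1e-3}]
    )
    trainer = Engine(lambda engine, batch: None)

    scheduler_w = LinearCyclicalScheduler(
        optimizer, "lr", start_value=1e-2, end_value=1e-4, cycle_size=100, param_group_index=0
    )
    scheduler_b = LinearCyclicalScheduler(
        optimizer, "lr", start_value=1e-3, end_value=1e-5, cycle_size=100, param_group_index=1
    )
    scheduler = ParamGroupScheduler(
        schedulers=[scheduler_w, scheduler_b],
        names=["lr (weight)", "lr (bias)"],
        save_history=True,  # values end up in engine.state.param_history
    )
    trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
    trainer.run(range(50), max_epochs=2)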
Returns: event_index, value From 9307350e6e225fc9c6879137027c8603bf0cce82 Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Tue, 23 Feb 2021 11:54:08 +0630 Subject: [PATCH 07/11] review: apply suggestions --- ignite/contrib/handlers/param_scheduler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py index 879f0dd10098..861afd49fb08 100644 --- a/ignite/contrib/handlers/param_scheduler.py +++ b/ignite/contrib/handlers/param_scheduler.py @@ -775,7 +775,7 @@ def create_lr_scheduler_with_warmup( by 100 simulated learning rate values. Returns: - learning rate scheduler with linear warm-up. + ConcatScheduler Note: If the first learning rate value provided by `lr_scheduler` is different from `warmup_end_value`, an additional @@ -887,7 +887,7 @@ class PiecewiseLinear(ParamScheduler): param_group_index: optimizer's parameters group to use. Returns: - piecewise linear scheduler instance + PiecewiseLinear .. code-block:: python From 6040f9315b6a0aa3e7571ba3609d739c619063aa Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Tue, 23 Feb 2021 12:41:58 +0630 Subject: [PATCH 08/11] fix: no return in __init__ --- ignite/contrib/handlers/base_logger.py | 8 ++++---- ignite/contrib/handlers/clearml_logger.py | 16 ++++++++-------- ignite/contrib/handlers/lr_finder.py | 4 ++-- ignite/contrib/handlers/mlflow_logger.py | 4 ++-- ignite/contrib/handlers/neptune_logger.py | 8 ++++---- ignite/contrib/handlers/param_scheduler.py | 15 ++++++--------- ignite/contrib/handlers/polyaxon_logger.py | 6 +++--- ignite/contrib/handlers/stores.py | 2 +- ignite/contrib/handlers/tensorboard_logger.py | 12 ++++++------ ignite/contrib/handlers/time_profilers.py | 4 ++-- ignite/contrib/handlers/tqdm_logger.py | 4 ++-- ignite/contrib/handlers/visdom_logger.py | 10 +++++----- ignite/contrib/handlers/wandb_logger.py | 6 +++--- 13 files changed, 48 insertions(+), 51 deletions(-) diff --git a/ignite/contrib/handlers/base_logger.py b/ignite/contrib/handlers/base_logger.py index a356948ef8ef..407ced050b4c 100644 --- a/ignite/contrib/handlers/base_logger.py +++ b/ignite/contrib/handlers/base_logger.py @@ -107,7 +107,7 @@ class BaseWeightsScalarHandler(BaseHandler): Helper handler to log model's weights as scalars. """ - def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None: + def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None): if not isinstance(model, torch.nn.Module): raise TypeError(f"Argument model should be of type torch.nn.Module, but given {type(model)}") @@ -159,7 +159,7 @@ def attach( added by :meth:`~ignite.engine.engine.Engine.register_events`. Returns: - :class:`~ignite.engine.RemovableEventHandle`, which can be used to remove the handler. + :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler. """ if isinstance(event_name, EventsList): for name in event_name: @@ -188,7 +188,7 @@ def attach_output_handler(self, engine: Engine, event_name: Any, *args: Any, **k kwargs: kwargs to initialize `OutputHandler` Returns: - :class:`~ignite.engine.RemovableEventHandle`, which can be used to remove the handler. + :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler. 
""" return self.attach(engine, self._create_output_handler(*args, **kwargs), event_name=event_name) @@ -206,7 +206,7 @@ def attach_opt_params_handler( kwargs: kwargs to initialize `OptimizerParamsHandler` Returns: - :class:`~ignite.engine.RemovableEventHandle`, which can be used to remove the handler. + :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler. .. versionchanged:: 0.4.3 Added missing return statement. diff --git a/ignite/contrib/handlers/clearml_logger.py b/ignite/contrib/handlers/clearml_logger.py index 50965e5e8ae0..f67dec5ba092 100644 --- a/ignite/contrib/handlers/clearml_logger.py +++ b/ignite/contrib/handlers/clearml_logger.py @@ -119,7 +119,7 @@ class ClearMLLogger(BaseLogger): """ - def __init__(self, *_: Any, **kwargs: Any) -> None: + def __init__(self, *_: Any, **kwargs: Any): try: from clearml import Task from clearml.binding.frameworks.tensorflow_bind import WeightsGradientHistHelper @@ -299,7 +299,7 @@ def __init__( metric_names: Optional[List[str]] = None, output_transform: Optional[Callable] = None, global_step_transform: Optional[Callable] = None, - ) -> None: + ): super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform) def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None: @@ -365,7 +365,7 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): tag: common title for all produced plots. For example, "generator" """ - def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None: + def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None): super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag) def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None: @@ -416,7 +416,7 @@ class WeightsScalarHandler(BaseWeightsScalarHandler): """ - def __init__(self, model: Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None: + def __init__(self, model: Module, reduction: Callable = torch.norm, tag: Optional[str] = None): super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag) def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None: @@ -468,7 +468,7 @@ class WeightsHistHandler(BaseWeightsHistHandler): """ - def __init__(self, model: Module, tag: Optional[str] = None) -> None: + def __init__(self, model: Module, tag: Optional[str] = None): super(WeightsHistHandler, self).__init__(model, tag=tag) def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None: @@ -523,7 +523,7 @@ class GradsScalarHandler(BaseWeightsScalarHandler): """ - def __init__(self, model: Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None: + def __init__(self, model: Module, reduction: Callable = torch.norm, tag: Optional[str] = None): super(GradsScalarHandler, self).__init__(model, reduction, tag=tag) def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None: @@ -574,7 +574,7 @@ class GradsHistHandler(BaseWeightsHistHandler): """ - def __init__(self, model: Module, tag: Optional[str] = None) -> None: + def __init__(self, model: Module, tag: Optional[str] = None): super(GradsHistHandler, self).__init__(model, tag=tag) def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None: @@ -645,7 +645,7 @@ def __init__( dirname: Optional[str] = None, 
*args: Any, **kwargs: Any, - ) -> None: + ): self._setup_check_clearml(logger, output_uri) diff --git a/ignite/contrib/handlers/lr_finder.py b/ignite/contrib/handlers/lr_finder.py index 95a17bdbb781..5a4f07700bd6 100644 --- a/ignite/contrib/handlers/lr_finder.py +++ b/ignite/contrib/handlers/lr_finder.py @@ -71,7 +71,7 @@ class FastaiLRFinder: fastai/lr_find: https://github.com/fastai/fastai """ - def __init__(self) -> None: + def __init__(self): self._diverge_flag = False self._history = {} # type: Dict[str, List[Any]] self._best_loss = None @@ -372,7 +372,7 @@ class _ExponentialLR(_LRScheduler): """ - def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int, last_epoch: int = -1) -> None: + def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int, last_epoch: int = -1): self.end_lr = end_lr self.num_iter = num_iter super(_ExponentialLR, self).__init__(optimizer, last_epoch) diff --git a/ignite/contrib/handlers/mlflow_logger.py b/ignite/contrib/handlers/mlflow_logger.py index 89a047b47a93..3b7602bee2a1 100644 --- a/ignite/contrib/handlers/mlflow_logger.py +++ b/ignite/contrib/handlers/mlflow_logger.py @@ -86,7 +86,7 @@ class MLflowLogger(BaseLogger): ) """ - def __init__(self, tracking_uri: Optional[str] = None) -> None: + def __init__(self, tracking_uri: Optional[str] = None): try: import mlflow except ImportError: @@ -290,7 +290,7 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): tag: common title for all produced plots. For example, 'generator' """ - def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None: + def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None): super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag) def __call__(self, engine: Engine, logger: MLflowLogger, event_name: Union[str, Events]) -> None: diff --git a/ignite/contrib/handlers/neptune_logger.py b/ignite/contrib/handlers/neptune_logger.py index e44eb8fe3c31..f5fce066e9a6 100644 --- a/ignite/contrib/handlers/neptune_logger.py +++ b/ignite/contrib/handlers/neptune_logger.py @@ -323,7 +323,7 @@ def __init__( metric_names: Optional[Union[str, List[str]]] = None, output_transform: Optional[Callable] = None, global_step_transform: Optional[Callable] = None, - ) -> None: + ): super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform) def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None: @@ -391,7 +391,7 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): tag: common title for all produced plots. 
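Among the constructors touched in this commit is ``FastaiLRFinder``; for orientation, a sketch of how the finder is typically driven end to end. The toy model, data and batch size are illustrative only.

.. code-block:: python

    import torch
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset
    from ignite.contrib.handlers import FastaiLRFinder
    from ignite.engine import create_supervised_trainer

    model = nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
    criterion = nn.CrossEntropyLoss()
    trainer = create_supervised_trainer(model, optimizer, criterion)

    data = TensorDataset(torch.randn(256, 10), torch.randint(0, 2, (256,)))
    dataloader = DataLoader(data, batch_size=32)

    lr_finder = FastaiLRFinder()
    to_save = {"model": model, "optimizer": optimizer}
    with lr_finder.attach(trainer, to_save=to_save) as trainer_with_lr_finder:
        trainer_with_lr_finder.run(dataloader)  # runs the lr search, then restores state

    print(lr_finder.lr_suggestion())  # suggested learning rate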
For example, "generator" """ - def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None: + def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None): super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag) def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None: @@ -445,7 +445,7 @@ class WeightsScalarHandler(BaseWeightsScalarHandler): """ - def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None: + def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None): super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag) def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None: @@ -501,7 +501,7 @@ class GradsScalarHandler(BaseWeightsScalarHandler): """ - def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None: + def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None): super(GradsScalarHandler, self).__init__(model, reduction, tag=tag) def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None: diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py index 861afd49fb08..e92489458be0 100644 --- a/ignite/contrib/handlers/param_scheduler.py +++ b/ignite/contrib/handlers/param_scheduler.py @@ -40,7 +40,7 @@ def __init__( param_name: str, save_history: bool = False, param_group_index: Optional[int] = None, - ) -> None: + ): if not ( isinstance(optimizer, Optimizer) @@ -444,7 +444,7 @@ class ConcatScheduler(ParamScheduler): """ - def __init__(self, schedulers: List[ParamScheduler], durations: List[int], save_history: bool = False) -> None: + def __init__(self, schedulers: List[ParamScheduler], durations: List[int], save_history: bool = False): if not isinstance(schedulers, Sequence): raise TypeError(f"Argument schedulers should be a sequence, but given {schedulers}") @@ -672,7 +672,7 @@ class LRScheduler(ParamScheduler): trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler) """ - def __init__(self, lr_scheduler: _LRScheduler, save_history: bool = False) -> None: + def __init__(self, lr_scheduler: _LRScheduler, save_history: bool = False): if not isinstance(lr_scheduler, _LRScheduler): raise TypeError( @@ -886,10 +886,6 @@ class PiecewiseLinear(ParamScheduler): `engine.state.param_history`, (default=False). param_group_index: optimizer's parameters group to use. - Returns: - PiecewiseLinear - - .. code-block:: python scheduler = PiecewiseLinear(optimizer, "lr", @@ -910,7 +906,7 @@ def __init__( milestones_values: List[Tuple[int, float]], save_history: bool = False, param_group_index: Optional[int] = None, - ) -> None: + ): super(PiecewiseLinear, self).__init__(optimizer, param_name, save_history, param_group_index=param_group_index) if not isinstance(milestones_values, Sequence): @@ -1087,7 +1083,8 @@ def simulate_values(cls, num_events: int, schedulers: List[_LRScheduler], **kwar Args: num_events: number of events during the simulation. schedulers: lr_scheduler object to wrap. - kwargs: kwargs passed to construct an instance of :class:`ignite.contrib.handlers.ParamGroupScheduler`. + kwargs: kwargs passed to construct an instance of + :class:`ignite.contrib.handlers.param_scheduler.ParamGroupScheduler`. 
Returns: event_index, value diff --git a/ignite/contrib/handlers/polyaxon_logger.py b/ignite/contrib/handlers/polyaxon_logger.py index 2c6699edf468..dc2df4183650 100644 --- a/ignite/contrib/handlers/polyaxon_logger.py +++ b/ignite/contrib/handlers/polyaxon_logger.py @@ -91,7 +91,7 @@ class PolyaxonLogger(BaseLogger): """ - def __init__(self, *args: Any, **kwargs: Any) -> None: + def __init__(self, *args: Any, **kwargs: Any): try: from polyaxon_client.tracking import Experiment except ImportError: @@ -204,7 +204,7 @@ def __init__( metric_names: Optional[List[str]] = None, output_transform: Optional[Callable] = None, global_step_transform: Optional[Callable] = None, - ) -> None: + ): super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform) def __call__(self, engine: Engine, logger: PolyaxonLogger, event_name: Union[str, Events]) -> None: @@ -269,7 +269,7 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): tag: common title for all produced plots. For example, "generator" """ - def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None: + def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None): super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag) def __call__(self, engine: Engine, logger: PolyaxonLogger, event_name: Union[str, Events]) -> None: diff --git a/ignite/contrib/handlers/stores.py b/ignite/contrib/handlers/stores.py index 4dc2db7c9c74..2c35aa5480ff 100644 --- a/ignite/contrib/handlers/stores.py +++ b/ignite/contrib/handlers/stores.py @@ -32,7 +32,7 @@ def log_training_results(engine): .. versionadded:: 0.4.2 """ - def __init__(self, output_transform: Callable = lambda x: x) -> None: + def __init__(self, output_transform: Callable = lambda x: x): self.data = [] # type: List[Union[int, Tuple[int, int]]] self.output_transform = output_transform diff --git a/ignite/contrib/handlers/tensorboard_logger.py b/ignite/contrib/handlers/tensorboard_logger.py index 2a907f8dc4c4..75d947370310 100644 --- a/ignite/contrib/handlers/tensorboard_logger.py +++ b/ignite/contrib/handlers/tensorboard_logger.py @@ -149,7 +149,7 @@ class TensorboardLogger(BaseLogger): """ - def __init__(self, *args: Any, **kwargs: Any) -> None: + def __init__(self, *args: Any, **kwargs: Any): try: from tensorboardX import SummaryWriter except ImportError: @@ -266,7 +266,7 @@ def __init__( metric_names: Optional[List[str]] = None, output_transform: Optional[Callable] = None, global_step_transform: Optional[Callable] = None, - ) -> None: + ): super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform) def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, EventEnum]) -> None: @@ -325,7 +325,7 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): tag: common title for all produced plots. 
For example, "generator" """ - def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None: + def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None): super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag) def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None: @@ -371,7 +371,7 @@ class WeightsScalarHandler(BaseWeightsScalarHandler): """ - def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None: + def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None): super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag) def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None: @@ -463,7 +463,7 @@ class GradsScalarHandler(BaseWeightsScalarHandler): """ - def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None: + def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None): super(GradsScalarHandler, self).__init__(model, reduction, tag=tag) def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None: @@ -507,7 +507,7 @@ class GradsHistHandler(BaseWeightsHistHandler): """ - def __init__(self, model: nn.Module, tag: Optional[str] = None) -> None: + def __init__(self, model: nn.Module, tag: Optional[str] = None): super(GradsHistHandler, self).__init__(model, tag=tag) def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None: diff --git a/ignite/contrib/handlers/time_profilers.py b/ignite/contrib/handlers/time_profilers.py index 4cec5eba2ff5..f70c4932cb7e 100644 --- a/ignite/contrib/handlers/time_profilers.py +++ b/ignite/contrib/handlers/time_profilers.py @@ -42,7 +42,7 @@ def log_intermediate_results(): Events.DATALOADER_STOP_ITERATION, ] - def __init__(self) -> None: + def __init__(self): self._dataflow_timer = Timer() self._processing_timer = Timer() self._event_handlers_timer = Timer() @@ -483,7 +483,7 @@ def log_intermediate_results(): EVENT_FILTER_THESHOLD_TIME = 0.0001 - def __init__(self) -> None: + def __init__(self): self._dataflow_timer = Timer() self._processing_timer = Timer() self._event_handlers_timer = Timer() diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py index b4c71be2c669..d52280f7216c 100644 --- a/ignite/contrib/handlers/tqdm_logger.py +++ b/ignite/contrib/handlers/tqdm_logger.py @@ -105,7 +105,7 @@ def __init__( persist: bool = False, bar_format: str = "{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]", **tqdm_kwargs: Any, - ) -> None: + ): try: from tqdm.autonotebook import tqdm @@ -236,7 +236,7 @@ def __init__( metric_names: Optional[Union[str, List[str]]] = None, output_transform: Optional[Callable] = None, closing_event_name: Union[Events, CallableEventWithFilter] = Events.EPOCH_COMPLETED, - ) -> None: + ): if metric_names is None and output_transform is None: # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler metric_names = [] diff --git a/ignite/contrib/handlers/visdom_logger.py b/ignite/contrib/handlers/visdom_logger.py index f8ae3dd56907..f580a18a24a4 100644 --- a/ignite/contrib/handlers/visdom_logger.py +++ b/ignite/contrib/handlers/visdom_logger.py @@ -209,7 +209,7 @@ def 
_create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerPar class _BaseVisDrawer: - def __init__(self, show_legend: bool = False) -> None: + def __init__(self, show_legend: bool = False): self.windows = {} # type: Dict[str, Any] self.show_legend = show_legend @@ -346,7 +346,7 @@ def __init__( output_transform: Optional[Callable] = None, global_step_transform: Optional[Callable] = None, show_legend: bool = False, - ) -> None: + ): super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform) _BaseVisDrawer.__init__(self, show_legend=show_legend) @@ -420,7 +420,7 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler, _BaseVisDrawer): def __init__( self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, show_legend: bool = False, - ) -> None: + ): super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag) _BaseVisDrawer.__init__(self, show_legend=show_legend) @@ -471,7 +471,7 @@ class WeightsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer): def __init__( self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False, - ) -> None: + ): super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag) _BaseVisDrawer.__init__(self, show_legend=show_legend) @@ -522,7 +522,7 @@ class GradsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer): def __init__( self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False, - ) -> None: + ): super(GradsScalarHandler, self).__init__(model, reduction, tag) _BaseVisDrawer.__init__(self, show_legend=show_legend) diff --git a/ignite/contrib/handlers/wandb_logger.py b/ignite/contrib/handlers/wandb_logger.py index 1d757bee16bf..9b55e827a3c4 100644 --- a/ignite/contrib/handlers/wandb_logger.py +++ b/ignite/contrib/handlers/wandb_logger.py @@ -116,7 +116,7 @@ def score_function(engine): evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {'model': model}) """ - def __init__(self, *args: Any, **kwargs: Any) -> None: + def __init__(self, *args: Any, **kwargs: Any): try: import wandb @@ -251,7 +251,7 @@ def __init__( output_transform: Optional[Callable] = None, global_step_transform: Optional[Callable] = None, sync: Optional[bool] = None, - ) -> None: + ): super().__init__(tag, metric_names, output_transform, global_step_transform) self.sync = sync @@ -318,7 +318,7 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler): def __init__( self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, sync: Optional[bool] = None, - ) -> None: + ): super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag) self.sync = sync From e209cc11dd56d4a7597db3cdd06b6bbbd318ae5e Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Tue, 23 Feb 2021 12:47:34 +0630 Subject: [PATCH 09/11] fix: return None in no argument __init__ --- ignite/contrib/handlers/lr_finder.py | 2 +- ignite/contrib/handlers/time_profilers.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ignite/contrib/handlers/lr_finder.py b/ignite/contrib/handlers/lr_finder.py index 5a4f07700bd6..b319ffaabd49 100644 --- a/ignite/contrib/handlers/lr_finder.py +++ b/ignite/contrib/handlers/lr_finder.py @@ -71,7 +71,7 @@ class FastaiLRFinder: fastai/lr_find: https://github.com/fastai/fastai """ - def __init__(self): + def __init__(self) -> None: self._diverge_flag = False self._history = {} # type: Dict[str, List[Any]] self._best_loss = None diff --git 
a/ignite/contrib/handlers/time_profilers.py b/ignite/contrib/handlers/time_profilers.py index f70c4932cb7e..4cec5eba2ff5 100644 --- a/ignite/contrib/handlers/time_profilers.py +++ b/ignite/contrib/handlers/time_profilers.py @@ -42,7 +42,7 @@ def log_intermediate_results(): Events.DATALOADER_STOP_ITERATION, ] - def __init__(self): + def __init__(self) -> None: self._dataflow_timer = Timer() self._processing_timer = Timer() self._event_handlers_timer = Timer() @@ -483,7 +483,7 @@ def log_intermediate_results(): EVENT_FILTER_THESHOLD_TIME = 0.0001 - def __init__(self): + def __init__(self) -> None: self._dataflow_timer = Timer() self._processing_timer = Timer() self._event_handlers_timer = Timer() From 5695a7a26e90b9993cd55bbfd982dc8ea5097c47 Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Tue, 23 Feb 2021 18:24:49 +0630 Subject: [PATCH 10/11] remove kwargs in ConcatScheduler.simulate_values --- ignite/contrib/handlers/param_scheduler.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py index e92489458be0..56bc7c7a8a72 100644 --- a/ignite/contrib/handlers/param_scheduler.py +++ b/ignite/contrib/handlers/param_scheduler.py @@ -579,7 +579,6 @@ def simulate_values( # type: ignore[override] schedulers: List[ParamScheduler], durations: List[int], param_names: Optional[Union[List[str], Tuple[str]]] = None, - **kwargs: Any, ) -> List[List[int]]: """Method to simulate scheduled values during num_events events. @@ -589,7 +588,6 @@ def simulate_values( # type: ignore[override] durations: list of number of events that lasts a parameter scheduler from schedulers. param_names: parameter name or list of parameter names to simulate values. By default, the first scheduler's parameter name is taken. - kwargs: Returns: list of [event_index, value_0, value_1, ...], where values correspond to `param_names`. @@ -628,7 +626,7 @@ def simulate_values( # type: ignore[override] output = [] scheduler = cls( # type: ignore[call-arg] - schedulers=schedulers, save_history=False, durations=durations, **kwargs + schedulers=schedulers, save_history=False, durations=durations ) if param_names is None: param_names = [scheduler.param_name] From 5aa6db9ddf5701a6cf8dcb81b23eca672ea1a617 Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Tue, 23 Feb 2021 19:30:00 +0630 Subject: [PATCH 11/11] fix: remove unused mypy ignore --- ignite/contrib/handlers/param_scheduler.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py index 56bc7c7a8a72..61f8ba36ffcb 100644 --- a/ignite/contrib/handlers/param_scheduler.py +++ b/ignite/contrib/handlers/param_scheduler.py @@ -625,9 +625,7 @@ def simulate_values( # type: ignore[override] s.save_history = False output = [] - scheduler = cls( # type: ignore[call-arg] - schedulers=schedulers, save_history=False, durations=durations - ) + scheduler = cls(schedulers=schedulers, save_history=False, durations=durations) if param_names is None: param_names = [scheduler.param_name] for i in range(num_events):
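Closing the series, the last two commits trim ``ConcatScheduler.simulate_values``; a sketch of the call as it stands afterwards, with scheduler types, cycle sizes and durations picked only as an example:

.. code-block:: python

    import torch
    import torch.nn as nn
    from ignite.contrib.handlers.param_scheduler import (
        ConcatScheduler,
        CosineAnnealingScheduler,
        LinearCyclicalScheduler,
    )

    optimizer = torch.optim.SGD(nn.Linear(4, 2).parameters(), lr=0.1)
    scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.1, end_value=0.5, cycle_size=60)
    scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.5, end_value=0.01, cycle_size=60)

    # after this series, simulate_values takes only num_events, schedulers, durations (and param_names)
    values = ConcatScheduler.simulate_values(
        num_events=100, schedulers=[scheduler_1, scheduler_2], durations=[30],
    )
    print(values[:3])  # pairs of [event_index, value]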