diff --git a/monai/data/dataset.py b/monai/data/dataset.py
index 474684c4e2..debf9c6aa4 100644
--- a/monai/data/dataset.py
+++ b/monai/data/dataset.py
@@ -589,8 +589,9 @@ def __init__(
                 If num_workers is None then the number returned by os.cpu_count() is used.
             progress: whether to display a progress bar.
             copy_cache: whether to `deepcopy` the cache content before applying the random transforms,
-                default to `True`. if the random transforms don't modify the cache content
-                or every cache item is only used once in a `multi-processing` environment,
+                default to `True`. if the random transforms don't modify the cached content
+                (for example, randomly crop from the cached image and deepcopy the crop region)
+                or if every cache item is only used once in a `multi-processing` environment,
                 may set `copy=False` for better performance.
         """
         if not isinstance(transform, Compose):
diff --git a/monai/handlers/classification_saver.py b/monai/handlers/classification_saver.py
index 815be87754..4481ae0fec 100644
--- a/monai/handlers/classification_saver.py
+++ b/monai/handlers/classification_saver.py
@@ -55,9 +55,15 @@ def __init__(
             batch_transform: a callable that is used to extract the `meta_data` dictionary of the input images from
                 `ignite.engine.state.batch`. the purpose is to get the input filenames from the `meta_data` and
                 store with classification results together.
+                `engine.state` and `batch_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             output_transform: a callable that is used to extract the model prediction data from
                 `ignite.engine.state.output`. the first dimension of its output will be treated as the batch
                 dimension. each item in the batch will be saved individually.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             name: identifier of logging.logger to use, defaulting to `engine.logger`.
             save_rank: only the handler on specified rank will save to CSV file in multi-gpus validation,
                 default to 0.
diff --git a/monai/handlers/confusion_matrix.py b/monai/handlers/confusion_matrix.py
index 44edcae00f..24adeb879c 100644
--- a/monai/handlers/confusion_matrix.py
+++ b/monai/handlers/confusion_matrix.py
@@ -43,8 +43,9 @@ def __init__(
             output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
                 construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
                 lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
-                for example: if `ignite.engine.state.output` is `{"pred": xxx, "label": xxx, "other": xxx}`,
-                output_transform can be `lambda x: (x["pred"], x["label"])`.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             save_details: whether to save metric computation details per image, for example: TP/TN/FP/FN of every image.
                 default to True, will save to `engine.state.metric_details` dict with the metric name as key.
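
The hunks above replace the inline `lambda x: (x["pred"], x["label"])` example with links to the ignite `State` docs and the batch_output_transform tutorial. Below is a minimal sketch of the same pattern, assuming a dict-style `engine.state.batch` / `engine.state.output` with hypothetical "image_meta_dict", "pred" and "label" keys; with decollated list-of-dict output, the tutorial's `from_engine` helper plays the same role.

from monai.handlers import ClassificationSaver, ConfusionMatrix

saver = ClassificationSaver(
    output_dir="./output",
    batch_transform=lambda batch: batch["image_meta_dict"],  # meta data -> input filenames
    output_transform=lambda output: output["pred"],          # model predictions to save
)
cm = ConfusionMatrix(
    metric_name="sensitivity",
    output_transform=lambda output: (output["pred"], output["label"]),  # (y_pred, y) pair
)
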
diff --git a/monai/handlers/hausdorff_distance.py b/monai/handlers/hausdorff_distance.py
index ea505ef02e..321e840353 100644
--- a/monai/handlers/hausdorff_distance.py
+++ b/monai/handlers/hausdorff_distance.py
@@ -44,8 +44,9 @@ def __init__(
             output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
                 construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
                 lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
-                for example: if `ignite.engine.state.output` is `{"pred": xxx, "label": xxx, "other": xxx}`,
-                output_transform can be `lambda x: (x["pred"], x["label"])`.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             save_details: whether to save metric computation details per image, for example: hausdorff distance of every image.
                 default to True, will save to `engine.state.metric_details` dict with the metric name as key.
diff --git a/monai/handlers/ignite_metric.py b/monai/handlers/ignite_metric.py
index cc87842fa5..ec99e83752 100644
--- a/monai/handlers/ignite_metric.py
+++ b/monai/handlers/ignite_metric.py
@@ -41,8 +41,9 @@ class IgniteMetric(Metric):  # type: ignore[valid-type, misc] # due to optional_
         output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
             construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
             lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
-            for example: if `ignite.engine.state.output` is `{"pred": xxx, "label": xxx, "other": xxx}`,
-            output_transform can be `lambda x: (x["pred"], x["label"])`.
+            `engine.state` and `output_transform` inherit from the ignite concept:
+            https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+            https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
         save_details: whether to save metric computation details per image, for example: mean_dice of every image.
             default to True, will save to `engine.state.metric_details` dict with the metric name as key.
diff --git a/monai/handlers/mean_dice.py b/monai/handlers/mean_dice.py
index 9d3b95a735..6c270caa4c 100644
--- a/monai/handlers/mean_dice.py
+++ b/monai/handlers/mean_dice.py
@@ -32,8 +32,9 @@ def __init__(
             output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
                 construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
                 lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
-                for example: if `ignite.engine.state.output` is `{"pred": xxx, "label": xxx, "other": xxx}`,
-                output_transform can be `lambda x: (x["pred"], x["label"])`.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             save_details: whether to save metric computation details per image, for example: mean dice of every image.
                 default to True, will save to `engine.state.metric_details` dict with the metric name as key.
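
These metric handlers share the ignite attach/update/compute lifecycle. A minimal sketch of attaching one of them to an evaluator, assuming a placeholder step whose output is a dict with hypothetical "pred" and "label" keys holding batch-first tensors:

from ignite.engine import Engine
from monai.handlers import MeanDice

def _eval_step(engine, batch):
    # placeholder step: a real evaluator would run the network and post-processing here
    return {"pred": batch["pred"], "label": batch["label"]}

evaluator = Engine(_eval_step)
MeanDice(
    include_background=False,
    output_transform=lambda output: (output["pred"], output["label"]),  # -> (y_pred, y)
    save_details=True,  # per-image scores land in engine.state.metric_details["mean_dice"]
).attach(evaluator, name="mean_dice")
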
diff --git a/monai/handlers/metric_logger.py b/monai/handlers/metric_logger.py
index 64553955b7..048f230d1a 100644
--- a/monai/handlers/metric_logger.py
+++ b/monai/handlers/metric_logger.py
@@ -57,6 +57,9 @@ class MetricLogger:
     Args:
         loss_transform: Converts the `output` value from the trainer's state into a loss value
+            `engine.state` and `loss_transform` inherit from the ignite concept:
+            https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+            https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
         metric_transform: Converts the metric value coming from the trainer/evaluator's state into a storable value
         evaluator: Optional evaluator to consume metric results from at the end of its evaluation run
     """
diff --git a/monai/handlers/metrics_saver.py b/monai/handlers/metrics_saver.py
index 4c722eb35b..d6aa0c7b9f 100644
--- a/monai/handlers/metrics_saver.py
+++ b/monai/handlers/metrics_saver.py
@@ -50,6 +50,9 @@ class MetricsSaver:
         batch_transform: a callable that is used to extract the `meta_data` dictionary of the input images from
             `ignite.engine.state.batch` if saving metric details. the purpose is to get the input filenames
             from the `meta_data` and store with metric details together.
+            `engine.state` and `batch_transform` inherit from the ignite concept:
+            https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+            https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
         summary_ops: expected computation operations to generate the summary report.
             it can be: None, "*" or list of strings, default to None.
             None - don't generate summary report for every expected metric_details.
diff --git a/monai/handlers/mlflow_handler.py b/monai/handlers/mlflow_handler.py
index b4ce4c384b..7bf6596437 100644
--- a/monai/handlers/mlflow_handler.py
+++ b/monai/handlers/mlflow_handler.py
@@ -59,6 +59,9 @@ class MLFlowHandler:
             By default this value logging happens when every iteration completed.
             The default behavior is to track loss from output[0] as output is a decollated list
            and we replicated loss value for every item of the decollated list.
+            `engine.state` and `output_transform` inherit from the ignite concept:
+            https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+            https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
         global_epoch_transform: a callable that is used to customize global epoch number.
             For example, in evaluation, the evaluator engine might want to track synced epoch number
             with the trainer engine.
diff --git a/monai/handlers/regression_metrics.py b/monai/handlers/regression_metrics.py
index bef3123bd8..9758d86bae 100644
--- a/monai/handlers/regression_metrics.py
+++ b/monai/handlers/regression_metrics.py
@@ -28,8 +28,9 @@ def __init__(self, output_transform: Callable = lambda x: x, save_details: bool
             output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
                 construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
                 lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
-                for example: if `ignite.engine.state.output` is `{"pred": xxx, "label": xxx, "other": xxx}`,
-                output_transform can be `lambda x: (x["pred"], x["label"])`.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             save_details: whether to save metric computation details per image, for example: mean squared error of every image.
                 default to True, will save to `engine.state.metric_details` dict with the metric name as key.
@@ -49,8 +50,13 @@ def __init__(self, output_transform: Callable = lambda x: x, save_details: bool
         """
         Args:
-            output_transform: transform the ignite.engine.state.output into [y_pred, y] pair.
-            save_details: whether to save metric computation details per image, for example: mean absolute error of every image.
+            output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
+                construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
+                lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
+            save_details: whether to save metric computation details per image, for example: mean absolute error of every image.
                 default to True, will save to `engine.state.metric_details` dict with the metric name as key.

         See also:
@@ -69,8 +75,13 @@ def __init__(self, output_transform: Callable = lambda x: x, save_details: bool
         """
         Args:
-            output_transform: transform the ignite.engine.state.output into [y_pred, y] pair.
-            save_details: whether to save metric computation details per image, for example: root mean squared error of every image.
+            output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
+                construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
+                lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
+            save_details: whether to save metric computation details per image, for example: root mean squared error of every image.
                 default to True, will save to `engine.state.metric_details` dict with the metric name as key.

         See also:
@@ -93,8 +104,13 @@ def __init__(
         Args:
             max_val: The dynamic range of the images/volumes (i.e., the difference between the
                 maximum and the minimum allowed values e.g. 255 for a uint8 image).
-            output_transform: transform the ignite.engine.state.output into [y_pred, y] pair.
-            save_details: whether to save metric computation details per image, for example: PSNR of every image.
+            output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
+                construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
+                lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
+            save_details: whether to save metric computation details per image, for example: PSNR of every image.
                 default to True, will save to `engine.state.metric_details` dict with the metric name as key.
             reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
diff --git a/monai/handlers/roc_auc.py b/monai/handlers/roc_auc.py
index 90c5fe2f03..125a4991ea 100644
--- a/monai/handlers/roc_auc.py
+++ b/monai/handlers/roc_auc.py
@@ -36,8 +36,9 @@ class ROCAUC(IgniteMetric):  # type: ignore[valid-type, misc] # due to optional
         output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
             construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
             lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
-            for example: if `ignite.engine.state.output` is `{"pred": xxx, "label": xxx, "other": xxx}`,
-            output_transform can be `lambda x: (x["pred"], x["label"])`.
+            `engine.state` and `output_transform` inherit from the ignite concept:
+            https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+            https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.

     Note:
         ROCAUC expects y to be comprised of 0's and 1's.
diff --git a/monai/handlers/segmentation_saver.py b/monai/handlers/segmentation_saver.py
index 27b2cbc039..479cc1408a 100644
--- a/monai/handlers/segmentation_saver.py
+++ b/monai/handlers/segmentation_saver.py
@@ -113,9 +113,15 @@ def __init__(
             batch_transform: a callable that is used to extract the `meta_data` dictionary of the input images from
                 `ignite.engine.state.batch`. the purpose is to extract necessary information from the meta data:
                 filename, affine, original_shape, etc.
+                `engine.state` and `batch_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             output_transform: a callable that is used to extract the model prediction data from
                 `ignite.engine.state.output`. the first dimension of its output will be treated as the batch
                 dimension. each item in the batch will be saved individually.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             name: identifier of logging.logger to use, defaulting to `engine.logger`.

         """
diff --git a/monai/handlers/stats_handler.py b/monai/handlers/stats_handler.py
index 7d40f2a2f2..7c88634820 100644
--- a/monai/handlers/stats_handler.py
+++ b/monai/handlers/stats_handler.py
@@ -69,6 +69,9 @@ def __init__(
                 By default this value logging happens when every iteration completed.
                 The default behavior is to print loss from output[0] as output is a decollated list
                 and we replicated loss value for every item of the decollated list.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             global_epoch_transform: a callable that is used to customize global epoch number.
                 For example, in evaluation, the evaluator engine might want to print synced epoch number
                 with the trainer engine.
diff --git a/monai/handlers/surface_distance.py b/monai/handlers/surface_distance.py
index 3d010888c1..aee1475ae7 100644
--- a/monai/handlers/surface_distance.py
+++ b/monai/handlers/surface_distance.py
@@ -41,8 +41,9 @@ def __init__(
             output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
                 construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
                 lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
-                for example: if `ignite.engine.state.output` is `{"pred": xxx, "label": xxx, "other": xxx}`,
-                output_transform can be `lambda x: (x["pred"], x["label"])`.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             save_details: whether to save metric computation details per image, for example: surface dice of every image.
                 default to True, will save to `engine.state.metric_details` dict with the metric name as key.
diff --git a/monai/handlers/tensorboard_handlers.py b/monai/handlers/tensorboard_handlers.py
index 411c197af6..42bed14c83 100644
--- a/monai/handlers/tensorboard_handlers.py
+++ b/monai/handlers/tensorboard_handlers.py
@@ -109,6 +109,9 @@ def __init__(
                 By default this value plotting happens when every iteration completed.
                 The default behavior is to print loss from output[0] as output is a decollated list
                 and we replicated loss value for every item of the decollated list.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             global_epoch_transform: a callable that is used to customize global epoch number.
                 For example, in evaluation, the evaluator engine might want to use trainer engines epoch number
                 when plotting epoch vs metric curves.
@@ -283,8 +286,14 @@ def __init__(
                 then construct `(image, label)` pair. for example: if `ignite.engine.state.batch` is
                 `{"image": xxx, "label": xxx, "other": xxx}`, `batch_transform` can be `lambda x: (x["image"], x["label"])`.
                 will use the result to plot image from `result[0][index]` and plot label from `result[1][index]`.
+                `engine.state` and `batch_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             output_transform: a callable that is used to extract the `predictions` data from
                 `ignite.engine.state.output`, will use the result to plot output from `result[index]`.
+                `engine.state` and `output_transform` inherit from the ignite concept:
+                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
+                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
             global_iter_transform: a callable that is used to customize global step number for TensorBoard.
                 For example, in evaluation, the evaluator engine needs to know current epoch from trainer.
             index: plot which element in a data batch, default is the first element.
diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py
index 8b4b400aab..cc8dd677da 100644
--- a/monai/transforms/croppad/array.py
+++ b/monai/transforms/croppad/array.py
@@ -397,7 +397,9 @@ def __init__(
                 data=roi_center, output_type=torch.Tensor, dtype=torch.int16, wrap_sequence=True
             )
             roi_size, *_ = convert_to_dst_type(src=roi_size, dst=roi_center, wrap_sequence=True)
-            roi_start_torch = maximum(roi_center - floor_divide(roi_size, 2), torch.zeros_like(roi_center))  # type: ignore
+            roi_start_torch = maximum(
+                roi_center - floor_divide(roi_size, 2), torch.zeros_like(roi_center)  # type: ignore
+            )
             roi_end_torch = maximum(roi_start_torch + roi_size, roi_start_torch)
         else:
             if roi_start is None or roi_end is None:
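
The StatsHandler, MLFlowHandler and TensorBoard hunks above all describe the same default behaviour: with a decollated `engine.state.output` (a list of per-item dicts that replicate the loss value), the loss is read from `output[0]`. A minimal sketch of that pattern, assuming a hypothetical "loss" key and a placeholder training step:

from ignite.engine import Engine
from monai.handlers import StatsHandler

def _train_step(engine, batch):
    loss = 0.0  # placeholder; a real step would run the forward/backward pass here
    # decollated output: one dict per batch item, each replicating the same loss value
    return [{"loss": loss} for _ in range(len(batch))]

trainer = Engine(_train_step)
StatsHandler(
    tag_name="train_loss",
    output_transform=lambda output: output[0]["loss"],  # read loss from the first item
).attach(trainer)
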
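The final hunk only re-wraps the ROI computation of `SpatialCrop`: `roi_start` is `roi_center - roi_size // 2` clamped at zero, and `roi_end` is `roi_start + roi_size`. A small worked example of that arithmetic, with values chosen here purely for illustration:

import torch
from monai.transforms import SpatialCrop

img = torch.arange(100, dtype=torch.float32).reshape(1, 10, 10)  # channel-first image
# roi_start = (4, 5) - (4, 4) // 2 = (2, 3); roi_end = roi_start + (4, 4) = (6, 7)
cropped = SpatialCrop(roi_center=(4, 5), roi_size=(4, 4))(img)
print(cropped.shape)  # expected spatial size: 4 x 4, i.e. (1, 4, 4)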