From 745c10b7d9bafab470a93c705a282e1f72f63000 Mon Sep 17 00:00:00 2001
From: Aobo Yang
Date: Thu, 22 Sep 2022 22:43:04 -0700
Subject: [PATCH 1/5] fix docstring - multiple or

---
 captum/_utils/gradient.py                              | 4 ++--
 captum/attr/_core/layer/layer_activation.py            | 2 +-
 captum/attr/_core/layer/layer_gradient_x_activation.py | 2 +-
 captum/attr/_core/occlusion.py                         | 2 +-
 captum/influence/_utils/common.py                      | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/captum/_utils/gradient.py b/captum/_utils/gradient.py
index aabb65dce6..e0f8b0378c 100644
--- a/captum/_utils/gradient.py
+++ b/captum/_utils/gradient.py
@@ -724,7 +724,7 @@ def _compute_jacobian_wrt_params(
             It is unpacked before passing to `model`, so it must be a tuple. The
             individual elements of `inputs` can be anything.
         labels (Tensor or None): Labels for input if computing a loss function.
-        loss_fn (torch.nn.Module or Callable or None): The loss function. If a library
+        loss_fn (torch.nn.Module or Callable, optional): The loss function. If a library
             defined loss function is provided, it would be expected to be a
             torch.nn.Module. If a custom loss is provided, it can be either type,
             but must behave as a library loss function would if `reduction='none'`.
@@ -793,7 +793,7 @@ def _compute_jacobian_wrt_params_with_sample_wise_trick(
             It is unpacked before passing to `model`, so it must be a tuple. The
             individual elements of `inputs` can be anything.
         labels (Tensor or None): Labels for input if computing a loss function.
-        loss_fn (torch.nn.Module or Callable or None): The loss function. If a library
+        loss_fn (torch.nn.Module or Callable, optional): The loss function. If a library
             defined loss function is provided, it would be expected to be a
             torch.nn.Module. If a custom loss is provided, it can be either type,
             but must behave as a library loss function would if `reduction='sum'` or
diff --git a/captum/attr/_core/layer/layer_activation.py b/captum/attr/_core/layer/layer_activation.py
index f967f21790..7c53570ac2 100644
--- a/captum/attr/_core/layer/layer_activation.py
+++ b/captum/attr/_core/layer/layer_activation.py
@@ -88,7 +88,7 @@ def attribute(

         Returns:
             *Tensor* or *tuple[Tensor, ...]* or list of **attributions**:
-            - **attributions** (*Tensor* or *tuple[Tensor, ...]* or *list*):
+            - **attributions** (*Tensor*, *tuple[Tensor, ...]*, or *list*):
                         Activation of each neuron in given layer output. Attributions
                         will always be the same size as the output of the given
                         layer.
diff --git a/captum/attr/_core/layer/layer_gradient_x_activation.py b/captum/attr/_core/layer/layer_gradient_x_activation.py
index b188760639..0d6a2075cd 100644
--- a/captum/attr/_core/layer/layer_gradient_x_activation.py
+++ b/captum/attr/_core/layer/layer_gradient_x_activation.py
@@ -135,7 +135,7 @@ def attribute(

         Returns:
             *Tensor* or *tuple[Tensor, ...]* or list of **attributions**:
-            - **attributions** (*Tensor* or *tuple[Tensor, ...]* or *list*):
+            - **attributions** (*Tensor*, *tuple[Tensor, ...]*, or *list*):
                         Product of gradient and activation for each
                         neuron in given layer output.
                         Attributions will always be the same size as the
diff --git a/captum/attr/_core/occlusion.py b/captum/attr/_core/occlusion.py
index 6c54b1bf8a..6ca1355944 100644
--- a/captum/attr/_core/occlusion.py
+++ b/captum/attr/_core/occlusion.py
@@ -80,7 +80,7 @@ def attribute( # type: ignore
                         this must be a tuple containing one tuple for each input
                         tensor defining the dimensions of the patch for that
                         input tensor, as described for the single tensor case.
-            strides (int or tuple or tuple[int] or tuple[tuple], optional):
+            strides (int, tuple, tuple[int], or tuple[tuple], optional):
                         This defines the step by which the occlusion hyperrectangle
                         should be shifted by in each direction for each iteration.
                         For a single tensor input, this can be either a single
diff --git a/captum/influence/_utils/common.py b/captum/influence/_utils/common.py
index f828390f49..b43e3aa553 100644
--- a/captum/influence/_utils/common.py
+++ b/captum/influence/_utils/common.py
@@ -86,7 +86,7 @@ def _jacobian_loss_wrt_inputs(
     batch).

     Args:
-        loss_fn (torch.nn.Module or Callable or None): The loss function. If a library
+        loss_fn (torch.nn.Module, Callable, or None): The loss function. If a library
            defined loss function is provided, it would be expected to be a
            torch.nn.Module. If a custom loss is provided, it can be either type,
            but must behave as a library loss function would if `reduction='sum'`

From 06e18a925ab4280c246f1dbb95125d4a0a30f11c Mon Sep 17 00:00:00 2001
From: Aobo Yang
Date: Mon, 26 Sep 2022 11:27:13 -0700
Subject: [PATCH 2/5] fix docstring utils/gradient

---
 captum/_utils/gradient.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/captum/_utils/gradient.py b/captum/_utils/gradient.py
index e85665ef26..834ca4625c 100644
--- a/captum/_utils/gradient.py
+++ b/captum/_utils/gradient.py
@@ -813,17 +813,17 @@ def _compute_jacobian_wrt_params_with_sample_wise_trick(
         inputs (tuple[Any, ...]): The minibatch for which the forward pass is computed.
             It is unpacked before passing to `model`, so it must be a tuple. The
             individual elements of `inputs` can be anything.
-        labels (Tensor or None): Labels for input if computing a loss function.
+        labels (Tensor, optional): Labels for input if computing a loss function.
         loss_fn (torch.nn.Module or Callable, optional): The loss function. If a library
             defined loss function is provided, it would be expected to be a
             torch.nn.Module. If a custom loss is provided, it can be either type,
             but must behave as a library loss function would if `reduction='sum'` or
             `reduction='mean'`.
-        reduction_type (str): The type of reduction applied. If a loss_fn is passed,
+        reduction_type (str, optional): The type of reduction applied. If a loss_fn is passed,
             this should match `loss_fn.reduction`. Else if gradients are being
             computed on direct model outputs (scores), then 'sum' should be used.
             Defaults to 'sum'.
-        layer_modules (torch.nn.Module): A list of PyTorch modules w.r.t. which
+        layer_modules (torch.nn.Module, optional): A list of PyTorch modules w.r.t. which
             jacobian gradients are computed.

     Returns:

From d68d0a9b23ccb20bb1a99923ef0cc7ecb3c3985d Mon Sep 17 00:00:00 2001
From: Aobo Yang
Date: Mon, 26 Sep 2022 11:28:56 -0700
Subject: [PATCH 3/5] fix docstring utils/gradient

---
 captum/_utils/gradient.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/captum/_utils/gradient.py b/captum/_utils/gradient.py
index 834ca4625c..9b042a480e 100644
--- a/captum/_utils/gradient.py
+++ b/captum/_utils/gradient.py
@@ -738,12 +738,12 @@ def _compute_jacobian_wrt_params(
         inputs (tuple[Any, ...]): The minibatch for which the forward pass is computed.
             It is unpacked before passing to `model`, so it must be a tuple. The
             individual elements of `inputs` can be anything.
-        labels (Tensor or None): Labels for input if computing a loss function.
+        labels (Tensor, optional): Labels for input if computing a loss function.
         loss_fn (torch.nn.Module or Callable, optional): The loss function. If a library
             defined loss function is provided, it would be expected to be a
             torch.nn.Module. If a custom loss is provided, it can be either type,
             but must behave as a library loss function would if `reduction='none'`.
-        layer_modules (List[torch.nn.Module]): A list of PyTorch modules w.r.t. which
+        layer_modules (List[torch.nn.Module], optional): A list of PyTorch modules w.r.t. which
             jacobian gradients are computed.
     Returns:
         grads (tuple[Tensor, ...]): Returns the Jacobian for the minibatch as a

From b077b6067dab20bd1bdebc032fc67f856a20e890 Mon Sep 17 00:00:00 2001
From: Aobo Yang
Date: Mon, 3 Oct 2022 12:44:16 -0700
Subject: [PATCH 4/5] fix flake err

---
 captum/_utils/gradient.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/captum/_utils/gradient.py b/captum/_utils/gradient.py
index 9b042a480e..91a898a5ed 100644
--- a/captum/_utils/gradient.py
+++ b/captum/_utils/gradient.py
@@ -743,8 +743,8 @@ def _compute_jacobian_wrt_params(
             defined loss function is provided, it would be expected to be a
             torch.nn.Module. If a custom loss is provided, it can be either type,
             but must behave as a library loss function would if `reduction='none'`.
-        layer_modules (List[torch.nn.Module], optional): A list of PyTorch modules w.r.t. which
-            jacobian gradients are computed.
+        layer_modules (List[torch.nn.Module], optional): A list of PyTorch modules
+            w.r.t. which jacobian gradients are computed.
     Returns:
         grads (tuple[Tensor, ...]): Returns the Jacobian for the minibatch as a
             tuple of gradients corresponding to the tuple of trainable parameters
@@ -819,12 +819,12 @@ def _compute_jacobian_wrt_params_with_sample_wise_trick(
             torch.nn.Module. If a custom loss is provided, it can be either type,
             but must behave as a library loss function would if `reduction='sum'` or
             `reduction='mean'`.
-        reduction_type (str, optional): The type of reduction applied. If a loss_fn is passed,
-            this should match `loss_fn.reduction`. Else if gradients are being
+        reduction_type (str, optional): The type of reduction applied. If a loss_fn is
+            passed, this should match `loss_fn.reduction`. Else if gradients are being
             computed on direct model outputs (scores), then 'sum' should be used.
             Defaults to 'sum'.
-        layer_modules (torch.nn.Module, optional): A list of PyTorch modules w.r.t. which
-            jacobian gradients are computed.
+        layer_modules (torch.nn.Module, optional): A list of PyTorch modules w.r.t.
+            which jacobian gradients are computed.

     Returns:
         grads (tuple[Tensor, ...]): Returns the Jacobian for the minibatch as a

From f0c8997d80c4f9f69e75f6e078eb620e3e4fd8f6 Mon Sep 17 00:00:00 2001
From: Aobo Yang
Date: Mon, 3 Oct 2022 14:21:27 -0700
Subject: [PATCH 5/5] fix flake err

---
 captum/_utils/gradient.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/captum/_utils/gradient.py b/captum/_utils/gradient.py
index 91a898a5ed..d96e588581 100644
--- a/captum/_utils/gradient.py
+++ b/captum/_utils/gradient.py
@@ -820,8 +820,9 @@ def _compute_jacobian_wrt_params_with_sample_wise_trick(
             but must behave as a library loss function would if `reduction='sum'` or
             `reduction='mean'`.
         reduction_type (str, optional): The type of reduction applied. If a loss_fn is
-            passed, this should match `loss_fn.reduction`. Else if gradients are being
-            computed on direct model outputs (scores), then 'sum' should be used.
+            passed, this should match `loss_fn.reduction`. Else if gradients are
+            being computed on direct model outputs (scores), then 'sum' should be
+            used.
             Defaults to 'sum'.
         layer_modules (torch.nn.Module, optional): A list of PyTorch modules w.r.t.
            which jacobian gradients are computed.
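
A minimal usage sketch of the `sliding_window_shapes` and `strides` parameters whose docstring the occlusion.py hunk above touches; the toy model, input shape, and target index here are illustrative assumptions, not part of the patch:

    import torch
    from captum.attr import Occlusion

    # Illustrative classifier over (batch, 3, 32, 32) image tensors.
    model = torch.nn.Sequential(
        torch.nn.Conv2d(3, 8, kernel_size=3, padding=1),
        torch.nn.ReLU(),
        torch.nn.Flatten(),
        torch.nn.Linear(8 * 32 * 32, 10),
    )
    model.eval()
    inputs = torch.randn(4, 3, 32, 32)

    occlusion = Occlusion(model)
    attributions = occlusion.attribute(
        inputs,
        sliding_window_shapes=(3, 8, 8),  # patch shape over the non-batch dims
        strides=(3, 4, 4),                # step of the occlusion patch per dim
        baselines=0,                      # value used to fill each occluded patch
        target=0,                         # output class index to attribute
    )
    print(attributions.shape)  # torch.Size([4, 3, 32, 32]) -- same shape as inputs

Passing a single int for `strides` applies the same step to every non-batch dimension, which is the shorthand the revised "int, tuple, tuple[int], or tuple[tuple]" wording describes.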