From fd254c43ba75bd6b539fb4781b3db7e8c12a16bc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C5=BEenan=20Zuki=C4=87?=
Date: Wed, 30 Nov 2022 16:24:14 -0500
Subject: [PATCH 1/3] Add split_channels parameter to LayerGradCam.attribute

This allows examination of each channel's contribution, which is useful
when the channels are something other than standard RGB, for example
multi-spectral input with potentially many spectral channels.
---
 captum/attr/_core/layer/grad_cam.py | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/captum/attr/_core/layer/grad_cam.py b/captum/attr/_core/layer/grad_cam.py
index df839a811c..c72d6dadf9 100644
--- a/captum/attr/_core/layer/grad_cam.py
+++ b/captum/attr/_core/layer/grad_cam.py
@@ -82,6 +82,7 @@ def attribute(
         additional_forward_args: Any = None,
         attribute_to_layer_input: bool = False,
         relu_attributions: bool = False,
+        split_channels: bool = False,
     ) -> Union[Tensor, Tuple[Tensor, ...]]:
         r"""
         Args:
@@ -149,6 +150,10 @@ def attribute(
                         otherwise, by default, both positive and negative
                         attributions are returned.
                         Default: False
+            split_channels (bool, optional): Indicates whether to
+                        keep attributions split per channel.
+                        The default (False) means to sum over the channel dimension.
+                        Default: False

         Returns:
             *Tensor* or *tuple[Tensor, ...]* of **attributions**:
@@ -208,10 +213,16 @@ def attribute(
             for layer_grad in layer_gradients
         )

-        scaled_acts = tuple(
-            torch.sum(summed_grad * layer_eval, dim=1, keepdim=True)
-            for summed_grad, layer_eval in zip(summed_grads, layer_evals)
-        )
+        if split_channels:
+            scaled_acts = tuple(
+                summed_grad * layer_eval
+                for summed_grad, layer_eval in zip(summed_grads, layer_evals)
+            )
+        else:
+            scaled_acts = tuple(
+                torch.sum(summed_grad * layer_eval, dim=1, keepdim=True)
+                for summed_grad, layer_eval in zip(summed_grads, layer_evals)
+            )
         if relu_attributions:
             scaled_acts = tuple(F.relu(scaled_act) for scaled_act in scaled_acts)
         return _format_output(len(scaled_acts) > 1, scaled_acts)

From cce95cde066c80f1fc73de79a9621a62c1a78eed Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C5=BEenan=20Zuki=C4=87?=
Date: Thu, 22 Dec 2022 16:08:27 -0500
Subject: [PATCH 2/3] Add test for split_channels parameter to
 LayerGradCam.attribute

---
 tests/attr/layer/test_grad_cam.py | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/tests/attr/layer/test_grad_cam.py b/tests/attr/layer/test_grad_cam.py
index 6f0229a76b..385da55465 100644
--- a/tests/attr/layer/test_grad_cam.py
+++ b/tests/attr/layer/test_grad_cam.py
@@ -33,6 +33,23 @@ def test_simple_input_conv(self) -> None:
             net, net.conv1, inp, [[[[11.25, 13.5], [20.25, 22.5]]]]
         )

+    def test_simple_input_conv_split_channels(self) -> None:
+        net = BasicModel_ConvNet_One_Conv()
+        inp = torch.arange(16).view(1, 1, 4, 4).float()
+        expected_result = [
+            [
+                [[-3.7500, 3.0000], [23.2500, 30.0000]],
+                [[15.0000, 10.5000], [-3.0000, -7.5000]],
+            ]
+        ]
+        self._grad_cam_test_assert(
+            net,
+            net.conv1,
+            inp,
+            expected_activation=expected_result,
+            split_channels=True,
+        )
+
     def test_simple_input_conv_no_grad(self) -> None:
         net = BasicModel_ConvNet_One_Conv()

@@ -100,6 +117,7 @@ def _grad_cam_test_assert(
         additional_input: Any = None,
         attribute_to_layer_input: bool = False,
         relu_attributions: bool = False,
+        split_channels: bool = False,
     ):
         layer_gc = LayerGradCam(model, target_layer)
         self.assertFalse(layer_gc.multiplies_by_inputs)
@@ -109,6 +127,7 @@ def _grad_cam_test_assert(
             additional_forward_args=additional_input,
             attribute_to_layer_input=attribute_to_layer_input,
             relu_attributions=relu_attributions,
+            split_channels=split_channels,
         )
         assertTensorTuplesAlmostEqual(
             self, attributions, expected_activation, delta=0.01

From 0f3718678c51a18fd0d6e5584983be0b69c8946e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C5=BEenan=20Zuki=C4=87?=
Date: Tue, 27 Dec 2022 17:58:30 -0500
Subject: [PATCH 3/3] Rename split_channels to attr_dim_summation and invert
 the logic

---
 captum/attr/_core/layer/grad_cam.py | 17 +++++++++--------
 tests/attr/layer/test_grad_cam.py   |  6 +++---
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/captum/attr/_core/layer/grad_cam.py b/captum/attr/_core/layer/grad_cam.py
index c72d6dadf9..90c57c87e8 100644
--- a/captum/attr/_core/layer/grad_cam.py
+++ b/captum/attr/_core/layer/grad_cam.py
@@ -82,7 +82,7 @@ def attribute(
         additional_forward_args: Any = None,
         attribute_to_layer_input: bool = False,
         relu_attributions: bool = False,
-        split_channels: bool = False,
+        attr_dim_summation: bool = True,
     ) -> Union[Tensor, Tuple[Tensor, ...]]:
         r"""
         Args:
@@ -150,10 +150,10 @@ def attribute(
                         otherwise, by default, both positive and negative
                         attributions are returned.
                         Default: False
-            split_channels (bool, optional): Indicates whether to
-                        keep attributions split per channel.
-                        The default (False) means to sum over the channel dimension.
-                        Default: False
+            attr_dim_summation (bool, optional): Indicates whether to
+                        sum attributions along dimension 1 (usually channel).
+                        The default (True) means to sum along dimension 1.
+                        Default: True

         Returns:
             *Tensor* or *tuple[Tensor, ...]* of **attributions**:
@@ -213,16 +213,17 @@ def attribute(
             for layer_grad in layer_gradients
         )

-        if split_channels:
+        if attr_dim_summation:
             scaled_acts = tuple(
-                summed_grad * layer_eval
+                torch.sum(summed_grad * layer_eval, dim=1, keepdim=True)
                 for summed_grad, layer_eval in zip(summed_grads, layer_evals)
             )
         else:
             scaled_acts = tuple(
-                torch.sum(summed_grad * layer_eval, dim=1, keepdim=True)
+                summed_grad * layer_eval
                 for summed_grad, layer_eval in zip(summed_grads, layer_evals)
             )
+
         if relu_attributions:
             scaled_acts = tuple(F.relu(scaled_act) for scaled_act in scaled_acts)
         return _format_output(len(scaled_acts) > 1, scaled_acts)
diff --git a/tests/attr/layer/test_grad_cam.py b/tests/attr/layer/test_grad_cam.py
index 385da55465..a8cafbf441 100644
--- a/tests/attr/layer/test_grad_cam.py
+++ b/tests/attr/layer/test_grad_cam.py
@@ -47,7 +47,7 @@ def test_simple_input_conv_split_channels(self) -> None:
             net.conv1,
             inp,
             expected_activation=expected_result,
-            split_channels=True,
+            attr_dim_summation=False,
         )

     def test_simple_input_conv_no_grad(self) -> None:
@@ -117,7 +117,7 @@ def _grad_cam_test_assert(
         additional_input: Any = None,
         attribute_to_layer_input: bool = False,
         relu_attributions: bool = False,
-        split_channels: bool = False,
+        attr_dim_summation: bool = True,
     ):
         layer_gc = LayerGradCam(model, target_layer)
         self.assertFalse(layer_gc.multiplies_by_inputs)
@@ -127,7 +127,7 @@ def _grad_cam_test_assert(
             additional_forward_args=additional_input,
             attribute_to_layer_input=attribute_to_layer_input,
             relu_attributions=relu_attributions,
-            split_channels=split_channels,
+            attr_dim_summation=attr_dim_summation,
        )
         assertTensorTuplesAlmostEqual(
             self, attributions, expected_activation, delta=0.01
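
Usage sketch, with all three patches applied: LayerGradCam.attribute sums the
scaled activations over dimension 1 by default and keeps one map per channel of
the target layer when attr_dim_summation=False is passed. The Sequential toy
network and the shapes below are hypothetical, chosen only to make the behavior
concrete; the LayerGradCam API itself is as defined in the patched
captum/attr/_core/layer/grad_cam.py.

    import torch
    from captum.attr import LayerGradCam

    # Hypothetical multi-spectral toy model: 6 input bands, one conv layer.
    net = torch.nn.Sequential(
        torch.nn.Conv2d(6, 4, kernel_size=3, padding=1),
        torch.nn.ReLU(),
        torch.nn.Flatten(),
        torch.nn.Linear(4 * 8 * 8, 3),  # 3 output classes
    )
    inp = torch.rand(1, 6, 8, 8)  # one 8x8 image with 6 spectral channels

    layer_gc = LayerGradCam(net, net[0])  # attribute w.r.t. the conv layer

    # Default (attr_dim_summation=True): sum over dim 1 -> shape (1, 1, 8, 8).
    summed = layer_gc.attribute(inp, target=0)

    # attr_dim_summation=False keeps one map per conv channel -> (1, 4, 8, 8),
    # so each channel's contribution can be examined separately.
    per_channel = layer_gc.attribute(inp, target=0, attr_dim_summation=False)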