#!/usr/bin/env python3
from typing import cast, Tuple, Union
import numpy as np
import torch
from captum._utils.typing import Tensor
from captum.attr._core.gradient_shap import GradientShap
from captum.attr._core.integrated_gradients import IntegratedGradients
from numpy import ndarray
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicLinearModel, BasicModel2
from tests.helpers.classification_models import SoftmaxModel
class Test(BaseTest):
# This test reproduces some of the test cases from the original implementation
# https://github.com/slundberg/shap/
# explainers/test_gradient.py
def test_basic_multi_input(self) -> None:
batch_size = 10
x1 = torch.ones(batch_size, 3)
x2 = torch.ones(batch_size, 4)
inputs = (x1, x2)
batch_size_baselines = 20
baselines = (
torch.zeros(batch_size_baselines, 3),
torch.zeros(batch_size_baselines, 4),
)
model = BasicLinearModel()
model.eval()
model.zero_grad()
np.random.seed(0)
torch.manual_seed(0)
gradient_shap = GradientShap(model)
n_samples = 50
attributions, delta = cast(
Tuple[Tuple[Tensor, ...], Tensor],
gradient_shap.attribute(
inputs, baselines, n_samples=n_samples, return_convergence_delta=True
),
)
attributions_without_delta = gradient_shap.attribute((x1, x2), baselines)
_assert_attribution_delta(self, inputs, attributions, n_samples, delta)
# Compare with integrated gradients
ig = IntegratedGradients(model)
baselines = (torch.zeros(batch_size, 3), torch.zeros(batch_size, 4))
attributions_ig = ig.attribute(inputs, baselines=baselines)
self._assert_shap_ig_comparison(attributions, attributions_ig)
# compare attributions retrieved with and without
# `return_convergence_delta` flag
for attribution, attribution_without_delta in zip(
attributions, attributions_without_delta
):
assertTensorAlmostEqual(self, attribution, attribution_without_delta)
def test_basic_multi_input_wo_multiplying_by_inputs(self) -> None:
batch_size = 10
x1 = torch.ones(batch_size, 3)
x2 = torch.ones(batch_size, 4)
inputs = (x1, x2)
batch_size_baselines = 20
baselines = (
torch.ones(batch_size_baselines, 3) + 2e-5,
torch.ones(batch_size_baselines, 4) + 2e-5,
)
model = BasicLinearModel()
model.eval()
model.zero_grad()
np.random.seed(0)
torch.manual_seed(0)
gradient_shap = GradientShap(model)
gradient_shap_wo_multiplying_by_inputs = GradientShap(
model, multiply_by_inputs=False
)
n_samples = 50
attributions = cast(
Tuple[Tensor, ...],
gradient_shap.attribute(
inputs,
baselines,
n_samples=n_samples,
stdevs=0.0,
),
)
attributions_wo_multiplying_by_inputs = cast(
Tuple[Tensor, ...],
gradient_shap_wo_multiplying_by_inputs.attribute(
inputs,
baselines,
n_samples=n_samples,
stdevs=0.0,
),
)
assertTensorAlmostEqual(
self,
attributions_wo_multiplying_by_inputs[0] * (x1 - baselines[0][0:1]),
attributions[0],
)
assertTensorAlmostEqual(
self,
attributions_wo_multiplying_by_inputs[1] * (x2 - baselines[1][0:1]),
attributions[1],
)
def test_classification_baselines_as_function(self) -> None:
num_in = 40
inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
def generate_baselines() -> Tensor:
return torch.arange(0.0, num_in * 4.0).reshape(4, num_in)
def generate_baselines_with_inputs(inputs: Tensor) -> Tensor:
inp_shape = cast(Tuple[int, ...], inputs.shape)
return torch.arange(0.0, inp_shape[1] * 2.0).reshape(2, inp_shape[1])
def generate_baselines_returns_array() -> ndarray:
return np.arange(0.0, num_in * 4.0).reshape(4, num_in)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
model.eval()
model.zero_grad()
gradient_shap = GradientShap(model)
n_samples = 10
attributions, delta = gradient_shap.attribute(
inputs,
baselines=generate_baselines,
target=torch.tensor(1),
n_samples=n_samples,
stdevs=0.009,
return_convergence_delta=True,
)
_assert_attribution_delta(self, (inputs,), (attributions,), n_samples, delta)
attributions, delta = gradient_shap.attribute(
inputs,
baselines=generate_baselines_with_inputs,
target=torch.tensor(1),
n_samples=n_samples,
stdevs=0.00001,
return_convergence_delta=True,
)
_assert_attribution_delta(self, (inputs,), (attributions,), n_samples, delta)
with self.assertRaises(AssertionError):
attributions, delta = gradient_shap.attribute( # type: ignore
inputs,
# Intentionally passing wrong type.
baselines=generate_baselines_returns_array,
target=torch.tensor(1),
n_samples=n_samples,
stdevs=0.00001,
return_convergence_delta=True,
)
def test_classification(self) -> None:
num_in = 40
inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
baselines = torch.arange(0.0, num_in * 4.0).reshape(4, num_in)
target = torch.tensor(1)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
model.eval()
model.zero_grad()
gradient_shap = GradientShap(model)
n_samples = 10
attributions, delta = gradient_shap.attribute(
inputs,
baselines=baselines,
target=target,
n_samples=n_samples,
stdevs=0.009,
return_convergence_delta=True,
)
_assert_attribution_delta(self, (inputs,), (attributions,), n_samples, delta)
# try to call `compute_convergence_delta` externally
with self.assertRaises(AssertionError):
gradient_shap.compute_convergence_delta(
attributions, inputs, baselines, target=target
)
# now, let's expand target and choose random baselines from `baselines` tensor
rand_indices = np.random.choice(baselines.shape[0], inputs.shape[0]).tolist()
chosen_baselines = baselines[rand_indices]
target_extended = torch.tensor([1, 1])
external_delta = gradient_shap.compute_convergence_delta(
attributions, chosen_baselines, inputs, target=target_extended
)
_assert_delta(self, external_delta)
# Compare with integrated gradients
ig = IntegratedGradients(model)
baselines = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
attributions_ig = ig.attribute(inputs, baselines=baselines, target=target)
self._assert_shap_ig_comparison((attributions,), (attributions_ig,))
def test_basic_relu_multi_input(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[3.0]])
input2 = torch.tensor([[1.0]], requires_grad=True)
baseline1 = torch.tensor([[0.0]])
baseline2 = torch.tensor([[0.0]])
inputs = (input1, input2)
baselines = (baseline1, baseline2)
gs = GradientShap(model)
n_samples = 20000
attributions, delta = cast(
Tuple[Tuple[Tensor, ...], Tensor],
gs.attribute(
inputs,
baselines=baselines,
n_samples=n_samples,
return_convergence_delta=True,
),
)
_assert_attribution_delta(
self, inputs, attributions, n_samples, delta, delta_thresh=0.008
)
ig = IntegratedGradients(model)
attributions_ig = ig.attribute(inputs, baselines=baselines)
self._assert_shap_ig_comparison(attributions, attributions_ig)
def _assert_shap_ig_comparison(
self, attributions1: Tuple[Tensor, ...], attributions2: Tuple[Tensor, ...]
) -> None:
for attribution1, attribution2 in zip(attributions1, attributions2):
for attr_row1, attr_row2 in zip(attribution1, attribution2):
assertTensorAlmostEqual(self, attr_row1, attr_row2, 0.05, "max")
def _assert_attribution_delta(
test: BaseTest,
inputs: Union[Tensor, Tuple[Tensor, ...]],
attributions: Union[Tensor, Tuple[Tensor, ...]],
n_samples: int,
delta: Tensor,
delta_thresh: Union[float, Tensor] = 0.0006,
is_layer: bool = False,
) -> None:
if not is_layer:
for input, attribution in zip(inputs, attributions):
test.assertEqual(attribution.shape, input.shape)
if isinstance(inputs, tuple):
bsz = inputs[0].shape[0]
else:
bsz = inputs.shape[0]
test.assertEqual([bsz * n_samples], list(delta.shape))
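# delta contains one entry per (example, noise sample) pair; average over
# the n_samples drawn for each example to get a per-example delta.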
delta = torch.mean(delta.reshape(bsz, -1), dim=1)
_assert_delta(test, delta, delta_thresh)
def _assert_delta(
test: BaseTest, delta: Tensor, delta_thresh: Union[Tensor, float] = 0.0006
) -> None:
delta_condition = (delta.abs() < delta_thresh).all()
test.assertTrue(
delta_condition,
"Sum of SHAP values {} does"
" not match the difference of endpoints.".format(delta),
)
#!/usr/bin/env python3
from typing import Any, Callable, cast, Dict, Optional, Tuple, Type
import torch
from captum._utils.common import _format_additional_forward_args
from captum.attr._core.feature_permutation import FeaturePermutation
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.lime import Lime
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._utils.attribution import Attribution, InternalAttribution
from tests.attr.helpers.gen_test_utils import (
gen_test_name,
get_target_layer,
parse_test_config,
should_create_generated_test,
)
from tests.attr.helpers.test_config import config
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest, deep_copy_args
from tests.helpers.basic_models import BasicModel_MultiLayer
from torch import Tensor
from torch.nn import Module
"""
Tests in this file are dynamically generated based on the config
defined in tests/attr/helpers/test_config.py. To add new test cases,
read the documentation in test_config.py and add cases based on the
schema described there.
"""
class TargetsMeta(type):
"""
Target tests created in TargetsMeta apply to any test case with targets being a
list or tensor.
Attribution of each example is computed independently with the appropriate target
and compared to the corresponding result of attributing to a batch with a tensor
/ list of targets.
"""
def __new__(cls, name: str, bases: Tuple, attrs: Dict):
for test_config in config:
(
algorithms,
model,
args,
layer,
noise_tunnel,
baseline_distr,
) = parse_test_config(test_config)
target_delta = (
test_config["target_delta"] if "target_delta" in test_config else 0.0001
)
if "target" not in args or not isinstance(args["target"], (list, Tensor)):
continue
for algorithm in algorithms:
# FeaturePermutation requires a batch of inputs,
# so its tests are skipped here
if issubclass(
algorithm, FeaturePermutation
) or not should_create_generated_test(algorithm):
continue
test_method = cls.make_single_target_test(
algorithm,
model,
layer,
args,
target_delta,
noise_tunnel,
baseline_distr,
)
test_name = gen_test_name(
"test_target",
cast(str, test_config["name"]),
algorithm,
noise_tunnel,
)
if test_name in attrs:
raise AssertionError(
"Trying to overwrite existing test with name: %r" % test_name
)
attrs[test_name] = test_method
return super(TargetsMeta, cls).__new__(cls, name, bases, attrs)
# Arguments are deep copied to ensure tests are independent and are not affected
# by any modifications within a previous test.
@classmethod
@deep_copy_args
def make_single_target_test(
cls,
algorithm: Type[Attribution],
model: Module,
layer: Optional[str],
args: Dict[str, Any],
target_delta: float,
noise_tunnel: bool,
baseline_distr: bool,
) -> Callable:
"""
This method creates a single target test for the given algorithm and parameters.
"""
target_layer = get_target_layer(model, layer) if layer is not None else None
# Obtains initial arguments to replace with each example
# individually.
original_inputs = args["inputs"]
original_targets = args["target"]
original_additional_forward_args = (
_format_additional_forward_args(args["additional_forward_args"])
if "additional_forward_args" in args
else None
)
num_examples = (
len(original_inputs)
if isinstance(original_inputs, Tensor)
else len(original_inputs[0])
)
replace_baselines = "baselines" in args and not baseline_distr
if replace_baselines:
original_baselines = args["baselines"]
def target_test_assert(self) -> None:
attr_method: Attribution
if target_layer:
internal_algorithm = cast(Type[InternalAttribution], algorithm)
attr_method = internal_algorithm(model, target_layer)
else:
attr_method = algorithm(model)
if noise_tunnel:
attr_method = NoiseTunnel(attr_method)
attributions_orig = attr_method.attribute(**args)
self.setUp()
for i in range(num_examples):
args["target"] = (
original_targets[i]
if len(original_targets) == num_examples
else original_targets
)
args["inputs"] = (
original_inputs[i : i + 1]
if isinstance(original_inputs, Tensor)
else tuple(
original_inp[i : i + 1] for original_inp in original_inputs
)
)
if original_additional_forward_args is not None:
args["additional_forward_args"] = tuple(
single_add_arg[i : i + 1]
if isinstance(single_add_arg, Tensor)
else single_add_arg
for single_add_arg in original_additional_forward_args
)
if replace_baselines:
if isinstance(original_inputs, Tensor):
args["baselines"] = original_baselines[i : i + 1]
elif isinstance(original_baselines, tuple):
args["baselines"] = tuple(
single_baseline[i : i + 1]
if isinstance(single_baseline, Tensor)
else single_baseline
for single_baseline in original_baselines
)
# Since Lime methods compute attributions for a batch
# sequentially, the random seed is only reset between
# examples for non-Lime methods.
if not issubclass(algorithm, Lime):
self.setUp()
single_attr = attr_method.attribute(**args)
current_orig_attributions = (
attributions_orig[i : i + 1]
if isinstance(attributions_orig, Tensor)
else tuple(
single_attrib[i : i + 1] for single_attrib in attributions_orig
)
)
assertTensorTuplesAlmostEqual(
self,
current_orig_attributions,
single_attr,
delta=target_delta,
mode="max",
)
if (
not issubclass(algorithm, Lime)
and len(original_targets) == num_examples
):
# If original_targets contained multiple elements, then
# we also compare with setting targets to a list with
# a single element.
args["target"] = original_targets[i : i + 1]
self.setUp()
single_attr_target_list = attr_method.attribute(**args)
assertTensorTuplesAlmostEqual(
self,
current_orig_attributions,
single_attr_target_list,
delta=target_delta,
mode="max",
)
return target_test_assert
class TestTargets(BaseTest, metaclass=TargetsMeta):
def test_simple_target_missing_error(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.zeros((1, 3))
with self.assertRaises(AssertionError):
attr = IntegratedGradients(net)
attr.attribute(inp)
def test_multi_target_error(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.zeros((1, 3))
with self.assertRaises(AssertionError):
attr = IntegratedGradients(net)
attr.attribute(inp, additional_forward_args=(None, True), target=(1, 0))
#!/usr/bin/env python3
from typing import Any, cast
import torch
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.input_x_gradient import InputXGradient
from captum.attr._core.noise_tunnel import NoiseTunnel
from tests.attr.test_saliency import _get_basic_config, _get_multiargs_basic_config
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.classification_models import SoftmaxModel
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_input_x_gradient_test_basic_vanilla(self) -> None:
self._input_x_gradient_base_assert(*_get_basic_config())
def test_input_x_gradient_test_basic_smoothgrad(self) -> None:
self._input_x_gradient_base_assert(*_get_basic_config(), nt_type="smoothgrad")
def test_input_x_gradient_test_basic_vargrad(self) -> None:
self._input_x_gradient_base_assert(*_get_basic_config(), nt_type="vargrad")
def test_input_x_gradient_test_basic_multi_variable_vanilla(self) -> None:
self._input_x_gradient_base_assert(*_get_multiargs_basic_config())
def test_input_x_gradient_test_basic_multi_variable_smoothgrad(self) -> None:
self._input_x_gradient_base_assert(
*_get_multiargs_basic_config(), nt_type="smoothgrad"
)
def test_input_x_gradient_test_basic_multi_variable_vargrad(self) -> None:
self._input_x_gradient_base_assert(
*_get_multiargs_basic_config(), nt_type="vargrad"
)
def test_input_x_gradient_classification_vanilla(self) -> None:
self._input_x_gradient_classification_assert()
def test_input_x_gradient_classification_smoothgrad(self) -> None:
self._input_x_gradient_classification_assert(nt_type="smoothgrad")
def test_input_x_gradient_classification_vargrad(self) -> None:
self._input_x_gradient_classification_assert(nt_type="vargrad")
def _input_x_gradient_base_assert(
self,
model: Module,
inputs: TensorOrTupleOfTensorsGeneric,
expected_grads: TensorOrTupleOfTensorsGeneric,
additional_forward_args: Any = None,
nt_type: str = "vanilla",
) -> None:
input_x_grad = InputXGradient(model)
self.assertTrue(input_x_grad.multiplies_by_inputs)
attributions: TensorOrTupleOfTensorsGeneric
if nt_type == "vanilla":
attributions = input_x_grad.attribute(
inputs,
additional_forward_args=additional_forward_args,
)
else:
nt = NoiseTunnel(input_x_grad)
attributions = nt.attribute(
inputs,
nt_type=nt_type,
nt_samples=10,
stdevs=0.0002,
additional_forward_args=additional_forward_args,
)
if isinstance(attributions, tuple):
for input, attribution, expected_grad in zip(
inputs, attributions, expected_grads
):
if nt_type == "vanilla":
self._assert_attribution(expected_grad, input, attribution)
self.assertEqual(input.shape, attribution.shape)
elif isinstance(attributions, Tensor):
if nt_type == "vanilla":
self._assert_attribution(expected_grads, inputs, attributions)
self.assertEqual(
cast(Tensor, inputs).shape, cast(Tensor, attributions).shape
)
def _assert_attribution(self, expected_grad, input, attribution):
assertTensorAlmostEqual(
self,
attribution,
(expected_grad * input),
delta=0.05,
mode="max",
)
def _input_x_gradient_classification_assert(self, nt_type: str = "vanilla") -> None:
num_in = 5
input = torch.tensor([[0.0, 1.0, 2.0, 3.0, 4.0]], requires_grad=True)
target = torch.tensor(5)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
input_x_grad = InputXGradient(model.forward)
if nt_type == "vanilla":
attributions = input_x_grad.attribute(input, target)
output = model(input)[:, target]
output.backward()
expected = input.grad * input
assertTensorAlmostEqual(self, attributions, expected, 0.00001, "max")
else:
nt = NoiseTunnel(input_x_grad)
attributions = nt.attribute(
input, nt_type=nt_type, nt_samples=10, stdevs=1.0, target=target
)
self.assertEqual(attributions.shape, input.shape)
#!/usr/bin/env python3
import torch
from captum.attr._core.noise_tunnel import SUPPORTED_NOISE_TUNNEL_TYPES
from captum.attr._utils.common import _validate_input, _validate_noise_tunnel_type
from tests.helpers.basic import BaseTest
class Test(BaseTest):
def test_validate_input(self) -> None:
with self.assertRaises(AssertionError):
_validate_input((torch.tensor([-1.0, 1.0]),), (torch.tensor([-2.0]),))
_validate_input(
(torch.tensor([-1.0, 1.0]),), (torch.tensor([-1.0, 1.0]),), n_steps=-1
)
_validate_input(
(torch.tensor([-1.0, 1.0]),),
(torch.tensor([-1.0, 1.0]),),
method="abcde",
)
_validate_input((torch.tensor([-1.0]),), (torch.tensor([-2.0]),))
_validate_input(
(torch.tensor([-1.0]),), (torch.tensor([-2.0]),), method="gausslegendre"
)
def test_validate_nt_type(self) -> None:
with self.assertRaises(AssertionError):
_validate_noise_tunnel_type("abc", SUPPORTED_NOISE_TUNNEL_TYPES)
_validate_noise_tunnel_type("smoothgrad", SUPPORTED_NOISE_TUNNEL_TYPES)
_validate_noise_tunnel_type("smoothgrad_sq", SUPPORTED_NOISE_TUNNEL_TYPES)
_validate_noise_tunnel_type("vargrad", SUPPORTED_NOISE_TUNNEL_TYPES)
#!/usr/bin/env python3
import unittest
from typing import Any
import torch
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.guided_grad_cam import GuidedGradCam
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel_ConvNet_One_Conv
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
ex = [
[
[
[0.0, 0.0, 4.0, 4.0],
[0.0, 0.0, 12.0, 8.0],
[28.0, 84.0, 97.5, 65.0],
[28.0, 56.0, 65.0, 32.5],
]
]
]
self._guided_grad_cam_test_assert(net, net.relu1, inp, ex)
def test_simple_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex = [
[
[
[14.5, 29.0, 38.0, 19.0],
[29.0, 58.0, 76.0, 38.0],
[65.0, 130.0, 148.0, 74.0],
[32.5, 65.0, 74.0, 37.0],
]
]
]
self._guided_grad_cam_test_assert(net, net.conv1, (inp, inp2), (ex, ex))
def test_simple_multi_input_relu_input(self) -> None:
net = BasicModel_ConvNet_One_Conv(inplace=True)
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex = [
[
[
[14.5, 29.0, 38.0, 19.0],
[29.0, 58.0, 76.0, 38.0],
[65.0, 130.0, 148.0, 74.0],
[32.5, 65.0, 74.0, 37.0],
]
]
]
self._guided_grad_cam_test_assert(
net, net.relu1, (inp, inp2), (ex, ex), attribute_to_layer_input=True
)
def test_simple_multi_input_conv_inplace(self) -> None:
net = BasicModel_ConvNet_One_Conv(inplace=True)
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex = [
[
[
[14.5, 29.0, 38.0, 19.0],
[29.0, 58.0, 76.0, 38.0],
[65.0, 130.0, 148.0, 74.0],
[32.5, 65.0, 74.0, 37.0],
]
]
]
self._guided_grad_cam_test_assert(net, net.conv1, (inp, inp2), (ex, ex))
def test_improper_dims_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones(1)
ex = [
[
[
[14.5, 29.0, 38.0, 19.0],
[29.0, 58.0, 76.0, 38.0],
[65.0, 130.0, 148.0, 74.0],
[32.5, 65.0, 74.0, 37.0],
]
]
]
self._guided_grad_cam_test_assert(net, net.conv1, (inp, inp2), (ex, []))
def test_improper_method_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones(1)
self._guided_grad_cam_test_assert(
net, net.conv1, (inp, inp2), ([], []), interpolate_mode="made_up_nonlinear"
)
def _guided_grad_cam_test_assert(
self,
model: Module,
target_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
expected,
additional_input: Any = None,
interpolate_mode: str = "nearest",
attribute_to_layer_input: bool = False,
) -> None:
guided_gc = GuidedGradCam(model, target_layer)
self.assertFalse(guided_gc.multiplies_by_inputs)
attributions = guided_gc.attribute(
test_input,
target=0,
additional_forward_args=additional_input,
interpolate_mode=interpolate_mode,
attribute_to_layer_input=attribute_to_layer_input,
)
if isinstance(test_input, tuple):
for i in range(len(test_input)):
assertTensorAlmostEqual(
self,
attributions[i],
expected[i],
delta=0.01,
)
else:
assertTensorAlmostEqual(
self,
attributions,
expected,
delta=0.01,
)
if __name__ == "__main__":
unittest.main()
#!/usr/bin/env python3
from enum import Enum
from typing import Any, Callable, cast, Dict, Optional, Tuple, Type
import torch
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._models.base import _set_deep_layer_value
from captum.attr._utils.attribution import Attribution, InternalAttribution
from tests.attr.helpers.gen_test_utils import (
gen_test_name,
get_target_layer,
parse_test_config,
should_create_generated_test,
)
from tests.attr.helpers.test_config import config
from tests.helpers.basic import BaseTest, deep_copy_args
from torch.nn import Module
"""
Tests in this file are dynamically generated based on the config
defined in tests/attr/helpers/test_config.py. To add new test cases,
read the documentation in test_config.py and add cases based on the
schema described there.
"""
class HookRemovalMode(Enum):
"""
Defines modes for hook removal tests:
`normal` - Verifies no hooks remain after running an attribution method
normally
`incorrect_target_or_neuron` - Verifies no hooks remain after an incorrect
target and neuron_selector are provided, which causes an assertion error
in the algorithm.
`invalid_module` - Verifies no hooks remain after an invalid module
is executed, which causes an assertion error in model execution.
"""
normal = 1
incorrect_target_or_neuron = 2
invalid_module = 3
class ErrorModule(Module):
def __init__(
self,
) -> None:
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, *args, **kwargs):
raise AssertionError("Raising error on execution")
class HookRemovalMeta(type):
"""
Attribution is computed either normally or with the changes based on the
mode, which cause an error. Once attribution is calculated, test verifies
that no forward, backward or forward pre hooks remain on any modules.
"""
def __new__(cls, name: str, bases: Tuple, attrs: Dict):
created_tests: Dict[Tuple[Type[Attribution], HookRemovalMode], bool] = {}
for test_config in config:
(
algorithms,
model,
args,
layer,
noise_tunnel,
_,
) = parse_test_config(test_config)
for algorithm in algorithms:
if not should_create_generated_test(algorithm):
continue
for mode in HookRemovalMode:
if mode is HookRemovalMode.invalid_module and layer is None:
continue
# Only one test per algorithm and mode is necessary
if (algorithm, mode) in created_tests:
continue
test_method = cls.make_single_hook_removal_test(
algorithm,
model,
layer,
args,
noise_tunnel,
mode,
)
test_name = gen_test_name(
"test_hook_removal_" + mode.name,
cast(str, test_config["name"]),
algorithm,
noise_tunnel,
)
if test_name in attrs:
raise AssertionError(
"Trying to overwrite existing test with name: %r"
% test_name
)
attrs[test_name] = test_method
created_tests[(algorithm, mode)] = True
return super(HookRemovalMeta, cls).__new__(cls, name, bases, attrs)
# Arguments are deep copied to ensure tests are independent and are not affected
# by any modifications within a previous test.
@classmethod
@deep_copy_args
def make_single_hook_removal_test(
cls,
algorithm: Type[Attribution],
model: Module,
layer: Optional[str],
args: Dict[str, Any],
noise_tunnel: bool,
mode: HookRemovalMode,
) -> Callable:
"""
This method creates a single hook removal test for the given
algorithm and parameters.
"""
def hook_removal_test_assert(self) -> None:
attr_method: Attribution
expect_error = False
if layer is not None:
if mode is HookRemovalMode.invalid_module:
expect_error = True
if isinstance(layer, list):
_set_deep_layer_value(model, layer[0], ErrorModule())
else:
_set_deep_layer_value(model, layer, ErrorModule())
target_layer = get_target_layer(model, layer)
internal_algorithm = cast(Type[InternalAttribution], algorithm)
attr_method = internal_algorithm(model, target_layer)
else:
attr_method = algorithm(model)
if noise_tunnel:
attr_method = NoiseTunnel(attr_method)
if mode is HookRemovalMode.incorrect_target_or_neuron:
# Overwriting target and neuron index arguments to
# incorrect values.
if "target" in args:
args["target"] = (9999,) * 20
expect_error = True
if "neuron_selector" in args:
args["neuron_selector"] = (9999,) * 20
expect_error = True
if expect_error:
with self.assertRaises(AssertionError):
attr_method.attribute(**args)
else:
attr_method.attribute(**args)
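# nn.Module keeps registered hooks in the internal dicts checked below;
# after attribution completes, all of them should be empty.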
def check_leftover_hooks(module):
self.assertEqual(len(module._forward_hooks), 0)
self.assertEqual(len(module._backward_hooks), 0)
self.assertEqual(len(module._forward_pre_hooks), 0)
model.apply(check_leftover_hooks)
return hook_removal_test_assert
class TestHookRemoval(BaseTest, metaclass=HookRemovalMeta):
pass
#!/usr/bin/env python3
import functools
import inspect
from typing import Callable, Dict, Tuple
import torch
from captum._utils.gradient import _forward_layer_eval
from captum.attr import (
DeepLift,
DeepLiftShap,
FeatureAblation,
GradientShap,
InputXGradient,
IntegratedGradients,
LayerDeepLift,
LayerDeepLiftShap,
LayerFeatureAblation,
LayerGradientShap,
LayerGradientXActivation,
LayerIntegratedGradients,
)
from captum.attr._utils.input_layer_wrapper import ModelInputWrapper
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel,
BasicModel_MultiLayer_TrueMultiInput,
MixedKwargsAndArgsModule,
)
layer_methods_to_test_with_equiv = [
# layer_method, equiv_method, whether or not to use multiple layers
(LayerIntegratedGradients, IntegratedGradients, [True, False]),
(LayerGradientXActivation, InputXGradient, [True, False]),
(LayerFeatureAblation, FeatureAblation, [False]),
(LayerDeepLift, DeepLift, [False]),
(LayerDeepLiftShap, DeepLiftShap, [False]),
(LayerGradientShap, GradientShap, [False]),
# TODO: add other algorithms here
]
class InputLayerMeta(type):
def __new__(cls, name: str, bases: Tuple, attrs: Dict):
for (
layer_method,
equiv_method,
multi_layers,
) in layer_methods_to_test_with_equiv:
for multi_layer in multi_layers:
test_name = (
f"test_{layer_method.__name__}"
+ f"_{equiv_method.__name__}_{multi_layer}"
)
# Bind the loop variables as default arguments to avoid the
# late-binding closure pitfall (every test capturing the last values).
attrs[test_name] = (
lambda self, layer_method=layer_method, equiv_method=equiv_method, multi_layer=multi_layer: self.layer_method_with_input_layer_patches(
layer_method, equiv_method, multi_layer
)
)
return super(InputLayerMeta, cls).__new__(cls, name, bases, attrs)
class TestInputLayerWrapper(BaseTest, metaclass=InputLayerMeta):
def test_forward_layer_eval_on_mixed_args_kwargs_module(self) -> None:
x = torch.randn(10, 5)
y = torch.randn(10, 5)
model = MixedKwargsAndArgsModule()
self.forward_eval_layer_with_inputs_helper(model, {"x": x})
self.forward_eval_layer_with_inputs_helper(model, {"x": x, "y": y})
def layer_method_with_input_layer_patches(
self,
layer_method_class: Callable,
equiv_method_class: Callable,
multi_layer: bool,
) -> None:
model = BasicModel_MultiLayer_TrueMultiInput() if multi_layer else BasicModel()
input_names = ["x1", "x2", "x3", "x4"] if multi_layer else ["input"]
model = ModelInputWrapper(model)
layers = [model.input_maps[inp] for inp in input_names]
layer_method = layer_method_class(
model, layer=layers if multi_layer else layers[0]
)
equivalent_method = equiv_method_class(model)
inputs = tuple(torch.rand(5, 3) for _ in input_names)
baseline = tuple(torch.zeros(5, 3) for _ in input_names)
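# attribute() is wrapped by a decorator, so inspect the underlying
# function (via __wrapped__) to see whether it accepts `baselines`.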
args = inspect.getfullargspec(equivalent_method.attribute.__wrapped__).args
args_to_use = [inputs]
if "baselines" in args:
args_to_use += [baseline]
a1 = layer_method.attribute(*args_to_use, target=0)
a2 = layer_method.attribute(
*args_to_use, target=0, attribute_to_layer_input=True
)
real_attributions = equivalent_method.attribute(*args_to_use, target=0)
if not isinstance(a1, tuple):
a1 = (a1,)
a2 = (a2,)
if not isinstance(real_attributions, tuple):
real_attributions = (real_attributions,)
assertTensorTuplesAlmostEqual(self, a1, a2)
assertTensorTuplesAlmostEqual(self, a1, real_attributions)
def forward_eval_layer_with_inputs_helper(self, model, inputs_to_test):
# Hard coded for simplicity. Each inner list encodes, per input, whether
# it is passed positionally (0) or as a keyword (1). Python requires
# positional arguments before keyword arguments, so no 0 may appear
# after the first 1 (reading left to right).
#
# Used to test handling of args/kwargs.
use_args_or_kwargs = [
[[0], [1]],
[
[0, 0],
[0, 1],
[1, 1],
],
]
model = ModelInputWrapper(model)
def forward_func(*args, args_or_kwargs=None):
# convert to args or kwargs to test *args and **kwargs wrapping behavior
new_args = []
new_kwargs = {}
for args_or_kwarg, name, inp in zip(
args_or_kwargs, inputs_to_test.keys(), args
):
if args_or_kwarg:
new_kwargs[name] = inp
else:
new_args.append(inp)
return model(*new_args, **new_kwargs)
for args_or_kwargs in use_args_or_kwargs[len(inputs_to_test) - 1]:
with self.subTest(args_or_kwargs=args_or_kwargs):
inputs = _forward_layer_eval(
functools.partial(forward_func, args_or_kwargs=args_or_kwargs),
inputs=tuple(inputs_to_test.values()),
layer=[model.input_maps[name] for name in inputs_to_test.keys()],
)
inputs_with_attrib_to_inp = _forward_layer_eval(
functools.partial(forward_func, args_or_kwargs=args_or_kwargs),
inputs=tuple(inputs_to_test.values()),
layer=[model.input_maps[name] for name in inputs_to_test.keys()],
attribute_to_layer_input=True,
)
for i1, i2, i3 in zip(
inputs, inputs_with_attrib_to_inp, inputs_to_test.values()
):
self.assertTrue((i1[0] == i2[0]).all())
self.assertTrue((i1[0] == i3).all())
#!/usr/bin/env python3
from typing import Union
import torch
from captum._utils.typing import TargetType
from captum.attr._core.deep_lift import DeepLift, DeepLiftShap
from captum.attr._core.integrated_gradients import IntegratedGradients
from tests.helpers.basic import assertAttributionComparision, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_ConvNet_MaxPool1d,
BasicModel_ConvNet_MaxPool3d,
)
from tests.helpers.classification_models import (
SigmoidDeepLiftModel,
SoftmaxDeepLiftModel,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_sigmoid_classification(self) -> None:
num_in = 20
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
baseline = 0 * input
target = torch.tensor(0)
# TODO add test cases for multiple different layers
model = SigmoidDeepLiftModel(num_in, 5, 1)
dl = DeepLift(model)
model.zero_grad()
attributions, delta = dl.attribute(
input, baseline, target=target, return_convergence_delta=True
)
self._assert_attributions(model, attributions, input, baseline, delta, target)
# compare with integrated gradients
ig = IntegratedGradients(model)
attributions_ig = ig.attribute(input, baseline, target=target)
assertAttributionComparision(self, (attributions,), (attributions_ig,))
def test_softmax_classification_zero_baseline(self) -> None:
num_in = 20
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
baselines = 0.0
model = SoftmaxDeepLiftModel(num_in, 20, 10)
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baselines, torch.tensor(2))
def test_softmax_classification_batch_zero_baseline(self) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 3.0, requires_grad=True).reshape(3, num_in)
baselines = 0
model = SoftmaxDeepLiftModel(num_in, 20, 10)
dl = DeepLift(model)
self.softmax_classification(
model, dl, input, baselines, torch.tensor([2, 2, 2])
)
def test_softmax_classification_batch_multi_target(self) -> None:
num_in = 40
inputs = torch.arange(0.0, num_in * 3.0, requires_grad=True).reshape(3, num_in)
baselines = torch.arange(1.0, num_in + 1).reshape(1, num_in)
model = SoftmaxDeepLiftModel(num_in, 20, 10)
dl = DeepLift(model)
self.softmax_classification(
model, dl, inputs, baselines, torch.tensor([2, 2, 2])
)
def test_softmax_classification_multi_baseline(self) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
baselines = torch.randn(5, 40)
model = SoftmaxDeepLiftModel(num_in, 20, 10)
dl = DeepLiftShap(model)
self.softmax_classification(model, dl, input, baselines, torch.tensor(2))
def test_softmax_classification_batch_multi_baseline(self) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 2.0, requires_grad=True).reshape(2, num_in)
baselines = torch.randn(5, 40)
model = SoftmaxDeepLiftModel(num_in, 20, 10)
dl = DeepLiftShap(model)
self.softmax_classification(model, dl, input, baselines, torch.tensor(2))
def test_convnet_with_maxpool3d(self) -> None:
input = 100 * torch.randn(2, 1, 10, 10, 10, requires_grad=True)
baseline = 20 * torch.randn(2, 1, 10, 10, 10)
model = BasicModel_ConvNet_MaxPool3d()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def test_convnet_with_maxpool3d_large_baselines(self) -> None:
input = 100 * torch.randn(2, 1, 10, 10, 10, requires_grad=True)
baseline = 600 * torch.randn(2, 1, 10, 10, 10)
model = BasicModel_ConvNet_MaxPool3d()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def test_convnet_with_maxpool2d(self) -> None:
input = 100 * torch.randn(2, 1, 10, 10, requires_grad=True)
baseline = 20 * torch.randn(2, 1, 10, 10)
model = BasicModel_ConvNet()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def test_convnet_with_maxpool2d_large_baselines(self) -> None:
input = 100 * torch.randn(2, 1, 10, 10, requires_grad=True)
baseline = 500 * torch.randn(2, 1, 10, 10)
model = BasicModel_ConvNet()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def test_convnet_with_maxpool1d(self) -> None:
input = 100 * torch.randn(2, 1, 10, requires_grad=True)
baseline = 20 * torch.randn(2, 1, 10)
model = BasicModel_ConvNet_MaxPool1d()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def test_convnet_with_maxpool1d_large_baselines(self) -> None:
input = 100 * torch.randn(2, 1, 10, requires_grad=True)
baseline = 500 * torch.randn(2, 1, 10)
model = BasicModel_ConvNet_MaxPool1d()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def softmax_classification(
self,
model: Module,
attr_method: Union[DeepLift, DeepLiftShap],
input: Tensor,
baselines,
target: TargetType,
) -> None:
# TODO add test cases for multiple different layers
model.zero_grad()
attributions, delta = attr_method.attribute(
input, baselines=baselines, target=target, return_convergence_delta=True
)
self._assert_attributions(model, attributions, input, baselines, delta, target)
target2 = torch.tensor(1)
attributions, delta = attr_method.attribute(
input, baselines=baselines, target=target2, return_convergence_delta=True
)
self._assert_attributions(model, attributions, input, baselines, delta, target2)
def _assert_attributions(
self,
model: Module,
attributions: Tensor,
inputs: Tensor,
baselines: Union[Tensor, int, float],
delta: Tensor,
target: TargetType = None,
) -> None:
self.assertEqual(inputs.shape, attributions.shape)
delta_condition = (delta.abs() < 0.003).all()
self.assertTrue(
delta_condition,
"The sum of attribution values {} is not "
"nearly equal to the difference between the endpoint for "
"some samples".format(delta),
)
# compare with integrated gradients
if isinstance(baselines, (int, float)) or inputs.shape == baselines.shape:
ig = IntegratedGradients(model)
attributions_ig = ig.attribute(inputs, baselines=baselines, target=target)
assertAttributionComparision(self, attributions, attributions_ig)
#!/usr/bin/env python3
from typing import List, Tuple
import torch
from captum.attr._core.feature_permutation import _permute_feature, FeaturePermutation
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModelWithSparseInputs
from torch import Tensor
class Test(BaseTest):
def _check_features_are_permuted(
self, inp: Tensor, perm_inp: Tensor, mask: Tensor
) -> None:
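# mask marks the features that were permuted; expanded to one example's
# shape, its complement marks the features that must remain untouched.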
permuted_features = mask.expand_as(inp[0])
unpermuted_features = permuted_features.bitwise_not()
self.assertTrue(inp.dtype == perm_inp.dtype)
self.assertTrue(inp.shape == perm_inp.shape)
self.assertTrue(
(inp[:, permuted_features] != perm_inp[:, permuted_features]).any()
)
self.assertTrue(
(inp[:, unpermuted_features] == perm_inp[:, unpermuted_features]).all()
)
def _check_perm_fn_with_mask(self, inp: Tensor, mask: Tensor) -> None:
perm_inp = _permute_feature(inp, mask)
self._check_features_are_permuted(inp, perm_inp, mask)
def test_perm_fn_single_feature(self) -> None:
batch_size = 2
sizes_to_test: List[Tuple[int, ...]] = [(10,), (4, 5), (3, 4, 5)]
for inp_size in sizes_to_test:
inp = torch.randn((batch_size,) + inp_size)
flat_mask = torch.zeros_like(inp[0]).flatten().bool()
num_features = inp.numel() // batch_size
for i in range(num_features):
flat_mask[i] = 1
self._check_perm_fn_with_mask(inp, flat_mask.view_as(inp[0]))
flat_mask[i] = 0
def test_perm_fn_broadcastable_masks(self) -> None:
batch_size = 5
inp_size = (3, 20, 30)
inp = torch.randn((batch_size,) + inp_size)
# To be broadcastable, dimensions must match from right to left,
# each being either 1 or equal to the corresponding input dim.
#
# Dimensions may only be missing from the left; any missing
# dimensions are implied to be 1.
#
# They are written out explicitly here for clarity.
mask_sizes: List[Tuple[int, ...]] = [
# dims = 1
(1, 20, 30),
(3, 1, 30),
(3, 20, 1),
(1, 1, 30),
(1, 20, 1),
# missing
(1,), # single mask value covering all features
(30,),
(20, 30),
(3, 20, 30),
]
for mask_size in mask_sizes:
mask = torch.randint(0, 2, mask_size).bool()
self.assertTrue(mask.shape == mask_size)
self._check_perm_fn_with_mask(inp, mask)
def test_single_input(self) -> None:
batch_size = 2
input_size = (6,)
constant_value = 10000
def forward_func(x: Tensor) -> Tensor:
return x.sum(dim=-1)
feature_importance = FeaturePermutation(forward_func=forward_func)
inp = torch.randn((batch_size,) + input_size)
inp[:, 0] = constant_value
zeros = torch.zeros_like(inp[:, 0])
attribs = feature_importance.attribute(inp)
self.assertTrue(attribs.squeeze(0).size() == (batch_size,) + input_size)
assertTensorAlmostEqual(self, attribs[:, 0], zeros, delta=0.05, mode="max")
self.assertTrue((attribs[:, 1 : input_size[0]].abs() > 0).all())
def test_multi_input(self) -> None:
batch_size = 20
inp1_size = (5, 2)
inp2_size = (5, 3)
labels = torch.randn(batch_size)
def forward_func(*x: Tensor) -> Tensor:
y = torch.zeros(x[0].shape[0:2])
for xx in x:
y += xx[:, :, 0] * xx[:, :, 1]
y = y.sum(dim=-1)
return torch.mean((y - labels) ** 2)
feature_importance = FeaturePermutation(forward_func=forward_func)
inp = (
torch.randn((batch_size,) + inp1_size),
torch.randn((batch_size,) + inp2_size),
)
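# Assign each position within an example its own feature id; the leading
# unsqueeze(0) broadcasts the same grouping across the whole batch.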
feature_mask = (
torch.arange(inp[0][0].numel()).view_as(inp[0][0]).unsqueeze(0),
torch.arange(inp[1][0].numel()).view_as(inp[1][0]).unsqueeze(0),
)
inp[1][:, :, 1] = 4
attribs = feature_importance.attribute(inp, feature_mask=feature_mask)
self.assertTrue(isinstance(attribs, tuple))
self.assertTrue(len(attribs) == 2)
self.assertTrue(attribs[0].squeeze(0).size() == inp1_size)
self.assertTrue(attribs[1].squeeze(0).size() == inp2_size)
self.assertTrue((attribs[1][:, :, 1] == 0).all())
self.assertTrue((attribs[1][:, :, 2] == 0).all())
self.assertTrue((attribs[0] != 0).all())
self.assertTrue((attribs[1][:, :, 0] != 0).all())
def test_multiple_perturbations_per_eval(self) -> None:
perturbations_per_eval = 4
batch_size = 2
input_size = (4,)
inp = torch.randn((batch_size,) + input_size)
def forward_func(x):
return 1 - x
target = 1
feature_importance = FeaturePermutation(forward_func=forward_func)
attribs = feature_importance.attribute(
inp, perturbations_per_eval=perturbations_per_eval, target=target
)
self.assertTrue(attribs.size() == (batch_size,) + input_size)
for i in range(inp.size(1)):
if i == target:
continue
assertTensorAlmostEqual(
self, attribs[:, i], torch.zeros_like(attribs[:, i])
)
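# With a batch of two examples, permuting a feature swaps its values
# between the two rows, so each example's attribution at the target index
# equals its own output minus the other example's output.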
y = forward_func(inp)
actual_diff = torch.stack([(y[0] - y[1])[target], (y[1] - y[0])[target]])
assertTensorAlmostEqual(self, attribs[:, target], actual_diff)
def test_broadcastable_masks(self) -> None:
# integration test to ensure that
# permutation function works with custom masks
def forward_func(x: Tensor) -> Tensor:
return x.view(x.shape[0], -1).sum(dim=-1)
batch_size = 2
inp = torch.randn((batch_size,) + (3, 4, 4))
feature_importance = FeaturePermutation(forward_func=forward_func)
masks = [
torch.tensor([0]),
torch.tensor([[0, 1, 2, 3]]),
torch.tensor([[[0, 1, 2, 3], [3, 3, 4, 5], [6, 6, 4, 6], [7, 8, 9, 10]]]),
]
for mask in masks:
attribs = feature_importance.attribute(inp, feature_mask=mask)
self.assertTrue(attribs is not None)
self.assertTrue(attribs.shape == inp.shape)
fm = mask.expand_as(inp[0])
# tolist() converts to plain ints so the set actually deduplicates
features = set(mask.flatten().tolist())
for feature in features:
m = (fm == feature).bool()
attribs_for_feature = attribs[:, m]
assertTensorAlmostEqual(
self,
attribs_for_feature[0],
-attribs_for_feature[1],
delta=0.05,
mode="max",
)
def test_empty_sparse_features(self) -> None:
model = BasicModelWithSparseInputs()
inp1 = torch.tensor([[1.0, -2.0, 3.0], [2.0, -1.0, 3.0]])
inp2 = torch.tensor([])
# test empty sparse tensor
feature_importance = FeaturePermutation(model)
attr1, attr2 = feature_importance.attribute((inp1, inp2))
self.assertEqual(attr1.shape, (1, 3))
self.assertEqual(attr2.shape, (1,))
def test_sparse_features(self) -> None:
model = BasicModelWithSparseInputs()
inp1 = torch.tensor([[1.0, -2.0, 3.0], [2.0, -1.0, 3.0]])
# Length of sparse index list may not match # of examples
inp2 = torch.tensor([1, 7, 2, 4, 5, 3, 6])
feature_importance = FeaturePermutation(model)
total_attr1, total_attr2 = feature_importance.attribute((inp1, inp2))
# 49 more draws, for 50 samples in total
for _ in range(49):
attr1, attr2 = feature_importance.attribute((inp1, inp2))
total_attr1 += attr1
total_attr2 += attr2
total_attr1 /= 50
total_attr2 /= 50
self.assertEqual(total_attr2.shape, (1,))
assertTensorAlmostEqual(self, total_attr1, torch.zeros_like(total_attr1))
assertTensorAlmostEqual(self, total_attr2, [-6.0], delta=0.2)
#!/usr/bin/env python3
import torch
from captum.attr import ClassSummarizer, CommonStats
from tests.helpers.basic import BaseTest
class Test(BaseTest):
def class_test(self, data, classes, x_sizes):
summarizer = ClassSummarizer(stats=CommonStats())
for x, y in data:
summarizer.update(x, y)
summ = summarizer.summary
self.assertIsNotNone(summ)
self.assertIsInstance(summ, list)
for s, size in zip(summ, x_sizes):
self.assertIsInstance(s, dict)
for key in s:
self.assertEqual(s[key].size(), size)
self.assertIsNotNone(summarizer.class_summaries)
all_classes = torch.zeros(len(classes))
class_summaries = summarizer.class_summaries
all_keys = set(class_summaries.keys())
for i, clazz in enumerate(classes):
self.assertTrue(clazz in class_summaries)
all_keys.remove(clazz)
all_classes[i] = 1
summ = class_summaries[clazz]
self.assertIsNotNone(summ)
self.assertIsInstance(summ, list)
for s, size in zip(summ, x_sizes):
self.assertIsInstance(s, dict)
for key in s:
self.assertEqual(s[key].size(), size)
self.assertEqual(len(all_keys), 0)
self.assertEqual(all_classes.sum(), len(classes))
def test_classes(self):
sizes_to_test = [
# ((1,),),
((3, 2, 10, 3), (1,)),
# ((20,),),
]
list_of_classes = [
list(range(100)),
["%d" % i for i in range(100)],
list(range(300, 400)),
]
for batch_size in [None, 1, 4]:
for sizes, classes in zip(sizes_to_test, list_of_classes):
def create_batch_labels(batch_idx):
if batch_size is None:
# batch_size = 1
return classes[batch_idx]
return classes[
batch_idx * batch_size : (batch_idx + 1) * batch_size
]
bs = 1 if batch_size is None else batch_size
num_batches = len(classes) // bs
sizes_plus_batch = tuple((bs,) + si for si in sizes)
data = [
(
tuple(torch.randn(si) for si in sizes_plus_batch),
create_batch_labels(batch_idx),
)
for batch_idx in range(num_batches)
]
with self.subTest(
batch_size=batch_size, sizes=sizes_plus_batch, classes=classes
):
self.class_test(data, classes, sizes)
def test_no_class(self) -> None:
size = (30, 20)
summarizer = ClassSummarizer(stats=CommonStats())
for _ in range(10):
x = torch.randn(size)
summarizer.update(x)
summ = summarizer.summary
self.assertIsNotNone(summ)
self.assertIsInstance(summ, dict)
for key in summ:
self.assertTrue(summ[key].size() == size)
self.assertIsNotNone(summarizer.class_summaries)
self.assertIsInstance(summarizer.class_summaries, dict)
self.assertEqual(len(summarizer.class_summaries), 0)
def test_single_label(self) -> None:
size = (4, 3, 2, 1)
data = torch.randn((100,) + size)
single_labels = [1, "apple"]
for label in single_labels:
summarizer = ClassSummarizer(stats=CommonStats())
summarizer.update(data, label)
summ1 = summarizer.summary
summ2 = summarizer.class_summaries
self.assertIsNotNone(summ1)
self.assertIsNotNone(summ2)
self.assertIsInstance(summ1, list)
self.assertTrue(len(summ1) == 1)
self.assertIsInstance(summ2, dict)
self.assertTrue(label in summ2)
self.assertTrue(len(summ1) == len(summ2[label]))
for key in summ1[0].keys():
self.assertTrue((summ1[0][key] == summ2[label][0][key]).all())
#!/usr/bin/env python3
import torch
from captum.attr import CommonStats, Summarizer
from tests.helpers.basic import BaseTest
class Test(BaseTest):
def test_single_input(self) -> None:
size = (2, 3)
summarizer = Summarizer(stats=CommonStats())
for _ in range(10):
attrs = torch.randn(size)
summarizer.update(attrs)
summ = summarizer.summary
self.assertIsNotNone(summ)
self.assertTrue(isinstance(summ, dict))
for k in summ:
self.assertTrue(summ[k].size() == size)
def test_multi_input(self) -> None:
size1 = (10, 5, 5)
size2 = (3, 5)
summarizer = Summarizer(stats=CommonStats())
for _ in range(10):
a1 = torch.randn(size1)
a2 = torch.randn(size2)
summarizer.update((a1, a2))
summ = summarizer.summary
self.assertIsNotNone(summ)
self.assertTrue(len(summ) == 2)
self.assertTrue(isinstance(summ[0], dict))
self.assertTrue(isinstance(summ[1], dict))
for k in summ[0]:
self.assertTrue(summ[0][k].size() == size1)
self.assertTrue(summ[1][k].size() == size2)
#!/usr/bin/env python3
import copy
import os
from enum import Enum
from typing import Any, Callable, cast, Dict, Optional, Tuple, Type
import torch
import torch.distributed as dist
from captum.attr._core.guided_grad_cam import GuidedGradCam
from captum.attr._core.layer.layer_deep_lift import LayerDeepLift, LayerDeepLiftShap
from captum.attr._core.layer.layer_lrp import LayerLRP
from captum.attr._core.neuron.neuron_deep_lift import NeuronDeepLift, NeuronDeepLiftShap
from captum.attr._core.neuron.neuron_guided_backprop_deconvnet import (
NeuronDeconvolution,
NeuronGuidedBackprop,
)
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._utils.attribution import Attribution, InternalAttribution
from tests.attr.helpers.gen_test_utils import (
gen_test_name,
get_target_layer,
parse_test_config,
should_create_generated_test,
)
from tests.attr.helpers.test_config import config
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest, deep_copy_args
from torch import Tensor
from torch.nn import Module
"""
Tests in this file are dynamically generated based on the config
defined in tests/attr/helpers/test_config.py. To add new test cases,
read the documentation in test_config.py and add cases based on the
schema described there.
"""
# Distributed Data Parallel env setup
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29500"
dist.init_process_group(backend="gloo", rank=0, world_size=1)
class DataParallelCompareMode(Enum):
"""
Defines modes for DataParallel tests:
`cpu_cuda` - Compares results when running attribution method on CPU vs GPU / CUDA
`data_parallel_default` - Compares results when running attribution method on GPU
with DataParallel
`data_parallel_alt_dev_ids` - Compares results when running attribution method on
GPU with DataParallel, but with an alternate device ID ordering (not default)
`dist_data_parallel` - Compares results when running attribution method on GPU
with DistributedDataParallel
"""
cpu_cuda = 1
data_parallel_default = 2
data_parallel_alt_dev_ids = 3
dist_data_parallel = 4
class DataParallelMeta(type):
def __new__(cls, name: str, bases: Tuple, attrs: Dict):
for test_config in config:
(
algorithms,
model,
args,
layer,
noise_tunnel,
baseline_distr,
) = parse_test_config(test_config)
dp_delta = test_config["dp_delta"] if "dp_delta" in test_config else 0.0001
for algorithm in algorithms:
if not should_create_generated_test(algorithm):
continue
for mode in DataParallelCompareMode:
# Creates test case corresponding to each algorithm and
# DataParallelCompareMode
test_method = cls.make_single_dp_test(
algorithm,
model,
layer,
args,
dp_delta,
noise_tunnel,
baseline_distr,
mode,
)
test_name = gen_test_name(
"test_dp_" + mode.name,
cast(str, test_config["name"]),
algorithm,
noise_tunnel,
)
if test_name in attrs:
raise AssertionError(
"Trying to overwrite existing test with name: %r"
% test_name
)
attrs[test_name] = test_method
return super(DataParallelMeta, cls).__new__(cls, name, bases, attrs)
# Arguments are deep copied to ensure tests are independent and are not affected
# by any modifications within a previous test.
@classmethod
@deep_copy_args
def make_single_dp_test(
cls,
algorithm: Type[Attribution],
model: Module,
target_layer: Optional[str],
args: Dict[str, Any],
dp_delta: float,
noise_tunnel: bool,
baseline_distr: bool,
mode: DataParallelCompareMode,
) -> Callable:
"""
This method creates a single Data Parallel / GPU test for the given
algorithm and parameters.
"""
def data_parallel_test_assert(self) -> None:
# Construct cuda_args, moving all tensor inputs in args to CUDA device
cuda_args = {}
for key in args:
if isinstance(args[key], Tensor):
cuda_args[key] = args[key].cuda()
elif isinstance(args[key], tuple):
cuda_args[key] = tuple(
elem.cuda() if isinstance(elem, Tensor) else elem
for elem in args[key]
)
else:
cuda_args[key] = args[key]
alt_device_ids = None
cuda_model = copy.deepcopy(model).cuda()
# Initialize models based on DataParallelCompareMode
if mode is DataParallelCompareMode.cpu_cuda:
model_1, model_2 = model, cuda_model
args_1, args_2 = args, cuda_args
elif mode is DataParallelCompareMode.data_parallel_default:
model_1, model_2 = (
cuda_model,
torch.nn.parallel.DataParallel(cuda_model),
)
args_1, args_2 = cuda_args, cuda_args
elif mode is DataParallelCompareMode.data_parallel_alt_dev_ids:
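# Keep device 0 first (DataParallel gathers output there by default),
# then list the remaining devices in reverse to get a non-default order.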
alt_device_ids = [0] + [
x for x in range(torch.cuda.device_count() - 1, 0, -1)
]
model_1, model_2 = (
cuda_model,
torch.nn.parallel.DataParallel(
cuda_model, device_ids=alt_device_ids
),
)
args_1, args_2 = cuda_args, cuda_args
elif mode is DataParallelCompareMode.dist_data_parallel:
model_1, model_2 = (
cuda_model,
torch.nn.parallel.DistributedDataParallel(
cuda_model, device_ids=[0], output_device=0
),
)
args_1, args_2 = cuda_args, cuda_args
else:
raise AssertionError("DataParallel compare mode type is not valid.")
attr_method_1: Attribution
attr_method_2: Attribution
if target_layer:
internal_algorithm = cast(Type[InternalAttribution], algorithm)
attr_method_1 = internal_algorithm(
model_1, get_target_layer(model_1, target_layer)
)
# cuda_model is used to obtain target_layer since DataParallel
# adds additional wrapper.
# model_2 is always either the CUDA model itself or DataParallel
if alt_device_ids is None:
attr_method_2 = internal_algorithm(
model_2, get_target_layer(cuda_model, target_layer)
)
else:
# LayerDeepLift and LayerDeepLiftShap do not take device ids
# as a parameter, since they must always have the DataParallel
# model object directly.
# Some neuron methods and GuidedGradCAM also require the
# model and cannot take a forward function.
if issubclass(
internal_algorithm,
(
LayerDeepLift,
LayerDeepLiftShap,
LayerLRP,
NeuronDeepLift,
NeuronDeepLiftShap,
NeuronDeconvolution,
NeuronGuidedBackprop,
GuidedGradCam,
),
):
attr_method_2 = internal_algorithm(
model_2,
get_target_layer(cuda_model, target_layer), # type: ignore
)
else:
attr_method_2 = internal_algorithm(
model_2.forward,
get_target_layer(cuda_model, target_layer),
device_ids=alt_device_ids,
)
else:
attr_method_1 = algorithm(model_1)
attr_method_2 = algorithm(model_2)
if noise_tunnel:
attr_method_1 = NoiseTunnel(attr_method_1)
attr_method_2 = NoiseTunnel(attr_method_2)
if attr_method_1.has_convergence_delta():
attributions_1, delta_1 = attr_method_1.attribute(
return_convergence_delta=True, **args_1
)
self.setUp()
attributions_2, delta_2 = attr_method_2.attribute(
return_convergence_delta=True, **args_2
)
if isinstance(attributions_1, list):
for i in range(len(attributions_1)):
assertTensorTuplesAlmostEqual(
self,
attributions_1[i],
attributions_2[i],
mode="max",
delta=dp_delta,
)
else:
assertTensorTuplesAlmostEqual(
self, attributions_1, attributions_2, mode="max", delta=dp_delta
)
assertTensorTuplesAlmostEqual(
self, delta_1, delta_2, mode="max", delta=dp_delta
)
else:
attributions_1 = attr_method_1.attribute(**args_1)
self.setUp()
attributions_2 = attr_method_2.attribute(**args_2)
if isinstance(attributions_1, list):
for i in range(len(attributions_1)):
assertTensorTuplesAlmostEqual(
self,
attributions_1[i],
attributions_2[i],
mode="max",
delta=dp_delta,
)
else:
assertTensorTuplesAlmostEqual(
self, attributions_1, attributions_2, mode="max", delta=dp_delta
)
return data_parallel_test_assert
if torch.cuda.is_available() and torch.cuda.device_count() != 0:
class DataParallelTest(BaseTest, metaclass=DataParallelMeta):
@classmethod
def tearDownClass(cls):
if torch.distributed.is_initialized():
dist.destroy_process_group()
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from functools import partial
from typing import Any, Callable, Generator, List, Optional, Tuple, Union
import torch
from captum._utils.models.linear_model import SGDLasso, SkLearnLasso
from captum._utils.models.model import Model
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.lime import get_exp_kernel_similarity_function, Lime, LimeBase
from captum.attr._utils.batching import _batch_example_iterator
from captum.attr._utils.common import (
_construct_default_feature_mask,
_format_input_baseline,
_format_tensor_into_tuples,
)
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicLinearModel,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
BasicModelBoolInput,
)
from torch import Tensor
def alt_perturb_func(
original_inp: TensorOrTupleOfTensorsGeneric, **kwargs
) -> TensorOrTupleOfTensorsGeneric:
if isinstance(original_inp, Tensor):
device = original_inp.device
else:
device = original_inp[0].device
feature_mask = kwargs["feature_mask"]
probs = torch.ones(1, kwargs["num_interp_features"]) * 0.5
curr_sample = torch.bernoulli(probs).to(device=device)
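# curr_sample holds one Bernoulli draw per interpretable feature; indexing
# it with feature_mask expands the draw to the input shape, choosing the
# original value (1) or the baseline (0) element-wise below.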
binary_mask: TensorOrTupleOfTensorsGeneric
if isinstance(original_inp, Tensor):
binary_mask = curr_sample[0][feature_mask]
return binary_mask * original_inp + (1 - binary_mask) * kwargs["baselines"]
else:
binary_mask = tuple(
curr_sample[0][feature_mask[j]] for j in range(len(feature_mask))
)
return tuple(
binary_mask[j] * original_inp[j]
+ (1 - binary_mask[j]) * kwargs["baselines"][j]
for j in range(len(feature_mask))
)
def alt_perturb_generator(
original_inp: TensorOrTupleOfTensorsGeneric, **kwargs
) -> Generator[TensorOrTupleOfTensorsGeneric, None, None]:
while True:
yield alt_perturb_func(original_inp, **kwargs)
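# Maps a perturbed sample back to its binary interpretable representation:
# feature i is 1 if the sample matches the original input on all positions
# covered by mask value i (within a small tolerance), and 0 otherwise.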
def alt_to_interp_rep(
curr_sample: TensorOrTupleOfTensorsGeneric,
original_input: TensorOrTupleOfTensorsGeneric,
**kwargs: Any,
) -> Tensor:
binary_vector = torch.zeros(1, kwargs["num_interp_features"])
feature_mask = kwargs["feature_mask"]
for i in range(kwargs["num_interp_features"]):
curr_total = 1
if isinstance(curr_sample, Tensor):
if (
torch.sum(
torch.abs(
(feature_mask == i).float() * (curr_sample - original_input)
)
)
> 0.001
):
curr_total = 0
else:
sum_diff = sum(
torch.sum(torch.abs((mask == i).float() * (sample - inp)))
for inp, sample, mask in zip(original_input, curr_sample, feature_mask)
)
if sum_diff > 0.001:
curr_total = 0
binary_vector[0][i] = curr_total
return binary_vector
class Test(BaseTest):
def setUp(self) -> None:
super().setUp()
try:
import sklearn # noqa: F401
            assert sklearn.__version__ >= "0.23.0", (
                "Must have sklearn version 0.23.0 or higher to use "
                "sample_weight in Lasso regression."
            )
except (ImportError, AssertionError):
raise unittest.SkipTest("Skipping Lime tests, sklearn not available.")
def test_simple_lime(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._lime_test_assert(
net,
inp,
[[73.3716, 193.3349, 113.3349]],
perturbations_per_eval=(1, 2, 3),
n_samples=500,
expected_coefs_only=[[73.3716, 193.3349, 113.3349]],
test_generator=True,
)
def test_simple_lime_sgd_model(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
interpretable_model = SGDLasso()
interpretable_model.fit = partial( # type: ignore
interpretable_model.fit, initial_lr=0.1, max_epoch=500
)
self._lime_test_assert(
net,
inp,
[[73.3716, 193.3349, 113.3349]],
n_samples=1000,
expected_coefs_only=[[73.3716, 193.3349, 113.3349]],
interpretable_model=interpretable_model,
)
def test_simple_lime_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._lime_test_assert(
net,
inp,
[[271.0, 271.0, 111.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
n_samples=500,
expected_coefs_only=[[271.0, 111.0]],
)
def test_simple_lime_with_baselines(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]])
self._lime_test_assert(
net,
inp,
[[244.0, 244.0, 100.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=4,
perturbations_per_eval=(1, 2, 3),
expected_coefs_only=[[244.0, 100.0]],
test_generator=True,
)
def test_simple_lime_boolean(self) -> None:
net = BasicModelBoolInput()
inp = torch.tensor([[True, False, True]])
self._lime_test_assert(
net,
inp,
[[31.42, 31.42, 30.90]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
test_generator=True,
)
def test_simple_lime_boolean_with_baselines(self) -> None:
net = BasicModelBoolInput()
inp = torch.tensor([[True, False, True]])
self._lime_test_assert(
net,
inp,
[[-36.0, -36.0, 0.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=True,
perturbations_per_eval=(1, 2, 3),
test_generator=True,
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_lime_with_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._lime_test_assert(
net,
inp,
[[73.3716, 193.3349, 113.3349]],
perturbations_per_eval=(bsz,),
n_samples=500,
test_generator=True,
show_progress=True,
)
output = mock_stderr.getvalue()
            # to test whether the progress calculation aligns with the actual
            # iterations, every perturbations_per_eval setting should reach
            # a progress of 100%
            assert (
                "Lime attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
def test_simple_batch_lime(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0], [10.0, 14.0, 4.0]], requires_grad=True)
self._lime_test_assert(
net,
inp,
[[73.4450, 193.5979, 113.4363], [32.11, 48.00, 11.00]],
perturbations_per_eval=(1, 2, 3),
n_samples=800,
expected_coefs_only=[[73.4450, 193.5979, 113.4363], [32.11, 48.00, 11.00]],
)
def test_simple_batch_lime_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0], [10.0, 14.0, 4.0]], requires_grad=True)
self._lime_test_assert(
net,
inp,
[[271.0, 271.0, 111.0], [32.11, 48.00, 11.00]],
feature_mask=torch.tensor([[0, 0, 1], [0, 1, 2]]),
perturbations_per_eval=(1, 2, 3),
n_samples=600,
expected_coefs_only=[[271.0, 111.0, 0.0], [32.11, 48.00, 11.00]],
test_generator=True,
)
def test_multi_input_lime_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0]])
expected = (
[[87, 0, 0]],
[[75, 0, 195]],
[[0, 395, 35]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=2000,
expected_coefs_only=[[87, 0, 0, 75, 0, 195, 0, 395, 35]],
)
def test_multi_input_lime_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[20.0, 50.0, 30.0]])
inp2 = torch.tensor([[0.0, 100.0, 0.0]])
inp3 = torch.tensor([[2.0, 10.0, 3.0]])
mask1 = torch.tensor([[0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 0, 0]])
expected = (
[[251.0, 591.0, 251.0]],
[[251.0, 591.0, 0.0]],
[[251.0, 251.0, 251.0]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
n_samples=500,
expected_coefs_only=[[251.0, 591.0, 0.0]],
)
expected_with_baseline = (
[[180, 576.0, 180]],
[[180, 576.0, -8.0]],
[[180, 180, 180]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
n_samples=500,
expected_coefs_only=[[180, 576.0, -8.0]],
test_generator=True,
)
def test_multi_input_lime_with_empty_input(self) -> None:
net = BasicLinearModel()
inp1 = torch.tensor([[23.0, 0.0, 0.0, 23.0, 0.0, 0.0, 23.0]])
inp2 = torch.tensor([[]]) # empty input
mask1 = torch.tensor([[0, 1, 2, 3, 4, 5, 6]])
mask2 = torch.tensor([[]], dtype=torch.long) # empty mask
expected: Tuple[List[List[float]], ...] = (
[[-4.0, 0, 0, 0, 0, 0, -4.0]],
[[]],
)
# no mask
self._lime_test_assert(
net,
(inp1, inp2),
expected,
n_samples=2000,
expected_coefs_only=[[-4.0, 0, 0, 0, 0, 0, -4.0]],
)
# with mask
self._lime_test_assert(
net,
(inp1, inp2),
expected,
n_samples=2000,
expected_coefs_only=[[-4.0, 0, 0, 0, 0, 0, -4.0]],
feature_mask=(mask1, mask2),
)
def test_multi_input_batch_lime_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]])
expected = (
[[87.8777, 0.0000, 0.0000], [75.8461, 195.6842, 115.3390]],
[[74.7283, 0.0000, 195.1708], [0.0000, 395.3823, 0.0000]],
[[0.0000, 395.5216, 35.5530], [0.0000, 35.1349, 0.0000]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=1000,
expected_coefs_only=[
[87.8777, 0.0, 0.0, 74.7283, 0.0, 195.1708, 0.0, 395.5216, 35.5530],
[
75.8461,
195.6842,
115.3390,
0.0000,
395.3823,
0.0000,
0.0000,
35.1349,
0.0000,
],
],
delta=1.2,
)
def test_multi_input_batch_lime(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[1086.2802, 1086.2802, 1086.2802], [250.8907, 590.9789, 250.8907]],
[[73.2166, 1086.2802, 152.6888], [250.8907, 590.9789, 0.0000]],
[[73.2166, 1086.2802, 152.6888], [250.8907, 250.8907, 250.8907]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
expected_with_baseline = (
[[1036.4233, 1036.4233, 1036.4233], [180.3035, 575.8969, 180.3035]],
[[48.2441, 1036.4233, 128.3161], [180.3035, 575.8969, -8.3229]],
[[48.2441, 1036.4233, 128.3161], [180.3035, 180.3035, 180.3035]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
expected_coefs_only=[
[48.2441, 1036.4233, 128.3161],
[180.3035, 575.8969, -8.3229],
],
n_samples=500,
test_generator=True,
)
    # Remaining tests are for cases where the forward function returns a scalar
    # as either a float, an integer, a 0d tensor, or a 1d tensor.
def test_single_lime_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_lime_assert(lambda inp: torch.sum(net(inp)).item())
def test_single_lime_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_lime_assert(lambda inp: torch.sum(net(inp)))
def test_single_lime_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_lime_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_single_lime_scalar_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_lime_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def _single_input_scalar_lime_assert(self, func: Callable) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._lime_test_assert(
func,
inp,
[[75.0, 75.0, 17.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
expected_coefs_only=[[75.0, 17.0]],
n_samples=700,
)
def test_multi_inp_lime_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_lime_assert(lambda *inp: torch.sum(net(*inp)))
def test_multi_inp_lime_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_lime_assert(
lambda *inp: torch.sum(net(*inp)).reshape(1)
)
def test_multi_inp_lime_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_lime_assert(
lambda *inp: int(torch.sum(net(*inp)).item())
)
def test_multi_inp_lime_scalar_float(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_lime_assert(lambda *inp: torch.sum(net(*inp)).item())
def _multi_input_scalar_lime_assert(self, func: Callable) -> None:
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]])
mask1 = torch.tensor([[1, 1, 1]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2]])
expected = (
[[3850.6666, 3850.6666, 3850.6666]] * 2,
[[305.5, 3850.6666, 410.1]] * 2,
[[305.5, 3850.6666, 410.1]] * 2,
)
self._lime_test_assert(
func,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
perturbations_per_eval=(1,),
target=None,
n_samples=1500,
expected_coefs_only=[[305.5, 3850.6666, 410.1]],
delta=1.5,
batch_attr=True,
test_generator=True,
)
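    # Shared assertion helper: runs Lime with the given settings and compares
    # against the expected attributions; when expected_coefs_only is provided,
    # it also checks the return_input_shape=False output and an equivalent
    # LimeBase setup that samples in the original input space.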
def _lime_test_assert(
self,
model: Callable,
test_input: TensorOrTupleOfTensorsGeneric,
expected_attr,
expected_coefs_only=None,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: Union[None, int] = 0,
n_samples: int = 100,
delta: float = 1.0,
batch_attr: bool = False,
test_generator: bool = False,
show_progress: bool = False,
interpretable_model: Optional[Model] = None,
) -> None:
for batch_size in perturbations_per_eval:
lime = Lime(
model,
similarity_func=get_exp_kernel_similarity_function("cosine", 10.0),
interpretable_model=interpretable_model
if interpretable_model
else SkLearnLasso(alpha=1.0),
)
attributions = lime.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
show_progress=show_progress,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_attr, delta=delta, mode="max"
)
if expected_coefs_only is not None:
# Test with return_input_shape = False
attributions = lime.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
return_input_shape=False,
show_progress=show_progress,
)
assertTensorAlmostEqual(
self, attributions, expected_coefs_only, delta=delta, mode="max"
)
lime_alt = LimeBase(
model,
interpretable_model
if interpretable_model
else SkLearnLasso(alpha=1.0),
get_exp_kernel_similarity_function("euclidean", 1000.0),
alt_perturb_generator if test_generator else alt_perturb_func,
                False,  # perturb_interpretable_space
                None,  # from_interp_rep_transform
                alt_to_interp_rep,  # to_interp_rep_transform
)
# Test with equivalent sampling in original input space
formatted_inputs, baselines = _format_input_baseline(
test_input, baselines
)
if feature_mask is None:
(
formatted_feature_mask,
num_interp_features,
) = _construct_default_feature_mask(formatted_inputs)
else:
formatted_feature_mask = _format_tensor_into_tuples(feature_mask)
num_interp_features = int(
max(
torch.max(single_mask).item()
for single_mask in feature_mask
if single_mask.numel()
)
+ 1
)
if batch_attr:
attributions = lime_alt.attribute(
test_input,
target=target,
feature_mask=formatted_feature_mask
if isinstance(test_input, tuple)
else formatted_feature_mask[0],
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
num_interp_features=num_interp_features,
show_progress=show_progress,
)
assertTensorAlmostEqual(
self, attributions, expected_coefs_only, delta=delta, mode="max"
)
return
bsz = formatted_inputs[0].shape[0]
for (
curr_inps,
curr_target,
curr_additional_args,
curr_baselines,
curr_feature_mask,
expected_coef_single,
) in _batch_example_iterator(
bsz,
test_input,
target,
additional_input,
baselines if isinstance(test_input, tuple) else baselines[0],
formatted_feature_mask
if isinstance(test_input, tuple)
else formatted_feature_mask[0],
expected_coefs_only,
):
attributions = lime_alt.attribute(
curr_inps,
target=curr_target,
feature_mask=curr_feature_mask,
additional_forward_args=curr_additional_args,
baselines=curr_baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
num_interp_features=num_interp_features,
show_progress=show_progress,
)
assertTensorAlmostEqual(
self,
attributions,
expected_coef_single,
delta=delta,
mode="max",
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from enum import Enum
from typing import Any, Callable, cast, Dict, Tuple, Type
import torch
from captum._utils.common import (
_format_additional_forward_args,
_format_tensor_into_tuples,
)
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._core.feature_permutation import FeaturePermutation
from captum.attr._core.gradient_shap import GradientShap
from captum.attr._core.input_x_gradient import InputXGradient
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.kernel_shap import KernelShap
from captum.attr._core.lime import Lime
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._core.occlusion import Occlusion
from captum.attr._core.saliency import Saliency
from captum.attr._core.shapley_value import ShapleyValueSampling
from captum.attr._utils.attribution import Attribution
from tests.attr.helpers.gen_test_utils import (
gen_test_name,
parse_test_config,
should_create_generated_test,
)
from tests.attr.helpers.test_config import config
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest, deep_copy_args
from torch import Tensor
from torch.nn import Module
JIT_SUPPORTED = [
IntegratedGradients,
FeatureAblation,
FeaturePermutation,
GradientShap,
InputXGradient,
Occlusion,
Saliency,
ShapleyValueSampling,
Lime,
KernelShap,
]
"""
Tests in this file are dynamically generated based on the config
defined in tests/attr/helpers/test_config.py. To add new test cases,
read the documentation in test_config.py and add cases based on the
schema described there.
"""
class JITCompareMode(Enum):
"""
Defines modes for JIT tests:
`cpu_jit_trace` - Compares results of running the test case with a standard model
on CPU with the result of JIT tracing the model and computing attributions
`cpu_jit_script` - Compares results of running the test case with a standard model
on CPU with the result of JIT scripting the model and computing attributions
`data_parallel_jit_trace` - Compares results of running the test case with a
standard model on CPU with the result of JIT tracing the model wrapped in
DataParallel and computing attributions
`data_parallel_jit_script` - Compares results of running the test case with a
standard model on CPU with the result of JIT scripting the model wrapped
in DataParallel and computing attributions
"""
cpu_jit_trace = 1
cpu_jit_script = 2
data_parallel_jit_trace = 3
    data_parallel_jit_script = 4
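    # For reference, the two standard TorchScript entry points these modes
    # compare are:
    #   torch.jit.trace(model, example_inputs)  # records ops for example inputs
    #   torch.jit.script(model)  # compiles from source, preserving control flow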
class JITMeta(type):
def __new__(cls, name: str, bases: Tuple, attrs: Dict):
for test_config in config:
(
algorithms,
model,
args,
layer,
noise_tunnel,
baseline_distr,
) = parse_test_config(test_config)
for algorithm in algorithms:
if not should_create_generated_test(algorithm):
continue
if algorithm in JIT_SUPPORTED:
for mode in JITCompareMode:
# Creates test case corresponding to each algorithm and
# JITCompareMode
test_method = cls.make_single_jit_test(
algorithm, model, args, noise_tunnel, baseline_distr, mode
)
test_name = gen_test_name(
"test_jit_" + mode.name,
cast(str, test_config["name"]),
algorithm,
noise_tunnel,
)
if test_name in attrs:
raise AssertionError(
"Trying to overwrite existing test with name: %r"
% test_name
)
attrs[test_name] = test_method
return super(JITMeta, cls).__new__(cls, name, bases, attrs)
# Arguments are deep copied to ensure tests are independent and are not affected
# by any modifications within a previous test.
@classmethod
@deep_copy_args
def make_single_jit_test(
cls,
algorithm: Type[Attribution],
model: Module,
args: Dict[str, Any],
noise_tunnel: bool,
baseline_distr: bool,
mode: JITCompareMode,
) -> Callable:
"""
This method creates a single JIT test for the given algorithm and parameters.
"""
def jit_test_assert(self) -> None:
model_1 = model
attr_args = args
            if (
                mode is JITCompareMode.data_parallel_jit_trace
                or mode is JITCompareMode.data_parallel_jit_script
            ):
if not torch.cuda.is_available() or torch.cuda.device_count() == 0:
raise unittest.SkipTest(
"Skipping GPU test since CUDA not available."
)
# Construct cuda_args, moving all tensor inputs in args to CUDA device
cuda_args = {}
for key in args:
if isinstance(args[key], Tensor):
cuda_args[key] = args[key].cuda()
elif isinstance(args[key], tuple):
cuda_args[key] = tuple(
elem.cuda() if isinstance(elem, Tensor) else elem
for elem in args[key]
)
else:
cuda_args[key] = args[key]
attr_args = cuda_args
model_1 = model_1.cuda()
# Initialize models based on JITCompareMode
            if (
                mode is JITCompareMode.cpu_jit_script
                or mode is JITCompareMode.data_parallel_jit_script
            ):
model_2 = torch.jit.script(model_1) # type: ignore
            elif (
                mode is JITCompareMode.cpu_jit_trace
                or mode is JITCompareMode.data_parallel_jit_trace
            ):
all_inps = _format_tensor_into_tuples(args["inputs"]) + (
_format_additional_forward_args(args["additional_forward_args"])
if "additional_forward_args" in args
and args["additional_forward_args"] is not None
else ()
)
model_2 = torch.jit.trace(model_1, all_inps) # type: ignore
else:
raise AssertionError("JIT compare mode type is not valid.")
attr_method_1 = algorithm(model_1)
attr_method_2 = algorithm(model_2)
if noise_tunnel:
attr_method_1 = NoiseTunnel(attr_method_1)
attr_method_2 = NoiseTunnel(attr_method_2)
if attr_method_1.has_convergence_delta():
attributions_1, delta_1 = attr_method_1.attribute(
return_convergence_delta=True, **attr_args
)
self.setUp()
attributions_2, delta_2 = attr_method_2.attribute(
return_convergence_delta=True, **attr_args
)
assertTensorTuplesAlmostEqual(
self, attributions_1, attributions_2, mode="max"
)
assertTensorTuplesAlmostEqual(self, delta_1, delta_2, mode="max")
else:
attributions_1 = attr_method_1.attribute(**attr_args)
self.setUp()
attributions_2 = attr_method_2.attribute(**attr_args)
assertTensorTuplesAlmostEqual(
self, attributions_1, attributions_2, mode="max"
)
return jit_test_assert
if torch.cuda.is_available() and torch.cuda.device_count() != 0:
class JITTest(BaseTest, metaclass=JITMeta):
pass
|
#!/usr/bin/env python3
from inspect import signature
from typing import Callable, List, Tuple, Union
import torch
from captum.attr._core.deep_lift import DeepLift, DeepLiftShap
from captum.attr._core.integrated_gradients import IntegratedGradients
from tests.helpers.basic import (
assertAttributionComparision,
assertTensorAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModelWithReusedModules,
Conv1dSeqModel,
LinearMaxPoolLinearModel,
ReLUDeepLiftModel,
ReLULinearModel,
TanhDeepLiftModel,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_relu_deeplift(self) -> None:
x1 = torch.tensor([1.0], requires_grad=True)
x2 = torch.tensor([2.0], requires_grad=True)
b1 = torch.tensor([0.0], requires_grad=True)
b2 = torch.tensor([0.0], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
self._deeplift_assert(model, DeepLift(model), inputs, baselines)
def test_relu_deeplift_exact_match(self) -> None:
x1 = torch.tensor([1.0], requires_grad=True)
x2 = torch.tensor([2.0], requires_grad=True)
b1 = torch.tensor([0.0], requires_grad=True)
b2 = torch.tensor([0.0], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
dl = DeepLift(model)
attributions, delta = dl.attribute(
inputs, baselines, return_convergence_delta=True
)
self.assertEqual(attributions[0][0], 2.0)
self.assertEqual(attributions[1][0], 1.0)
self.assertEqual(delta[0], 0.0)
def test_relu_deeplift_exact_match_wo_mutliplying_by_inputs(self) -> None:
x1 = torch.tensor([1.0])
x2 = torch.tensor([2.0])
inputs = (x1, x2)
model = ReLUDeepLiftModel()
dl = DeepLift(model, multiply_by_inputs=False)
attributions = dl.attribute(inputs)
self.assertEqual(attributions[0][0], 2.0)
self.assertEqual(attributions[1][0], 0.5)
def test_tanh_deeplift(self) -> None:
x1 = torch.tensor([-1.0], requires_grad=True)
x2 = torch.tensor([-2.0], requires_grad=True)
b1 = torch.tensor([0.0], requires_grad=True)
b2 = torch.tensor([0.0], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
model = TanhDeepLiftModel()
self._deeplift_assert(model, DeepLift(model), inputs, baselines)
def test_relu_deeplift_batch(self) -> None:
x1 = torch.tensor([[1.0], [1.0], [1.0], [1.0]], requires_grad=True)
x2 = torch.tensor([[2.0], [2.0], [2.0], [2.0]], requires_grad=True)
b1 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)
b2 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
self._deeplift_assert(model, DeepLift(model), inputs, baselines)
def test_relu_linear_deeplift(self) -> None:
model = ReLULinearModel(inplace=False)
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
inputs = (x1, x2)
baselines = (0, 0.0001)
# expected = [[[0.0, 0.0]], [[6.0, 2.0]]]
self._deeplift_assert(model, DeepLift(model), inputs, baselines)
def test_relu_linear_deeplift_compare_inplace(self) -> None:
model1 = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0], [2.0, 3.0, 4.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0], [2.3, 5.0, 4.0]], requires_grad=True)
inputs = (x1, x2)
attributions1 = DeepLift(model1).attribute(inputs)
model2 = ReLULinearModel()
attributions2 = DeepLift(model2).attribute(inputs)
assertTensorAlmostEqual(self, attributions1[0], attributions2[0])
assertTensorAlmostEqual(self, attributions1[1], attributions2[1])
def test_relu_linear_deepliftshap_compare_inplace(self) -> None:
model1 = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0], [2.0, 3.0, 4.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0], [2.3, 5.0, 4.0]], requires_grad=True)
inputs = (x1, x2)
b1 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
b2 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
baselines = (b1, b2)
attributions1 = DeepLiftShap(model1).attribute(inputs, baselines)
model2 = ReLULinearModel()
attributions2 = DeepLiftShap(model2).attribute(inputs, baselines)
assertTensorAlmostEqual(self, attributions1[0], attributions2[0])
assertTensorAlmostEqual(self, attributions1[1], attributions2[1])
def test_relu_linear_deeplift_batch(self) -> None:
model = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0], [2.0, 3.0, 4.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0], [2.3, 5.0, 4.0]], requires_grad=True)
inputs = (x1, x2)
baselines = (torch.zeros(1, 3), torch.rand(1, 3) * 0.001)
# expected = [[[0.0, 0.0]], [[6.0, 2.0]]]
self._deeplift_assert(model, DeepLift(model), inputs, baselines)
def test_relu_deeplift_with_hypothetical_contrib_func(self) -> None:
model = Conv1dSeqModel()
rand_seq_data = torch.abs(torch.randn(2, 4, 1000))
rand_seq_ref = torch.abs(torch.randn(2, 4, 1000))
dls = DeepLift(model)
attr = dls.attribute(
rand_seq_data,
rand_seq_ref,
custom_attribution_func=_hypothetical_contrib_func,
target=(1, 0),
)
self.assertEqual(attr.shape, rand_seq_data.shape)
def test_relu_deepliftshap_batch_4D_input(self) -> None:
x1 = torch.ones(4, 1, 1, 1)
x2 = torch.tensor([[[[2.0]]]] * 4)
b1 = torch.zeros(4, 1, 1, 1)
b2 = torch.zeros(4, 1, 1, 1)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
self._deeplift_assert(model, DeepLiftShap(model), inputs, baselines)
def test_relu_deepliftshap_batch_4D_input_wo_mutliplying_by_inputs(self) -> None:
x1 = torch.ones(4, 1, 1, 1)
x2 = torch.tensor([[[[2.0]]]] * 4)
b1 = torch.zeros(4, 1, 1, 1)
b2 = torch.zeros(4, 1, 1, 1)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
attr = DeepLiftShap(model, multiply_by_inputs=False).attribute(
inputs, baselines
)
assertTensorAlmostEqual(self, attr[0], 2 * torch.ones(4, 1, 1, 1))
assertTensorAlmostEqual(self, attr[1], 0.5 * torch.ones(4, 1, 1, 1))
def test_relu_deepliftshap_multi_ref(self) -> None:
x1 = torch.tensor([[1.0]], requires_grad=True)
x2 = torch.tensor([[2.0]], requires_grad=True)
b1 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)
b2 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
self._deeplift_assert(model, DeepLiftShap(model), inputs, baselines)
def test_relu_deepliftshap_baselines_as_func(self) -> None:
model = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0]])
x2 = torch.tensor([[3.0, 3.0, 1.0]])
def gen_baselines() -> Tuple[Tensor, ...]:
b1 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
b2 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
return (b1, b2)
def gen_baselines_scalar() -> Tuple[float, ...]:
return (0.0, 0.0001)
def gen_baselines_with_inputs(inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:
b1 = torch.cat([inputs[0], inputs[0] - 10])
b2 = torch.cat([inputs[1], inputs[1] - 10])
return (b1, b2)
def gen_baselines_returns_array() -> Tuple[List[List[float]], ...]:
b1 = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
b2 = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
return (b1, b2)
inputs = (x1, x2)
dl_shap = DeepLiftShap(model)
self._deeplift_assert(model, dl_shap, inputs, gen_baselines)
self._deeplift_assert(model, dl_shap, inputs, gen_baselines_with_inputs)
with self.assertRaises(AssertionError):
self._deeplift_assert(
model, DeepLiftShap(model), inputs, gen_baselines_returns_array
)
with self.assertRaises(AssertionError):
self._deeplift_assert(model, dl_shap, inputs, gen_baselines_scalar)
baselines = gen_baselines()
attributions = dl_shap.attribute(inputs, baselines)
attributions_with_func = dl_shap.attribute(inputs, gen_baselines)
assertTensorAlmostEqual(self, attributions[0], attributions_with_func[0])
assertTensorAlmostEqual(self, attributions[1], attributions_with_func[1])
def test_relu_deepliftshap_with_custom_attr_func(self) -> None:
def custom_attr_func(
multipliers: Tuple[Tensor, ...],
inputs: Tuple[Tensor, ...],
baselines: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
return tuple(multiplier * 0.0 for multiplier in multipliers)
model = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0]])
x2 = torch.tensor([[3.0, 3.0, 1.0]])
b1 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
b2 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
inputs = (x1, x2)
baselines = (b1, b2)
dls = DeepLiftShap(model)
attr_w_func = dls.attribute(
inputs, baselines, custom_attribution_func=custom_attr_func
)
assertTensorAlmostEqual(self, attr_w_func[0], [[0.0, 0.0, 0.0]], 0.0)
assertTensorAlmostEqual(self, attr_w_func[1], [[0.0, 0.0, 0.0]], 0.0)
def test_relu_deepliftshap_with_hypothetical_contrib_func(self) -> None:
model = Conv1dSeqModel()
rand_seq_data = torch.abs(torch.randn(2, 4, 1000))
rand_seq_ref = torch.abs(torch.randn(3, 4, 1000))
dls = DeepLiftShap(model)
attr = dls.attribute(
rand_seq_data,
rand_seq_ref,
custom_attribution_func=_hypothetical_contrib_func,
target=(0, 0),
)
self.assertEqual(attr.shape, rand_seq_data.shape)
def test_reusable_modules(self) -> None:
model = BasicModelWithReusedModules()
input = torch.rand(1, 3)
dl = DeepLift(model)
with self.assertRaises(RuntimeError):
dl.attribute(input, target=0)
def test_lin_maxpool_lin_classification(self) -> None:
inputs = torch.ones(2, 4)
baselines = torch.tensor([[1, 2, 3, 9], [4, 8, 6, 7]]).float()
model = LinearMaxPoolLinearModel()
dl = DeepLift(model)
attrs, delta = dl.attribute(
inputs, baselines, target=0, return_convergence_delta=True
)
expected = torch.Tensor([[0.0, 0.0, 0.0, -8.0], [0.0, -7.0, 0.0, 0.0]])
expected_delta = torch.Tensor([0.0, 0.0])
assertTensorAlmostEqual(self, attrs, expected, 0.0001)
assertTensorAlmostEqual(self, delta, expected_delta, 0.0001)
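    # Shared assertion helper: runs the attribution method repeatedly, checks
    # that results with and without return_convergence_delta match, validates
    # the convergence delta's shape and near-zero magnitude, and compares
    # against IntegratedGradients when baselines are broadcastable to inputs.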
def _deeplift_assert(
self,
model: Module,
attr_method: Union[DeepLift, DeepLiftShap],
inputs: Tuple[Tensor, ...],
baselines,
        custom_attr_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> None:
input_bsz = len(inputs[0])
if callable(baselines):
baseline_parameters = signature(baselines).parameters
if len(baseline_parameters) > 0:
baselines = baselines(inputs)
else:
baselines = baselines()
baseline_bsz = (
len(baselines[0]) if isinstance(baselines[0], torch.Tensor) else 1
)
# Run attribution multiple times to make sure that it is
# working as expected
for _ in range(5):
model.zero_grad()
attributions, delta = attr_method.attribute(
inputs,
baselines,
return_convergence_delta=True,
custom_attribution_func=custom_attr_func,
)
attributions_without_delta = attr_method.attribute(
inputs, baselines, custom_attribution_func=custom_attr_func
)
for attribution, attribution_without_delta in zip(
attributions, attributions_without_delta
):
self.assertTrue(
torch.all(torch.eq(attribution, attribution_without_delta))
)
if isinstance(attr_method, DeepLiftShap):
self.assertEqual([input_bsz * baseline_bsz], list(delta.shape))
else:
self.assertEqual([input_bsz], list(delta.shape))
delta_external = attr_method.compute_convergence_delta(
attributions, baselines, inputs
)
assertTensorAlmostEqual(
self, delta, delta_external, delta=0.0, mode="max"
)
delta_condition = (delta.abs() < 0.00001).all()
self.assertTrue(
delta_condition,
"The sum of attribution values {} is not "
"nearly equal to the difference between the endpoint for "
"some samples".format(delta),
)
for input, attribution in zip(inputs, attributions):
self.assertEqual(input.shape, attribution.shape)
if (
isinstance(baselines[0], (int, float))
or inputs[0].shape == baselines[0].shape
):
# Compare with Integrated Gradients
ig = IntegratedGradients(model)
attributions_ig = ig.attribute(inputs, baselines)
assertAttributionComparision(self, attributions, attributions_ig)
def _hypothetical_contrib_func(
multipliers: Tuple[Tensor, ...],
inputs: Tuple[Tensor, ...],
baselines: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
r"""
Implements hypothetical input contributions based on the logic described here:
https://github.com/kundajelab/deeplift/pull/36/files
This is using a dummy model for test purposes
"""
    # we assume that multipliers, inputs and baselines have the following shape:
# tuple((bsz x len x channel), )
assert len(multipliers[0].shape) == 3, multipliers[0].shape
assert len(inputs[0].shape) == 3, inputs[0].shape
assert len(baselines[0].shape) == 3, baselines[0].shape
    assert len(multipliers) == len(inputs) and len(inputs) == len(baselines), (
        "multipliers, inputs and baselines must have the same length but "
        "multipliers: {}, inputs: {}, baselines: {}".format(
            len(multipliers), len(inputs), len(baselines)
        )
    )
attributions = []
for k in range(len(multipliers)):
sub_attributions = torch.zeros_like(inputs[k])
for i in range(inputs[k].shape[-1]):
hypothetical_input = torch.zeros_like(inputs[k])
hypothetical_input[:, :, i] = 1.0
hypothetical_input_ref_diff = hypothetical_input - baselines[k]
sub_attributions[:, :, i] = torch.sum(
hypothetical_input_ref_diff * multipliers[k], dim=-1
)
attributions.append(sub_attributions)
return tuple(attributions)
|
#!/usr/bin/env python3
from __future__ import print_function
import unittest
from typing import Any, Tuple, Union
import torch
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.guided_backprop_deconvnet import Deconvolution
from captum.attr._core.neuron.neuron_guided_backprop_deconvnet import (
NeuronDeconvolution,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel_ConvNet_One_Conv
from torch.nn import Module
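# Deconvolution backpropagates only the positive gradients through ReLU layers
# (applying ReLU to the incoming gradient), so attributions are gradient-based
# and do not multiply by inputs.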
class Test(BaseTest):
def test_simple_input_conv_deconv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[2.0, 3.0, 3.0, 1.0],
[3.0, 5.0, 5.0, 2.0],
[3.0, 5.0, 5.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
exp = torch.tensor(exp).view(1, 1, 4, 4)
self._deconv_test_assert(net, (inp,), (exp,))
def test_simple_input_conv_neuron_deconv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[2.0, 3.0, 3.0, 1.0],
[3.0, 5.0, 5.0, 2.0],
[3.0, 5.0, 5.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
exp = torch.tensor(exp).view(1, 1, 4, 4)
self._neuron_deconv_test_assert(net, net.fc1, (0,), (inp,), (exp,))
def test_simple_input_conv_neuron_deconv_agg_neurons(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[2.0, 3.0, 3.0, 1.0],
[3.0, 5.0, 5.0, 2.0],
[3.0, 5.0, 5.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
exp = torch.tensor(exp).view(1, 1, 4, 4)
self._neuron_deconv_test_assert(net, net.fc1, (slice(0, 1, 1),), (inp,), (exp,))
def test_simple_multi_input_conv_deconv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex_attr = [
[2.0, 3.0, 3.0, 1.0],
[3.0, 5.0, 5.0, 2.0],
[3.0, 5.0, 5.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
ex_attr = torch.tensor(ex_attr).view(1, 1, 4, 4)
self._deconv_test_assert(net, (inp, inp2), (ex_attr, ex_attr))
def test_simple_multi_input_conv_neuron_deconv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex_attr = [
[2.0, 3.0, 3.0, 1.0],
[3.0, 5.0, 5.0, 2.0],
[3.0, 5.0, 5.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
ex_attr = torch.tensor(ex_attr).view(1, 1, 4, 4)
self._neuron_deconv_test_assert(
net, net.fc1, (3,), (inp, inp2), (ex_attr, ex_attr)
)
def test_deconv_matching(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 100.0 * torch.randn(1, 1, 4, 4)
self._deconv_matching_assert(net, net.relu2, inp)
def _deconv_test_assert(
self,
model: Module,
test_input: TensorOrTupleOfTensorsGeneric,
expected: Tuple[torch.Tensor, ...],
additional_input: Any = None,
) -> None:
deconv = Deconvolution(model)
attributions = deconv.attribute(
test_input, target=0, additional_forward_args=additional_input
)
for i in range(len(test_input)):
assertTensorAlmostEqual(self, attributions[i], expected[i], delta=0.01)
def _neuron_deconv_test_assert(
self,
model: Module,
layer: Module,
neuron_selector: Union[int, Tuple[Union[int, slice], ...]],
test_input: TensorOrTupleOfTensorsGeneric,
expected: Tuple[torch.Tensor, ...],
additional_input: Any = None,
) -> None:
deconv = NeuronDeconvolution(model, layer)
attributions = deconv.attribute(
test_input,
neuron_selector=neuron_selector,
additional_forward_args=additional_input,
)
for i in range(len(test_input)):
assertTensorAlmostEqual(self, attributions[i], expected[i], delta=0.01)
def _deconv_matching_assert(
self,
model: Module,
output_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
) -> None:
out = model(test_input)
attrib = Deconvolution(model)
self.assertFalse(attrib.multiplies_by_inputs)
neuron_attrib = NeuronDeconvolution(model, output_layer)
for i in range(out.shape[1]):
deconv_vals = attrib.attribute(test_input, target=i)
neuron_deconv_vals = neuron_attrib.attribute(test_input, (i,))
assertTensorAlmostEqual(self, deconv_vals, neuron_deconv_vals, delta=0.01)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from typing import Any, Callable, Tuple, Union
import torch
from captum._utils.typing import (
BaselineType,
TargetType,
TensorLikeList,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._core.occlusion import Occlusion
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel3,
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
class Test(BaseTest):
def test_improper_window_shape(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
occ = Occlusion(net)
# Check error when too few sliding window dimensions
with self.assertRaises(AssertionError):
_ = occ.attribute(inp, sliding_window_shapes=((1, 2),), target=0)
# Check error when too many sliding window dimensions
with self.assertRaises(AssertionError):
_ = occ.attribute(
(inp, inp), sliding_window_shapes=((1, 1, 2), (1, 1, 1, 2)), target=0
)
# Check error when too many sliding window tuples
with self.assertRaises(AssertionError):
_ = occ.attribute(
(inp, inp),
sliding_window_shapes=((1, 1, 2), (1, 1, 2), (1, 1, 2)),
target=0,
)
def test_improper_stride(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
occ = Occlusion(net)
# Check error when too few stride dimensions
with self.assertRaises(AssertionError):
_ = occ.attribute(
inp, sliding_window_shapes=(1, 2, 2), strides=(1, 2), target=0
)
# Check error when too many stride dimensions
with self.assertRaises(AssertionError):
_ = occ.attribute(
(inp, inp),
sliding_window_shapes=((1, 1, 2), (1, 2, 2)),
strides=((1, 1, 2), (2, 1, 2, 2)),
target=0,
)
# Check error when too many stride tuples
with self.assertRaises(AssertionError):
_ = occ.attribute(
(inp, inp),
sliding_window_shapes=((1, 1, 2), (1, 2, 2)),
strides=((1, 1, 2), (1, 2, 2), (1, 2, 2)),
target=0,
)
def test_too_large_stride(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
occ = Occlusion(net)
with self.assertRaises(AssertionError):
_ = occ.attribute(
inp, sliding_window_shapes=((1, 1, 2),), strides=2, target=0
)
with self.assertRaises(AssertionError):
_ = occ.attribute(
(inp, inp),
sliding_window_shapes=((1, 1, 2), (1, 4, 2)),
strides=(2, (1, 2, 3)),
target=0,
)
with self.assertRaises(AssertionError):
_ = occ.attribute(
inp, sliding_window_shapes=((2, 1, 2),), strides=2, target=0
)
def test_simple_input(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._occlusion_test_assert(
net,
inp,
[[80.0, 200.0, 120.0]],
perturbations_per_eval=(1, 2, 3),
            sliding_window_shapes=(1,),
)
def test_simple_multi_input_int_to_int(self) -> None:
net = BasicModel3()
inp1 = torch.tensor([[-10], [3]])
inp2 = torch.tensor([[-5], [1]])
self._occlusion_test_assert(
net,
(inp1, inp2),
([[0.0], [1.0]], [[0.0], [-1.0]]),
sliding_window_shapes=((1,), (1,)),
)
def test_simple_multi_input_int_to_float(self) -> None:
net = BasicModel3()
def wrapper_func(*inp):
return net(*inp).float()
inp1 = torch.tensor([[-10], [3]])
inp2 = torch.tensor([[-5], [1]])
self._occlusion_test_assert(
wrapper_func,
(inp1, inp2),
([[0.0], [1.0]], [[0.0], [-1.0]]),
sliding_window_shapes=((1,), (1,)),
)
def test_simple_multi_input(self) -> None:
net = BasicModel3()
inp1 = torch.tensor([[-10.0], [3.0]])
inp2 = torch.tensor([[-5.0], [1.0]])
self._occlusion_test_assert(
net,
(inp1, inp2),
([[0.0], [1.0]], [[0.0], [-1.0]]),
sliding_window_shapes=((1,), (1,)),
)
def test_simple_multi_input_0d(self) -> None:
net = BasicModel3()
inp1 = torch.tensor([-10.0, 3.0])
inp2 = torch.tensor([-5.0, 1.0])
self._occlusion_test_assert(
net,
(inp1, inp2),
([0.0, 1.0], [0.0, -1.0]),
sliding_window_shapes=((), ()),
target=None,
)
def test_simple_input_larger_shape(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._occlusion_test_assert(
net,
inp,
[[200.0, 220.0, 240.0]],
perturbations_per_eval=(1, 2, 3),
            sliding_window_shapes=(2,),
baselines=torch.tensor([10.0, 10.0, 10.0]),
)
def test_simple_input_shape_with_stride(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._occlusion_test_assert(
net,
inp,
[[280.0, 280.0, 120.0]],
perturbations_per_eval=(1, 2, 3),
            sliding_window_shapes=(2,),
strides=2,
)
def test_multi_sample_ablation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._occlusion_test_assert(
net,
inp,
[[8.0, 35.0, 12.0], [80.0, 200.0, 120.0]],
perturbations_per_eval=(1, 2, 3),
sliding_window_shapes=((1,),),
)
def test_multi_input_ablation_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
expected = (
[[492.0, 492.0, 492.0], [400.0, 400.0, 400.0]],
[[80.0, 200.0, 120.0], [0.0, 400.0, 0.0]],
[[400.0, 420.0, 440.0], [48.0, 50.0, 52.0]],
)
self._occlusion_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
sliding_window_shapes=((3,), (1,), (2,)),
)
self._occlusion_test_assert(
net,
(inp1, inp2),
expected[0:1],
additional_input=(inp3, 1),
perturbations_per_eval=(1, 2, 3),
sliding_window_shapes=((3,), (1,)),
)
def test_multi_input_ablation_with_baselines(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
expected = (
[[444.0, 444.0, 444.0], [328.0, 328.0, 328.0]],
[[68.0, 188.0, 108.0], [-12.0, 388.0, -12.0]],
[[368.0, 368.0, 24.0], [0.0, 0.0, -12.0]],
)
self._occlusion_test_assert(
net,
(inp1, inp2, inp3),
expected,
baselines=(
torch.tensor([[1.0, 4, 7], [3.0, 6, 9]]),
3.0,
torch.tensor([[4.0], [6]]),
),
additional_input=(1,),
sliding_window_shapes=((3,), (1,), (2,)),
strides=(2, 1, 2),
)
def test_simple_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
self._occlusion_test_assert(
net,
(inp, inp2),
(67 * torch.ones_like(inp), 13 * torch.ones_like(inp2)),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
sliding_window_shapes=((1, 4, 4), (1, 4, 4)),
)
self._occlusion_test_assert(
net,
(inp, inp2),
(
[
[
[
[17.0, 17.0, 17.0, 17.0],
[17.0, 17.0, 17.0, 17.0],
[64.0, 65.5, 65.5, 67.0],
[64.0, 65.5, 65.5, 67.0],
]
]
],
[
[
[
[3.0, 3.0, 3.0, 3.0],
[3.0, 3.0, 3.0, 3.0],
[3.0, 3.0, 3.0, 3.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
),
perturbations_per_eval=(1, 3, 7, 14),
sliding_window_shapes=((1, 2, 3), (1, 1, 2)),
strides=((1, 2, 1), (1, 1, 2)),
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_input_with_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._occlusion_test_assert(
net,
inp,
[[80.0, 200.0, 120.0]],
perturbations_per_eval=(bsz,),
                sliding_window_shapes=(1,),
show_progress=True,
)
output = mock_stderr.getvalue()
            # to test whether the progress calculation aligns with the actual
            # iterations, every perturbations_per_eval setting should reach
            # a progress of 100%
            assert (
                "Occlusion attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
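    # Shared assertion helper: runs Occlusion for each perturbations_per_eval
    # setting and compares the resulting attributions against the expected
    # ablation values.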
def _occlusion_test_assert(
self,
model: Callable,
test_input: TensorOrTupleOfTensorsGeneric,
expected_ablation: Union[
float,
TensorLikeList,
Tuple[TensorLikeList, ...],
Tuple[Tensor, ...],
],
sliding_window_shapes: Union[Tuple[int, ...], Tuple[Tuple[int, ...], ...]],
target: TargetType = 0,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
strides: Union[None, int, Tuple[Union[int, Tuple[int, ...]], ...]] = None,
show_progress: bool = False,
) -> None:
for batch_size in perturbations_per_eval:
ablation = Occlusion(model)
attributions = ablation.attribute(
test_input,
sliding_window_shapes=sliding_window_shapes,
target=target,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
strides=strides,
show_progress=show_progress,
)
if isinstance(expected_ablation, tuple):
for i in range(len(expected_ablation)):
assertTensorAlmostEqual(
self,
attributions[i],
expected_ablation[i],
)
else:
assertTensorAlmostEqual(
self,
attributions,
expected_ablation,
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
from typing import Any, cast, Tuple, Union
import torch
from captum._utils.gradient import compute_gradients
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._core.saliency import Saliency
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import BasicModel, BasicModel5_MultiArgs
from tests.helpers.classification_models import SoftmaxModel
from torch import Tensor
from torch.nn import Module
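# Note: Saliency returns the absolute value of the input gradients by default
# (abs=True), which is why _assert_attribution below compares against
# torch.abs of the expected gradients.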
def _get_basic_config() -> Tuple[Module, Tensor, Tensor, Any]:
input = torch.tensor([1.0, 2.0, 3.0, 0.0, -1.0, 7.0], requires_grad=True).T
    # manually precomputed gradients
grads = torch.tensor([-0.0, -0.0, -0.0, 1.0, 1.0, -0.0])
return BasicModel(), input, grads, None
def _get_multiargs_basic_config() -> Tuple[
Module, Tuple[Tensor, ...], Tuple[Tensor, ...], Any
]:
model = BasicModel5_MultiArgs()
additional_forward_args = ([2, 3], 1)
inputs = (
torch.tensor([[1.5, 2.0, 34.3], [3.4, 1.2, 2.0]], requires_grad=True),
torch.tensor([[3.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True),
)
grads = compute_gradients(
model, inputs, additional_forward_args=additional_forward_args
)
return model, inputs, grads, additional_forward_args
def _get_multiargs_basic_config_large() -> Tuple[
Module, Tuple[Tensor, ...], Tuple[Tensor, ...], Any
]:
model = BasicModel5_MultiArgs()
additional_forward_args = ([2, 3], 1)
inputs = (
torch.tensor(
[[10.5, 12.0, 34.3], [43.4, 51.2, 32.0]], requires_grad=True
).repeat_interleave(3, dim=0),
torch.tensor(
[[1.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True
).repeat_interleave(3, dim=0),
)
grads = compute_gradients(
model, inputs, additional_forward_args=additional_forward_args
)
return model, inputs, grads, additional_forward_args
class Test(BaseTest):
def test_saliency_test_basic_vanilla(self) -> None:
self._saliency_base_assert(*_get_basic_config())
def test_saliency_test_basic_smoothgrad(self) -> None:
self._saliency_base_assert(*_get_basic_config(), nt_type="smoothgrad")
def test_saliency_test_basic_vargrad(self) -> None:
self._saliency_base_assert(*_get_basic_config(), nt_type="vargrad")
def test_saliency_test_basic_multi_variable_vanilla(self) -> None:
self._saliency_base_assert(*_get_multiargs_basic_config())
def test_saliency_test_basic_multi_variable_smoothgrad(self) -> None:
self._saliency_base_assert(*_get_multiargs_basic_config(), nt_type="smoothgrad")
def test_saliency_test_basic_multivar_sg_n_samples_batch_size_2(self) -> None:
attributions_batch_size = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="smoothgrad",
n_samples_batch_size=2,
)
attributions = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="smoothgrad",
)
assertTensorTuplesAlmostEqual(self, attributions_batch_size, attributions)
def test_saliency_test_basic_multivar_sg_n_samples_batch_size_3(self) -> None:
attributions_batch_size = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="smoothgrad_sq",
n_samples_batch_size=3,
)
attributions = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="smoothgrad_sq",
)
assertTensorTuplesAlmostEqual(self, attributions_batch_size, attributions)
def test_saliency_test_basic_multivar_vg_n_samples_batch_size_1(self) -> None:
attributions_batch_size = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="vargrad",
n_samples_batch_size=1,
)
attributions = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="vargrad",
)
assertTensorTuplesAlmostEqual(self, attributions_batch_size, attributions)
def test_saliency_test_basic_multivar_vg_n_samples_batch_size_6(self) -> None:
attributions_batch_size = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="vargrad",
n_samples_batch_size=6,
)
attributions = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="vargrad",
)
assertTensorTuplesAlmostEqual(self, attributions_batch_size, attributions)
def test_saliency_test_basic_multi_vargrad(self) -> None:
self._saliency_base_assert(*_get_multiargs_basic_config(), nt_type="vargrad")
def test_saliency_classification_vanilla(self) -> None:
self._saliency_classification_assert()
def test_saliency_classification_smoothgrad(self) -> None:
self._saliency_classification_assert(nt_type="smoothgrad")
def test_saliency_classification_vargrad(self) -> None:
self._saliency_classification_assert(nt_type="vargrad")
def test_saliency_grad_unchanged(self) -> None:
model, inp, grads, add_args = _get_basic_config()
inp.grad = torch.randn_like(inp)
grad = inp.grad.detach().clone()
self._saliency_base_assert(model, inp, grads, add_args)
assertTensorTuplesAlmostEqual(self, inp.grad, grad, delta=0.0)
def _saliency_base_assert(
self,
model: Module,
inputs: TensorOrTupleOfTensorsGeneric,
expected: TensorOrTupleOfTensorsGeneric,
additional_forward_args: Any = None,
nt_type: str = "vanilla",
n_samples_batch_size=None,
) -> Union[Tensor, Tuple[Tensor, ...]]:
saliency = Saliency(model)
self.assertFalse(saliency.multiplies_by_inputs)
if nt_type == "vanilla":
attributions = saliency.attribute(
inputs, additional_forward_args=additional_forward_args
)
else:
nt = NoiseTunnel(saliency)
attributions = nt.attribute(
inputs,
nt_type=nt_type,
nt_samples=10,
nt_samples_batch_size=n_samples_batch_size,
stdevs=0.0000002,
additional_forward_args=additional_forward_args,
)
for input, attribution, expected_attr in zip(inputs, attributions, expected):
if nt_type == "vanilla":
self._assert_attribution(attribution, expected_attr)
self.assertEqual(input.shape, attribution.shape)
return attributions
def _assert_attribution(self, attribution: Tensor, expected: Tensor) -> None:
expected = torch.abs(expected)
if len(attribution.shape) == 0:
assert (attribution - expected).abs() < 0.001
else:
assertTensorAlmostEqual(self, expected, attribution, delta=0.5, mode="max")
def _saliency_classification_assert(self, nt_type: str = "vanilla") -> None:
num_in = 5
input = torch.tensor([[0.0, 1.0, 2.0, 3.0, 4.0]], requires_grad=True)
target = torch.tensor(5)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
saliency = Saliency(model)
if nt_type == "vanilla":
attributions = saliency.attribute(input, target)
output = model(input)[:, target]
output.backward()
expected = torch.abs(cast(Tensor, input.grad))
assertTensorAlmostEqual(self, attributions, expected)
else:
nt = NoiseTunnel(saliency)
attributions = nt.attribute(
input, nt_type=nt_type, nt_samples=10, stdevs=0.0002, target=target
)
self.assertEqual(input.shape, attributions.shape)
|
#!/usr/bin/env python3
import random
import torch
from captum.attr import Max, Mean, Min, MSE, StdDev, Sum, Summarizer, Var
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
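# Yields n random scalars: uniform floats in [lo, hi) when integers is False,
# otherwise random integers in [lo, hi] (inclusive).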
def get_values(n=100, lo=None, hi=None, integers=False):
for _ in range(n):
if integers:
yield random.randint(lo, hi)
else:
yield random.random() * (hi - lo) + lo
class Test(BaseTest):
def test_div0(self) -> None:
summarizer = Summarizer([Var(), Mean()])
summ = summarizer.summary
self.assertIsNone(summ)
summarizer.update(torch.tensor(10))
summ = summarizer.summary
assertTensorAlmostEqual(self, summ["mean"], 10)
assertTensorAlmostEqual(self, summ["variance"], 0)
summarizer.update(torch.tensor(10))
summ = summarizer.summary
assertTensorAlmostEqual(self, summ["mean"], 10)
assertTensorAlmostEqual(self, summ["variance"], 0)
def test_var_defin(self) -> None:
"""
Variance is avg squared distance to mean. Thus it should be positive.
This test is to ensure this is the case.
To test it, we will we make a skewed distribution leaning to one end
(either very large or small values).
We will also compare to numpy and ensure it is approximately the same.
This is assuming numpy is correct, for which it should be.
"""
SMALL_VAL = -10000
BIG_VAL = 10000
AMOUNT_OF_SMALLS = [100, 10]
AMOUNT_OF_BIGS = [10, 100]
for sm, big in zip(AMOUNT_OF_SMALLS, AMOUNT_OF_BIGS):
summ = Summarizer([Var()])
values = []
for _ in range(sm):
values.append(SMALL_VAL)
summ.update(torch.tensor(SMALL_VAL, dtype=torch.float64))
for _ in range(big):
values.append(BIG_VAL)
summ.update(torch.tensor(BIG_VAL, dtype=torch.float64))
actual_var = torch.var(torch.tensor(values).double(), unbiased=False)
var = summ.summary["variance"]
assertTensorAlmostEqual(self, var, actual_var)
self.assertTrue((var > 0).all())
def test_multi_dim(self) -> None:
x1 = torch.tensor([1.0, 2.0, 3.0, 4.0])
x2 = torch.tensor([2.0, 1.0, 2.0, 4.0])
x3 = torch.tensor([3.0, 3.0, 1.0, 4.0])
summarizer = Summarizer([Mean(), Var()])
summarizer.update(x1)
assertTensorAlmostEqual(
self, summarizer.summary["mean"], x1, delta=0.05, mode="max"
)
assertTensorAlmostEqual(
self,
summarizer.summary["variance"],
torch.zeros_like(x1),
delta=0.05,
mode="max",
)
summarizer.update(x2)
assertTensorAlmostEqual(
self,
summarizer.summary["mean"],
torch.tensor([1.5, 1.5, 2.5, 4]),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
summarizer.summary["variance"],
torch.tensor([0.25, 0.25, 0.25, 0]),
delta=0.05,
mode="max",
)
summarizer.update(x3)
assertTensorAlmostEqual(
self,
summarizer.summary["mean"],
torch.tensor([2, 2, 2, 4]),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
summarizer.summary["variance"],
torch.tensor([2.0 / 3.0, 2.0 / 3.0, 2.0 / 3.0, 0]),
delta=0.05,
mode="max",
)
def test_stats_random_data(self):
N = 1000
BIG_VAL = 100000
_values = list(get_values(lo=-BIG_VAL, hi=BIG_VAL, n=N))
values = torch.tensor(_values, dtype=torch.float64)
stats_to_test = [
Mean(),
Var(),
Var(order=1),
StdDev(),
StdDev(order=1),
Min(),
Max(),
Sum(),
MSE(),
]
stat_names = [
"mean",
"variance",
"sample_variance",
"std_dev",
"sample_std_dev",
"min",
"max",
"sum",
"mse",
]
gt_fns = [
torch.mean,
lambda x: torch.var(x, unbiased=False),
lambda x: torch.var(x, unbiased=True),
lambda x: torch.std(x, unbiased=False),
lambda x: torch.std(x, unbiased=True),
torch.min,
torch.max,
torch.sum,
lambda x: torch.sum((x - torch.mean(x)) ** 2),
]
for stat, name, gt in zip(stats_to_test, stat_names, gt_fns):
summ = Summarizer([stat])
actual = gt(values)
for x in values:
summ.update(x)
stat_val = summ.summary[name]
            # rounding error is a serious issue here (more so for MSE)
assertTensorAlmostEqual(self, stat_val, actual, delta=0.005)
|
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.kernel_shap import KernelShap
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
set_all_random_seeds,
)
from tests.helpers.basic_models import (
BasicLinearModel,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
class Test(BaseTest):
def setUp(self) -> None:
super().setUp()
        try:
            import sklearn  # noqa: F401
            # Compare numeric version components; a plain string comparison
            # would mis-order multi-digit components (e.g. "0.100" < "0.23").
            version = tuple(
                int(p) for p in sklearn.__version__.split(".")[:2] if p.isdigit()
            )
            assert version >= (0, 23), "Must have sklearn version 0.23.0 or higher"
        except (ImportError, AssertionError):
            raise unittest.SkipTest(
                "Skipping KernelShap tests, sklearn missing or too old."
            )
def test_linear_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
baseline = torch.tensor([[10.0, 20.0, 10.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[40.0, 120.0, 80.0]],
n_samples=500,
baselines=baseline,
expected_coefs=[[40.0, 120.0, 80.0]],
)
def test_simple_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(1, 2, 3),
n_samples=500,
)
def test_simple_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
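        # Feature-mask semantics: mask [[0, 0, 1]] groups the first two
        # columns into one feature, so both receive the group's shared
        # attribution, and the flattened coefficient vector (expected_coefs)
        # has one entry per group rather than per column.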
self._kernel_shap_test_assert(
net,
inp,
[[275.0, 275.0, 115.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
expected_coefs=[[275.0, 115.0]],
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_kernel_shap_with_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._kernel_shap_test_assert(
net,
inp,
[[76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(bsz,),
n_samples=500,
show_progress=True,
)
output = mock_stderr.getvalue()
            # To check that the progress calculation aligns with the actual
            # iterations, every perturbations_per_eval setting should reach
            # 100% progress.
            assert (
                "Kernel Shap attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
def test_simple_kernel_shap_with_baselines(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]])
self._kernel_shap_test_assert(
net,
inp,
[[248.0, 248.0, 104.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=4,
perturbations_per_eval=(1, 2, 3),
)
def test_simple_batch_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[7.0, 32.5, 10.5], [76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(1, 2, 3),
n_samples=20000,
)
def test_simple_batch_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[39.5, 39.5, 10.5], [275.0, 275.0, 115.0]],
feature_mask=torch.tensor([[0, 0, 1], [1, 1, 0]]),
perturbations_per_eval=(1, 2, 3),
n_samples=100,
expected_coefs=[[39.5, 10.5], [115.0, 275.0]],
)
def test_multi_input_kernel_shap_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0]])
expected = (
[[90, 0, 0]],
[[78, 0, 198]],
[[0, 398, 38]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=2000,
)
def test_multi_input_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[20.0, 50.0, 30.0]])
inp2 = torch.tensor([[0.0, 100.0, 0.0]])
inp3 = torch.tensor([[2.0, 10.0, 3.0]])
mask1 = torch.tensor([[0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 0, 0]])
expected = (
[[255.0, 595.0, 255.0]],
[[255.0, 595.0, 0.0]],
[[255.0, 255.0, 255.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
expected_with_baseline = (
[[184, 580.0, 184]],
[[184, 580.0, -12.0]],
[[184, 184, 184]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_kernel_shap_with_empty_input(self) -> None:
net = BasicLinearModel()
inp1 = torch.tensor([[23.0, 0.0, 0.0, 23.0, 0.0, 0.0, 23.0]])
inp2 = torch.tensor([[]]) # empty input
mask1 = torch.tensor([[0, 1, 2, 3, 4, 5, 6]])
mask2 = torch.tensor([[]], dtype=torch.long) # empty mask
expected: Tuple[List[List[float]], ...] = (
[[-8.0, 0, 0, -2.0, 0, 0, -8.0]],
[[]],
)
# no mask
self._kernel_shap_test_assert(
net,
(inp1, inp2),
expected,
n_samples=2000,
expected_coefs=[[-8.0, 0, 0, -2.0, 0, 0, -8.0]],
)
# with mask
self._kernel_shap_test_assert(
net,
(inp1, inp2),
expected,
n_samples=2000,
expected_coefs=[[-8.0, 0, 0, -2.0, 0, 0, -8.0]],
feature_mask=(mask1, mask2),
)
def test_multi_input_batch_kernel_shap_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]])
expected = (
[[90, 0, 0], [78.0, 198.0, 118.0]],
[[78, 0, 198], [0.0, 398.0, 0.0]],
[[0, 398, 38], [0.0, 38.0, 0.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=2500,
expected_coefs=[
[90.0, 0, 0, 78, 0, 198, 0, 398, 38],
[78.0, 198.0, 118.0, 0.0, 398.0, 0.0, 0.0, 38.0, 0.0],
],
)
def test_multi_input_batch_kernel_shap(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[1088.6666, 1088.6666, 1088.6666], [255.0, 595.0, 255.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 595.0, 0.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 255.0, 255.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
n_samples=300,
)
expected_with_baseline = (
[[1040, 1040, 1040], [184, 580.0, 184]],
[[52, 1040, 132], [184, 580.0, -12.0]],
[[52, 1040, 132], [184, 184, 184]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
    # Remaining tests are for cases where the forward function returns a
    # scalar, as either a float, integer, 0d tensor, or 1d tensor.
def test_single_kernel_shap_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: torch.sum(net(inp)).item()
)
def test_single_kernel_shap_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(lambda inp: torch.sum(net(inp)))
def test_single_kernel_shap_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_single_kernel_shap_scalar_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def _single_input_scalar_kernel_shap_assert(self, func: Callable) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._kernel_shap_test_assert(
func,
inp,
[[79.0, 79.0, 21.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def test_multi_inp_kernel_shap_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(lambda *inp: torch.sum(net(*inp)))
def test_multi_inp_kernel_shap_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: torch.sum(net(*inp)).reshape(1)
)
def test_multi_inp_kernel_shap_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: int(torch.sum(net(*inp)).item())
)
def test_multi_inp_kernel_shap_scalar_float(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: torch.sum(net(*inp)).item()
)
def _multi_input_scalar_kernel_shap_assert(self, func: Callable) -> None:
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]])
mask1 = torch.tensor([[1, 1, 1]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2]])
expected = (
[[3850.6666, 3850.6666, 3850.6666]] * 2,
[[306.6666, 3850.6666, 410.6666]] * 2,
[[306.6666, 3850.6666, 410.6666]] * 2,
)
self._kernel_shap_test_assert(
func,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
perturbations_per_eval=(1,),
target=None,
n_samples=1500,
)
def _kernel_shap_test_assert(
self,
model: Callable,
test_input: TensorOrTupleOfTensorsGeneric,
expected_attr,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: Union[None, int] = 0,
n_samples: int = 100,
delta: float = 1.0,
expected_coefs: Union[None, List[float], List[List[float]]] = None,
show_progress: bool = False,
) -> None:
for batch_size in perturbations_per_eval:
kernel_shap = KernelShap(model)
attributions = kernel_shap.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
show_progress=show_progress,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_attr, delta=delta, mode="max"
)
if expected_coefs is not None:
set_all_random_seeds(1234)
# Test with return_input_shape = False
attributions = kernel_shap.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
return_input_shape=False,
show_progress=show_progress,
)
assertTensorAlmostEqual(
self, attributions, expected_coefs, delta=delta, mode="max"
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from typing import Any, cast, Tuple, Union
import torch
from captum._utils.common import _zeros
from captum._utils.typing import BaselineType, Tensor, TensorOrTupleOfTensorsGeneric
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._utils.common import _tensorize_baseline
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel,
BasicModel2,
BasicModel3,
BasicModel4_MultiArgs,
BasicModel5_MultiArgs,
BasicModel6_MultiTensor,
BasicModel_MultiLayer,
)
from torch.nn import Module
class Test(BaseTest):
def test_multivariable_vanilla(self) -> None:
self._assert_multi_variable("vanilla", "riemann_right")
    def test_multivariable_vanilla_wo_multiplying_by_inputs(self) -> None:
self._assert_multi_variable(
"vanilla", "riemann_right", multiply_by_inputs=False
)
def test_multivariable_smoothgrad(self) -> None:
self._assert_multi_variable("smoothgrad", "riemann_left")
def test_multivariable_smoothgrad_sq(self) -> None:
self._assert_multi_variable("smoothgrad_sq", "riemann_middle")
def test_multivariable_vargrad(self) -> None:
self._assert_multi_variable("vargrad", "riemann_trapezoid")
def test_multi_argument_vanilla(self) -> None:
self._assert_multi_argument("vanilla", "gausslegendre")
def test_multi_argument_smoothgrad(self) -> None:
self._assert_multi_argument("smoothgrad", "riemann_right")
def test_multi_argument_smoothgrad_sq(self) -> None:
self._assert_multi_argument("smoothgrad_sq", "riemann_left")
def test_multi_argument_vargrad(self) -> None:
self._assert_multi_argument("vargrad", "riemann_middle")
def test_univariable_vanilla(self) -> None:
self._assert_univariable("vanilla", "riemann_trapezoid")
def test_univariable_smoothgrad(self) -> None:
self._assert_univariable("smoothgrad", "gausslegendre")
def test_univariable_smoothgrad_sq(self) -> None:
self._assert_univariable("smoothgrad_sq", "riemann_right")
def test_univariable_vargrad(self) -> None:
self._assert_univariable("vargrad", "riemann_left")
def test_multi_tensor_input_vanilla(self) -> None:
self._assert_multi_tensor_input("vanilla", "riemann_middle")
def test_multi_tensor_input_smoothgrad(self) -> None:
self._assert_multi_tensor_input("smoothgrad", "riemann_trapezoid")
def test_multi_tensor_input_smoothgrad_sq(self) -> None:
self._assert_multi_tensor_input("smoothgrad_sq", "gausslegendre")
def test_multi_tensor_input_vargrad(self) -> None:
self._assert_multi_tensor_input("vargrad", "riemann_right")
def test_batched_input_vanilla(self) -> None:
self._assert_batched_tensor_input("vanilla", "riemann_left")
def test_batched_input_smoothgrad(self) -> None:
self._assert_batched_tensor_input("smoothgrad", "riemann_middle")
def test_batched_input_smoothgrad_with_batch_size_1(self) -> None:
self._assert_n_samples_batched_size("smoothgrad", "riemann_middle", 1)
    def test_batched_input_vargrad_with_batch_size_2(self) -> None:
        self._assert_n_samples_batched_size("vargrad", "riemann_middle", 2)
def test_batched_input_smoothgrad_with_batch_size_3(self) -> None:
self._assert_n_samples_batched_size("smoothgrad_sq", "riemann_middle", 3)
def test_batched_input_smoothgrad_sq(self) -> None:
self._assert_batched_tensor_input("smoothgrad_sq", "riemann_trapezoid")
def test_batched_input_vargrad(self) -> None:
self._assert_batched_tensor_input("vargrad", "gausslegendre")
    def test_batched_input_smoothgrad_wo_multiplying_by_inputs(self) -> None:
model = BasicModel_MultiLayer()
inputs = torch.tensor(
[[1.5, 2.0, 1.3], [0.5, 0.1, 2.3], [1.5, 2.0, 1.3]], requires_grad=True
)
        ig_wo_multiplying_by_inputs = IntegratedGradients(
            model, multiply_by_inputs=False
        )
        nt_wo_multiplying_by_inputs = NoiseTunnel(ig_wo_multiplying_by_inputs)
ig = IntegratedGradients(model)
nt = NoiseTunnel(ig)
n_samples = 5
target = 0
type = "smoothgrad"
        attributions_wo_multiplying_by_inputs = nt_wo_multiplying_by_inputs.attribute(
inputs,
nt_type=type,
nt_samples=n_samples,
stdevs=0.0,
target=target,
n_steps=500,
)
attributions = nt.attribute(
inputs,
nt_type=type,
nt_samples=n_samples,
stdevs=0.0,
target=target,
n_steps=500,
)
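        # With stdevs=0.0 no noise is added, so every smoothgrad sample is
        # identical, and the two results differ only by the inputs factor
        # (the default baseline is zero).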
        assertTensorAlmostEqual(
            self, attributions_wo_multiplying_by_inputs * inputs, attributions
        )
def test_batched_multi_input_vanilla(self) -> None:
self._assert_batched_tensor_multi_input("vanilla", "riemann_right")
def test_batched_multi_input_smoothgrad(self) -> None:
self._assert_batched_tensor_multi_input("smoothgrad", "riemann_left")
def test_batched_multi_input_smoothgrad_sq(self) -> None:
self._assert_batched_tensor_multi_input("smoothgrad_sq", "riemann_middle")
def test_batched_multi_input_vargrad(self) -> None:
self._assert_batched_tensor_multi_input("vargrad", "riemann_trapezoid")
def test_batched_multi_input_vargrad_batch_size_1(self) -> None:
self._assert_batched_tensor_multi_input("vargrad", "riemann_trapezoid", 1)
    def test_batched_multi_input_vargrad_batch_size_2(self) -> None:
        self._assert_batched_tensor_multi_input("vargrad", "riemann_trapezoid", 2)
    def test_batched_multi_input_vargrad_batch_size_3(self) -> None:
        self._assert_batched_tensor_multi_input("vargrad", "riemann_trapezoid", 3)
def _assert_multi_variable(
self,
type: str,
approximation_method: str = "gausslegendre",
multiply_by_inputs: bool = True,
) -> None:
model = BasicModel2()
input1 = torch.tensor([3.0])
input2 = torch.tensor([1.0], requires_grad=True)
baseline1 = torch.tensor([0.0])
baseline2 = torch.tensor([0.0])
attributions1 = self._compute_attribution_and_evaluate(
model,
(input1, input2),
(baseline1, baseline2),
type=type,
approximation_method=approximation_method,
multiply_by_inputs=multiply_by_inputs,
)
if type == "vanilla":
assertTensorAlmostEqual(
self,
attributions1[0],
[1.5] if multiply_by_inputs else [0.5],
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
attributions1[1],
                [-0.5],  # input2 - baseline2 == 1.0, so unchanged either way
delta=0.05,
mode="max",
)
model = BasicModel3()
attributions2 = self._compute_attribution_and_evaluate(
model,
(input1, input2),
(baseline1, baseline2),
type=type,
approximation_method=approximation_method,
multiply_by_inputs=multiply_by_inputs,
)
if type == "vanilla":
assertTensorAlmostEqual(
self,
attributions2[0],
[1.5] if multiply_by_inputs else [0.5],
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
attributions2[1],
                [-0.5],  # input2 - baseline2 == 1.0, so unchanged either way
delta=0.05,
mode="max",
)
        # Verifies implementation invariance: two functionally equivalent
        # models (BasicModel2 and BasicModel3) should receive the same total
        # attribution.
self.assertEqual(
sum(attribution for attribution in attributions1),
sum(attribution for attribution in attributions2),
)
def _assert_univariable(
self, type: str, approximation_method: str = "gausslegendre"
) -> None:
model = BasicModel()
self._compute_attribution_and_evaluate(
model,
torch.tensor([1.0], requires_grad=True),
torch.tensor([0.0]),
type=type,
approximation_method=approximation_method,
)
self._compute_attribution_and_evaluate(
model,
torch.tensor([0.0]),
torch.tensor([0.0]),
type=type,
approximation_method=approximation_method,
)
self._compute_attribution_and_evaluate(
model,
torch.tensor([-1.0], requires_grad=True),
0.00001,
type=type,
approximation_method=approximation_method,
)
def _assert_multi_argument(
self, type: str, approximation_method: str = "gausslegendre"
) -> None:
model = BasicModel4_MultiArgs()
self._compute_attribution_and_evaluate(
model,
(
torch.tensor([[1.5, 2.0, 34.3]], requires_grad=True),
torch.tensor([[3.0, 3.5, 23.2]], requires_grad=True),
),
baselines=(0.0, torch.zeros((1, 3))),
additional_forward_args=torch.arange(1.0, 4.0).reshape(1, 3),
type=type,
approximation_method=approximation_method,
)
# uses batching with an integer variable and nd-tensors as
# additional forward arguments
self._compute_attribution_and_evaluate(
model,
(
torch.tensor([[1.5, 2.0, 34.3], [3.4, 1.2, 2.0]], requires_grad=True),
torch.tensor([[3.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True),
),
baselines=(torch.zeros((2, 3)), 0.0),
additional_forward_args=(torch.arange(1.0, 7.0).reshape(2, 3), 1),
type=type,
approximation_method=approximation_method,
)
# uses batching with an integer variable and python list
# as additional forward arguments
model = BasicModel5_MultiArgs()
self._compute_attribution_and_evaluate(
model,
(
torch.tensor([[1.5, 2.0, 34.3], [3.4, 1.2, 2.0]], requires_grad=True),
torch.tensor([[3.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True),
),
baselines=(0.0, 0.00001),
additional_forward_args=([2, 3], 1),
type=type,
approximation_method=approximation_method,
)
# similar to previous case plus baseline consists of a tensor and
# a single example
self._compute_attribution_and_evaluate(
model,
(
torch.tensor([[1.5, 2.0, 34.3], [3.4, 1.2, 2.0]], requires_grad=True),
torch.tensor([[3.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True),
),
baselines=(torch.zeros((1, 3)), 0.00001),
additional_forward_args=([2, 3], 1),
type=type,
approximation_method=approximation_method,
)
def _assert_multi_tensor_input(
self, type: str, approximation_method: str = "gausslegendre"
) -> None:
model = BasicModel6_MultiTensor()
self._compute_attribution_and_evaluate(
model,
(
torch.tensor([[1.5, 2.0, 3.3]], requires_grad=True),
torch.tensor([[3.0, 3.5, 2.2]], requires_grad=True),
),
type=type,
approximation_method=approximation_method,
)
def _assert_batched_tensor_input(
self, type: str, approximation_method: str = "gausslegendre"
) -> None:
model = BasicModel_MultiLayer()
input = (
torch.tensor(
[[1.5, 2.0, 1.3], [0.5, 0.1, 2.3], [1.5, 2.0, 1.3]], requires_grad=True
),
)
self._compute_attribution_and_evaluate(
model, input, type=type, target=0, approximation_method=approximation_method
)
self._compute_attribution_batch_helper_evaluate(
model, input, target=0, approximation_method=approximation_method
)
def _assert_batched_tensor_multi_input(
self,
type: str,
approximation_method: str = "gausslegendre",
        nt_samples_batch_size: Union[None, int] = None,
) -> None:
model = BasicModel_MultiLayer()
input = (
torch.tensor(
[[1.5, 2.1, 1.9], [0.5, 0.0, 0.7], [1.5, 2.1, 1.1]], requires_grad=True
),
torch.tensor(
[[0.3, 1.9, 2.4], [0.5, 0.6, 2.1], [1.2, 2.1, 0.2]], requires_grad=True
),
)
self._compute_attribution_and_evaluate(
model,
input,
type=type,
target=0,
approximation_method=approximation_method,
nt_samples_batch_size=nt_samples_batch_size,
)
def _assert_n_samples_batched_size(
self,
type: str,
approximation_method: str = "gausslegendre",
        nt_samples_batch_size: Union[None, int] = None,
) -> None:
model = BasicModel_MultiLayer()
input = (
torch.tensor(
[[1.5, 2.0, 1.3], [0.5, 0.1, 2.3], [1.5, 2.0, 1.3]], requires_grad=True
),
)
self._compute_attribution_and_evaluate(
model,
input,
type=type,
target=0,
nt_samples_batch_size=nt_samples_batch_size,
approximation_method=approximation_method,
)
def _compute_attribution_and_evaluate(
self,
model: Module,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: Union[None, int] = None,
additional_forward_args: Any = None,
type: str = "vanilla",
approximation_method: str = "gausslegendre",
multiply_by_inputs=True,
nt_samples_batch_size=None,
) -> Tuple[Tensor, ...]:
r"""
attrib_type: 'vanilla', 'smoothgrad', 'smoothgrad_sq', 'vargrad'
"""
ig = IntegratedGradients(model, multiply_by_inputs=multiply_by_inputs)
self.assertEqual(ig.multiplies_by_inputs, multiply_by_inputs)
if not isinstance(inputs, tuple):
inputs = (inputs,) # type: ignore
inputs: Tuple[Tensor, ...]
if baselines is not None and not isinstance(baselines, tuple):
baselines = (baselines,)
if baselines is None:
baselines = _tensorize_baseline(inputs, _zeros(inputs))
if type == "vanilla":
attributions, delta = ig.attribute(
inputs,
baselines,
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=500,
target=target,
return_convergence_delta=True,
)
model.zero_grad()
attributions_without_delta, delta = ig.attribute(
inputs,
baselines,
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=500,
target=target,
return_convergence_delta=True,
)
model.zero_grad()
self.assertEqual([inputs[0].shape[0]], list(delta.shape))
delta_external = ig.compute_convergence_delta(
attributions,
baselines,
inputs,
target=target,
additional_forward_args=additional_forward_args,
)
assertTensorAlmostEqual(self, delta, delta_external, delta=0.0, mode="max")
else:
nt = NoiseTunnel(ig)
n_samples = 5
attributions, delta = nt.attribute(
inputs,
nt_type=type,
nt_samples=n_samples,
stdevs=0.00000002,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=500,
return_convergence_delta=True,
nt_samples_batch_size=nt_samples_batch_size,
)
attributions_without_delta = nt.attribute(
inputs,
nt_type=type,
nt_samples=n_samples,
stdevs=0.00000002,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=500,
nt_samples_batch_size=3,
)
self.assertEqual(nt.multiplies_by_inputs, multiply_by_inputs)
self.assertEqual([inputs[0].shape[0] * n_samples], list(delta.shape))
for input, attribution in zip(inputs, attributions):
self.assertEqual(attribution.shape, input.shape)
if multiply_by_inputs:
assertTensorAlmostEqual(self, delta, torch.zeros(delta.shape), 0.07, "max")
# compare attributions retrieved with and without
# `return_convergence_delta` flag
for attribution, attribution_without_delta in zip(
attributions, attributions_without_delta
):
assertTensorAlmostEqual(
self, attribution, attribution_without_delta, delta=0.05
)
return cast(Tuple[Tensor, ...], attributions)
def _compute_attribution_batch_helper_evaluate(
self,
model: Module,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[None, Tensor, Tuple[Tensor, ...]] = None,
target: Union[None, int] = None,
additional_forward_args: Any = None,
approximation_method: str = "gausslegendre",
) -> None:
ig = IntegratedGradients(model)
if not isinstance(inputs, tuple):
inputs = (inputs,) # type: ignore
inputs: Tuple[Tensor, ...]
if baselines is not None and not isinstance(baselines, tuple):
baselines = (baselines,)
if baselines is None:
baselines = _tensorize_baseline(inputs, _zeros(inputs))
for internal_batch_size in [None, 10, 20]:
attributions, delta = ig.attribute(
inputs,
baselines,
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=100,
target=target,
internal_batch_size=internal_batch_size,
return_convergence_delta=True,
)
total_delta = 0.0
for i in range(inputs[0].shape[0]):
attributions_indiv, delta_indiv = ig.attribute(
tuple(input[i : i + 1] for input in inputs),
tuple(baseline[i : i + 1] for baseline in baselines),
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=100,
target=target,
internal_batch_size=internal_batch_size,
return_convergence_delta=True,
)
total_delta += abs(delta_indiv).sum().item()
for j in range(len(attributions)):
assertTensorAlmostEqual(
self,
attributions[j][i : i + 1].squeeze(0),
attributions_indiv[j].squeeze(0),
delta=0.05,
mode="max",
)
self.assertAlmostEqual(abs(delta).sum().item(), total_delta, delta=0.005)
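# A minimal sketch (simplified: single input tensor, scalar output sum, no
# batching or targets; not Captum's implementation) of the quantity the
# approximation_method variants above estimate. Integrated gradients
# approximate (x - baseline) * integral_0^1 of grad f(baseline + a * (x -
# baseline)) da, here with a midpoint ("riemann_middle") rule.
def _ig_riemann_middle_sketch(forward_fn, inp, baseline, n_steps=50):
    total = torch.zeros_like(inp)
    for i in range(n_steps):
        alpha = (i + 0.5) / n_steps  # midpoint of the i-th integration slice
        point = (baseline + alpha * (inp - baseline)).detach().requires_grad_(True)
        grad = torch.autograd.grad(forward_fn(point).sum(), point)[0]
        total = total + grad / n_steps  # running Riemann sum of gradients
    return (inp - baseline) * total  # scale by the input-baseline difference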
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from typing import Any, Callable, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.shapley_value import ShapleyValues, ShapleyValueSampling
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
BasicModelBoolInput,
)
class Test(BaseTest):
def test_simple_shapley_sampling(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._shapley_test_assert(
net,
inp,
[[76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(1, 2, 3),
n_samples=250,
)
def test_simple_shapley_sampling_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._shapley_test_assert(
net,
inp,
[[275.0, 275.0, 115.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_shapley_sampling_boolean(self) -> None:
net = BasicModelBoolInput()
inp = torch.tensor([[True, False, True]])
self._shapley_test_assert(
net,
inp,
[[35.0, 35.0, 35.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_shapley_sampling_boolean_with_baseline(self) -> None:
net = BasicModelBoolInput()
inp = torch.tensor([[True, False, True]])
self._shapley_test_assert(
net,
inp,
[[-40.0, -40.0, 0.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=True,
perturbations_per_eval=(1, 2, 3),
)
def test_simple_shapley_sampling_with_baselines(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]])
self._shapley_test_assert(
net,
inp,
[[248.0, 248.0, 104.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=4,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_sample_shapley_sampling(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]])
self._shapley_test_assert(
net,
inp,
[[7.0, 32.5, 10.5], [76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(1, 2, 3),
n_samples=200,
)
def test_multi_sample_shapley_sampling_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
self._shapley_test_assert(
net,
inp,
[[39.5, 39.5, 10.5], [275.0, 275.0, 115.0]],
feature_mask=mask,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_shapley_sampling_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]])
expected = (
[[90, 0, 0], [78.0, 198.0, 118.0]],
[[78, 0, 198], [0.0, 398.0, 0.0]],
[[0, 398, 38], [0.0, 38.0, 0.0]],
)
self._shapley_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=200,
test_true_shapley=False,
)
def test_multi_input_shapley_sampling_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[1088.6666, 1088.6666, 1088.6666], [255.0, 595.0, 255.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 595.0, 0.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 255.0, 255.0]],
)
self._shapley_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
expected_with_baseline = (
[[1040, 1040, 1040], [184, 580.0, 184]],
[[52, 1040, 132], [184, 580.0, -12.0]],
[[52, 1040, 132], [184, 184, 184]],
)
self._shapley_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
# Remaining tests are for cases where forward function returns a scalar
# per batch, as either a float, integer, 0d tensor or 1d tensor.
def test_single_shapley_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).item()
)
def test_single_shapley_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp))
)
def test_single_shapley_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_single_shapley_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def test_single_shapley_int_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_int_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp.float())).item()
)
def test_single_shapley_int_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_int_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp.float()))
)
def test_single_shapley_int_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_int_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp.float())).reshape(1)
)
def test_single_shapley_int_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_int_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: int(torch.sum(net(inp.float())).item())
)
def test_multi_sample_shapley_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).item()
)
def test_multi_sample_shapley_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp))
)
def test_multi_sample_shapley_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_multi_sample_shapley_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def test_multi_inp_shapley_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(
lambda *inp: torch.sum(net(*inp)).item()
)
def test_multi_inp_shapley_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(lambda *inp: torch.sum(net(*inp)))
def test_multi_inp_shapley_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(
lambda *inp: torch.sum(net(*inp)).reshape(1)
)
    def test_multi_inp_shapley_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(
lambda *inp: int(torch.sum(net(*inp)).item())
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_shapley_sampling_with_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._shapley_test_assert(
net,
inp,
[[76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(bsz,),
n_samples=250,
show_progress=True,
)
output = mock_stderr.getvalue()
            # To check that the progress calculation aligns with the actual
            # iterations, every perturbations_per_eval setting should reach
            # 100% progress.
            assert (
                "Shapley Value Sampling attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
            assert (
                "Shapley Values attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_shapley_sampling_with_mask_and_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._shapley_test_assert(
net,
inp,
[[275.0, 275.0, 115.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(bsz,),
show_progress=True,
)
output = mock_stderr.getvalue()
            # To check that the progress calculation aligns with the actual
            # iterations, every perturbations_per_eval setting should reach
            # 100% progress.
            assert (
                "Shapley Value Sampling attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
            assert (
                "Shapley Values attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
def _single_input_one_sample_batch_scalar_shapley_assert(
self, func: Callable
) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._shapley_test_assert(
func,
inp,
[[79.0, 79.0, 21.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def _single_input_multi_sample_batch_scalar_shapley_assert(
self, func: Callable
) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._shapley_test_assert(
func,
inp,
[[629.0, 629.0, 251.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
n_samples=2500,
)
def _single_int_input_multi_sample_batch_scalar_shapley_assert(
self, func: Callable
) -> None:
inp = torch.tensor([[2, 10, 3], [20, 50, 30]])
mask = torch.tensor([[0, 0, 1]])
self._shapley_test_assert(
func,
inp,
[[629.0, 629.0, 251.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
n_samples=2500,
)
def _multi_input_batch_scalar_shapley_assert(self, func: Callable) -> None:
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]])
mask1 = torch.tensor([[1, 1, 1]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2]])
expected = (
[[3850.6666, 3850.6666, 3850.6666]],
[[306.6666, 3850.6666, 410.6666]],
[[306.6666, 3850.6666, 410.6666]],
)
self._shapley_test_assert(
func,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
perturbations_per_eval=(1,),
target=None,
n_samples=3500,
delta=1.2,
)
def _shapley_test_assert(
self,
model: Callable,
test_input: TensorOrTupleOfTensorsGeneric,
expected_attr,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: Union[None, int] = 0,
n_samples: int = 100,
delta: float = 1.0,
test_true_shapley: bool = True,
show_progress: bool = False,
) -> None:
for batch_size in perturbations_per_eval:
shapley_samp = ShapleyValueSampling(model)
attributions = shapley_samp.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
show_progress=show_progress,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_attr, delta=delta, mode="max"
)
if test_true_shapley:
shapley_val = ShapleyValues(model)
attributions = shapley_val.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
show_progress=show_progress,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_attr, mode="max", delta=0.001
)
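# A minimal sketch (simplified: 1-D input, scalar-valued forward_fn, no
# feature masks; not Captum's implementation) of the permutation sampling
# behind ShapleyValueSampling: average each feature's marginal contribution
# over random feature orderings.
def _shapley_sampling_sketch(forward_fn, inp, baseline, n_samples=100):
    attr = torch.zeros_like(inp)
    for _ in range(n_samples):
        current = baseline.clone()
        prev_out = forward_fn(current)
        for f in torch.randperm(inp.numel()):  # a random feature ordering
            current[f] = inp[f]  # switch feature f from baseline to input
            out = forward_fn(current)
            attr[f] += out - prev_out  # marginal contribution of feature f
            prev_out = out
    return attr / n_samples  # average over the sampled orderings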
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
import torch
from captum._utils.typing import BaselineType, Tensor
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.noise_tunnel import NoiseTunnel
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.classification_models import SigmoidModel, SoftmaxModel
from torch.nn import Module
class Test(BaseTest):
def test_sigmoid_classification_vanilla(self) -> None:
self._assert_sigmoid_classification("vanilla", "riemann_right")
def test_sigmoid_classification_smoothgrad(self) -> None:
self._assert_sigmoid_classification("smoothgrad", "riemann_left")
def test_sigmoid_classification_smoothgrad_sq(self) -> None:
self._assert_sigmoid_classification("smoothgrad_sq", "riemann_middle")
def test_sigmoid_classification_vargrad(self) -> None:
self._assert_sigmoid_classification("vargrad", "riemann_trapezoid")
def test_softmax_classification_vanilla(self) -> None:
self._assert_softmax_classification("vanilla", "gausslegendre")
def test_softmax_classification_smoothgrad(self) -> None:
self._assert_softmax_classification("smoothgrad", "riemann_right")
def test_softmax_classification_smoothgrad_sq(self) -> None:
self._assert_softmax_classification("smoothgrad_sq", "riemann_left")
def test_softmax_classification_vargrad(self) -> None:
self._assert_softmax_classification("vargrad", "riemann_middle")
def test_softmax_classification_vanilla_batch(self) -> None:
self._assert_softmax_classification_batch("vanilla", "riemann_trapezoid")
def test_softmax_classification_smoothgrad_batch(self) -> None:
self._assert_softmax_classification_batch("smoothgrad", "gausslegendre")
def test_softmax_classification_smoothgrad_sq_batch(self) -> None:
self._assert_softmax_classification_batch("smoothgrad_sq", "riemann_right")
def test_softmax_classification_vargrad_batch(self) -> None:
self._assert_softmax_classification_batch("vargrad", "riemann_left")
def _assert_sigmoid_classification(
self, type: str = "vanilla", approximation_method: str = "gausslegendre"
) -> None:
num_in = 20
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
target = torch.tensor(0)
# TODO add test cases for multiple different layers
model = SigmoidModel(num_in, 5, 1)
        self._validate_completeness(model, input, target, type, approximation_method)
def _assert_softmax_classification(
self, type: str = "vanilla", approximation_method: str = "gausslegendre"
) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
target = torch.tensor(5)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
        self._validate_completeness(model, input, target, type, approximation_method)
def _assert_softmax_classification_batch(
self, type: str = "vanilla", approximation_method: str = "gausslegendre"
) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 3.0, requires_grad=True).reshape(3, num_in)
target = torch.tensor([5, 5, 2])
baseline = torch.zeros(1, num_in)
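        # A single-example baseline (shape (1, num_in)) is broadcast across
        # the batch of 3 inputs.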
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
        self._validate_completeness(
            model, input, target, type, approximation_method, baseline
        )
    def _validate_completeness(
self,
model: Module,
input: Tensor,
target: Tensor,
type: str = "vanilla",
approximation_method: str = "gausslegendre",
baseline: BaselineType = None,
) -> None:
ig = IntegratedGradients(model.forward)
model.zero_grad()
if type == "vanilla":
attributions, delta = ig.attribute(
input,
baselines=baseline,
target=target,
method=approximation_method,
n_steps=200,
return_convergence_delta=True,
)
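            # delta measures the completeness gap: the summed attributions
            # should equal f(input) - f(baseline), so delta should be ~0.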
delta_expected = ig.compute_convergence_delta(
attributions, baseline, input, target
)
assertTensorAlmostEqual(self, delta_expected, delta)
delta_condition = (delta.abs() < 0.005).all()
self.assertTrue(
delta_condition,
"The sum of attribution values {} is not "
"nearly equal to the difference between the endpoint for "
"some samples".format(delta),
)
self.assertEqual([input.shape[0]], list(delta.shape))
else:
nt = NoiseTunnel(ig)
n_samples = 10
attributions, delta = nt.attribute(
input,
baselines=baseline,
nt_type=type,
nt_samples=n_samples,
stdevs=0.0002,
n_steps=100,
target=target,
method=approximation_method,
return_convergence_delta=True,
)
self.assertEqual([input.shape[0] * n_samples], list(delta.shape))
self.assertTrue((delta.abs() < 0.05).all())
self.assertEqual(attributions.shape, input.shape)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import torch
from captum.attr._utils.batching import (
_batched_generator,
_batched_operator,
_tuple_splice_range,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
class Test(BaseTest):
def test_tuple_splice_range(self) -> None:
test_tuple = (
torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]]),
"test",
torch.tensor([[6, 7, 8], [0, 1, 2], [3, 4, 5]]),
)
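        # _tuple_splice_range slices tensor entries along dim 0 and passes
        # non-tensor entries (such as the string) through unchanged.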
spliced_tuple = _tuple_splice_range(test_tuple, 1, 3)
assertTensorAlmostEqual(self, spliced_tuple[0], [[3, 4, 5], [6, 7, 8]])
self.assertEqual(spliced_tuple[1], "test")
assertTensorAlmostEqual(self, spliced_tuple[2], [[0, 1, 2], [3, 4, 5]])
def test_tuple_splice_range_3d(self) -> None:
test_tuple = (
torch.tensor([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [6, 7, 8]]]),
"test",
)
spliced_tuple = _tuple_splice_range(test_tuple, 1, 2)
assertTensorAlmostEqual(self, spliced_tuple[0], [[[6, 7, 8], [6, 7, 8]]])
self.assertEqual(spliced_tuple[1], "test")
def test_batched_generator(self) -> None:
def sample_operator(inputs, additional_forward_args, target_ind, scale):
return (
scale * (sum(inputs)),
scale * sum(additional_forward_args),
target_ind,
)
array1 = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
array2 = [[6, 7, 8], [0, 1, 2], [3, 4, 5]]
array3 = [[0, 1, 2], [0, 0, 0], [0, 0, 0]]
inp1, inp2, inp3 = (
torch.tensor(array1),
torch.tensor(array2),
torch.tensor(array3),
)
for index, (inp, add, targ) in enumerate(
_batched_generator((inp1, inp2), (inp3, 5), 7, 1)
):
assertTensorAlmostEqual(self, inp[0], [array1[index]])
assertTensorAlmostEqual(self, inp[1], [array2[index]])
assertTensorAlmostEqual(self, add[0], [array3[index]])
self.assertEqual(add[1], 5)
self.assertEqual(targ, 7)
def test_batched_operator_0_bsz(self) -> None:
inp1 = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
with self.assertRaises(AssertionError):
_batched_operator(lambda x: x, inputs=inp1, internal_batch_size=0)
def test_batched_operator(self) -> None:
def _sample_operator(inputs, additional_forward_args, target_ind, scale):
return (
scale * (sum(inputs)),
scale * sum(additional_forward_args) + target_ind[0],
)
inp1 = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
inp2 = torch.tensor([[6, 7, 8], [0, 1, 2], [3, 4, 5]])
inp3 = torch.tensor([[0, 1, 2], [0, 0, 0], [0, 0, 0]])
batched_result = _batched_operator(
_sample_operator,
inputs=(inp1, inp2),
additional_forward_args=(inp3),
target_ind=[0, 1, 2],
scale=2.0,
internal_batch_size=1,
)
assertTensorAlmostEqual(
self, batched_result[0], [[12, 16, 20], [6, 10, 14], [18, 22, 26]]
)
assertTensorAlmostEqual(
self, batched_result[1], [[0, 2, 4], [1, 1, 1], [2, 2, 2]]
)
|
#!/usr/bin/env python3
import unittest
from typing import Any, List, Tuple, Union
import torch
from captum._utils.typing import TensorLikeList, TensorOrTupleOfTensorsGeneric
from captum.attr._core.guided_backprop_deconvnet import GuidedBackprop
from captum.attr._core.neuron.neuron_guided_backprop_deconvnet import (
NeuronGuidedBackprop,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel_ConvNet_One_Conv
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_conv_gb(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[
[
[0.0, 1.0, 1.0, 1.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
]
]
self._guided_backprop_test_assert(net, (inp,), (exp,))
def test_simple_input_conv_neuron_gb(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[
[
[0.0, 1.0, 1.0, 1.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
]
]
self._neuron_guided_backprop_test_assert(net, net.fc1, (0,), (inp,), (exp,))
def test_simple_input_conv_neuron_gb_agg_neurons(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[
[
[0.0, 1.0, 1.0, 1.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
]
]
self._neuron_guided_backprop_test_assert(
net, net.fc1, (slice(0, 1, 1),), (inp,), (exp,)
)
def test_simple_multi_input_conv_gb(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex_attr = [
[
[
[1.0, 2.0, 2.0, 1.0],
[2.0, 4.0, 4.0, 2.0],
[2.0, 4.0, 4.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
]
]
self._guided_backprop_test_assert(net, (inp, inp2), (ex_attr, ex_attr))
def test_simple_multi_input_conv_neuron_gb(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex_attr = [
[
[
[1.0, 2.0, 2.0, 1.0],
[2.0, 4.0, 4.0, 2.0],
[2.0, 4.0, 4.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
]
]
self._neuron_guided_backprop_test_assert(
net, net.fc1, (3,), (inp, inp2), (ex_attr, ex_attr)
)
def test_gb_matching(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 100.0 * torch.randn(1, 1, 4, 4)
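        # The helper checks, for every output index i, that guided backprop
        # w.r.t. target i matches neuron guided backprop for neuron (i,) of
        # the given layer.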
self._guided_backprop_matching_assert(net, net.relu2, inp)
def _guided_backprop_test_assert(
self,
model: Module,
test_input: TensorOrTupleOfTensorsGeneric,
expected: Tuple[TensorLikeList, ...],
additional_input: Any = None,
) -> None:
guided_backprop = GuidedBackprop(model)
attributions = guided_backprop.attribute(
test_input, target=0, additional_forward_args=additional_input
)
for i in range(len(test_input)):
assertTensorAlmostEqual(
self,
attributions[i],
expected[i],
delta=0.01,
)
def _neuron_guided_backprop_test_assert(
self,
model: Module,
layer: Module,
neuron_selector: Union[int, Tuple[Union[int, slice], ...]],
test_input: TensorOrTupleOfTensorsGeneric,
expected: Tuple[List[List[List[List[float]]]], ...],
additional_input: Any = None,
) -> None:
guided_backprop = NeuronGuidedBackprop(model, layer)
attributions = guided_backprop.attribute(
test_input,
neuron_selector=neuron_selector,
additional_forward_args=additional_input,
)
for i in range(len(test_input)):
assertTensorAlmostEqual(
self,
attributions[i],
expected[i],
delta=0.01,
)
def _guided_backprop_matching_assert(
self,
model: Module,
output_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
):
out = model(test_input)
attrib = GuidedBackprop(model)
self.assertFalse(attrib.multiplies_by_inputs)
neuron_attrib = NeuronGuidedBackprop(model, output_layer)
for i in range(out.shape[1]):
gbp_vals = attrib.attribute(test_input, target=i)
neuron_gbp_vals = neuron_attrib.attribute(test_input, (i,))
assertTensorAlmostEqual(self, gbp_vals, neuron_gbp_vals, delta=0.01)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
from typing import cast, Tuple
import torch
import torch.nn as nn
from captum.attr import InputXGradient, LRP
from captum.attr._utils.lrp_rules import (
Alpha1_Beta0_Rule,
EpsilonRule,
GammaRule,
IdentityRule,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
BasicModelWithReusedLinear,
SimpleLRPModel,
)
from torch import Tensor
from torch.nn import Module
def _get_basic_config() -> Tuple[Module, Tensor]:
input = torch.arange(16).view(1, 1, 4, 4).float()
return BasicModel_ConvNet_One_Conv(), input
def _get_rule_config() -> Tuple[Tensor, Module, Tensor, Tensor]:
relevance = torch.tensor([[[-0.0, 3.0]]])
layer = nn.modules.Conv1d(1, 1, 2, bias=False)
nn.init.constant_(layer.weight.data, 2)
activations = torch.tensor([[[1.0, 5.0, 7.0]]])
input = torch.tensor([[2, 0, -2]])
return relevance, layer, activations, input
def _get_simple_model(inplace: bool = False) -> Tuple[Module, Tensor]:
model = SimpleLRPModel(inplace)
inputs = torch.tensor([[1.0, 2.0, 3.0]])
return model, inputs
def _get_simple_model2(inplace: bool = False) -> Tuple[Module, Tensor]:
class MyModel(nn.Module):
def __init__(self, inplace) -> None:
super().__init__()
self.lin = nn.Linear(2, 2)
self.lin.weight = nn.Parameter(torch.ones(2, 2))
self.relu = torch.nn.ReLU(inplace=inplace)
def forward(self, input):
return self.relu(self.lin(input))[0].unsqueeze(0)
input = torch.tensor([[1.0, 2.0], [1.0, 3.0]])
model = MyModel(inplace)
return model, input
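# A minimal sketch (a single linear layer without bias; not Captum's
# implementation) of the epsilon rule exercised below: output relevance is
# redistributed to the inputs in proportion to their contributions
# z_ij = w_ij * x_j, with a small stabilizer added to the denominator.
def _lrp_epsilon_sketch(weight, inputs, relevance_out, eps=1e-9):
    z = inputs @ weight.t()  # pre-activations of the linear layer
    s = relevance_out / (z + eps * z.sign())  # stabilized relevance ratio
    return inputs * (s @ weight)  # relevance attributed to each input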
class Test(BaseTest):
def test_lrp_creator(self) -> None:
model, _ = _get_basic_config()
model.conv1.rule = 1 # type: ignore
self.assertRaises(TypeError, LRP, model)
def test_lrp_creator_activation(self) -> None:
model, inputs = _get_basic_config()
model.add_module("sigmoid", nn.Sigmoid())
lrp = LRP(model)
self.assertRaises(TypeError, lrp.attribute, inputs)
def test_lrp_basic_attributions(self) -> None:
model, inputs = _get_basic_config()
logits = model(inputs)
_, classIndex = torch.max(logits, 1)
lrp = LRP(model)
relevance, delta = lrp.attribute(
inputs, cast(int, classIndex.item()), return_convergence_delta=True
)
self.assertEqual(delta.item(), 0) # type: ignore
self.assertEqual(relevance.shape, inputs.shape) # type: ignore
assertTensorAlmostEqual(
self,
relevance,
torch.Tensor(
[[[[0, 1, 2, 3], [0, 5, 6, 7], [0, 9, 10, 11], [0, 0, 0, 0]]]]
),
)
def test_lrp_simple_attributions(self) -> None:
model, inputs = _get_simple_model()
model.eval()
model.linear.rule = EpsilonRule() # type: ignore
model.linear2.rule = EpsilonRule() # type: ignore
lrp = LRP(model)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(self, relevance, torch.tensor([[18.0, 36.0, 54.0]]))
def test_lrp_simple_attributions_batch(self) -> None:
model, inputs = _get_simple_model()
model.eval()
model.linear.rule = EpsilonRule() # type: ignore
model.linear2.rule = EpsilonRule() # type: ignore
lrp = LRP(model)
inputs = torch.cat((inputs, 3 * inputs))
relevance, delta = lrp.attribute(
inputs, target=0, return_convergence_delta=True
)
self.assertEqual(relevance.shape, inputs.shape) # type: ignore
self.assertEqual(delta.shape[0], inputs.shape[0]) # type: ignore
assertTensorAlmostEqual(
self, relevance, torch.Tensor([[18.0, 36.0, 54.0], [54.0, 108.0, 162.0]])
)
def test_lrp_simple_repeat_attributions(self) -> None:
model, inputs = _get_simple_model()
model.eval()
model.linear.rule = GammaRule() # type: ignore
model.linear2.rule = Alpha1_Beta0_Rule() # type: ignore
output = model(inputs)
lrp = LRP(model)
_ = lrp.attribute(inputs)
output_after = model(inputs)
assertTensorAlmostEqual(self, output, output_after)
def test_lrp_simple_inplaceReLU(self) -> None:
model_default, inputs = _get_simple_model()
model_inplace, _ = _get_simple_model(inplace=True)
for model in [model_default, model_inplace]:
model.eval()
model.linear.rule = EpsilonRule() # type: ignore
model.linear2.rule = EpsilonRule() # type: ignore
lrp_default = LRP(model_default)
lrp_inplace = LRP(model_inplace)
relevance_default = lrp_default.attribute(inputs)
relevance_inplace = lrp_inplace.attribute(inputs)
assertTensorAlmostEqual(self, relevance_default, relevance_inplace)
def test_lrp_simple_tanh(self) -> None:
class Model(nn.Module):
def __init__(self) -> None:
super(Model, self).__init__()
self.linear = nn.Linear(3, 3, bias=False)
self.linear.weight.data.fill_(0.1)
self.tanh = torch.nn.Tanh()
self.linear2 = nn.Linear(3, 1, bias=False)
self.linear2.weight.data.fill_(0.1)
def forward(self, x):
return self.linear2(self.tanh(self.linear(x)))
model = Model()
inputs = torch.tensor([[1.0, 2.0, 3.0]])
_ = model(inputs)
lrp = LRP(model)
relevance = lrp.attribute(inputs)
        assertTensorAlmostEqual(
            self, relevance, torch.Tensor([[0.0269, 0.0537, 0.0806]])
        )  # Expected result when tanh is skipped during relevance propagation.
def test_lrp_simple_attributions_GammaRule(self) -> None:
model, inputs = _get_simple_model()
with torch.no_grad():
model.linear.weight.data[0][0] = -2 # type: ignore
model.eval()
model.linear.rule = GammaRule(gamma=1) # type: ignore
model.linear2.rule = GammaRule() # type: ignore
lrp = LRP(model)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(
self, relevance.data, torch.tensor([[28 / 3, 104 / 3, 52]]) # type: ignore
)
def test_lrp_simple_attributions_AlphaBeta(self) -> None:
model, inputs = _get_simple_model()
with torch.no_grad():
model.linear.weight.data[0][0] = -2 # type: ignore
model.eval()
model.linear.rule = Alpha1_Beta0_Rule() # type: ignore
model.linear2.rule = Alpha1_Beta0_Rule() # type: ignore
lrp = LRP(model)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(self, relevance, torch.tensor([[12, 33.6, 50.4]]))
def test_lrp_Identity(self) -> None:
model, inputs = _get_simple_model()
with torch.no_grad():
model.linear.weight.data[0][0] = -2 # type: ignore
model.eval()
model.linear.rule = IdentityRule() # type: ignore
model.linear2.rule = EpsilonRule() # type: ignore
lrp = LRP(model)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(self, relevance, torch.tensor([[24.0, 36.0, 36.0]]))
def test_lrp_simple2_attributions(self) -> None:
model, input = _get_simple_model2()
lrp = LRP(model)
relevance = lrp.attribute(input, 0)
self.assertEqual(relevance.shape, input.shape) # type: ignore
def test_lrp_skip_connection(self) -> None:
# A custom addition module needs to be used so that relevance is
# propagated correctly.
class Addition_Module(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
return x1 + x2
class SkipConnection(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
self.linear.weight.data.fill_(5)
self.add = Addition_Module()
            def forward(self, input: Tensor) -> Tensor:
x = self.add(self.linear(input), input)
return x
model = SkipConnection()
input = torch.Tensor([[2, 3]])
model.add.rule = EpsilonRule() # type: ignore
lrp = LRP(model)
relevance = lrp.attribute(input, target=1)
assertTensorAlmostEqual(self, relevance, torch.Tensor([[10, 18]]))
def test_lrp_maxpool1D(self) -> None:
class MaxPoolModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
self.linear.weight.data.fill_(2.0)
self.maxpool = nn.MaxPool1d(2)
            def forward(self, input: Tensor) -> Tensor:
return self.maxpool(self.linear(input))
model = MaxPoolModel()
input = torch.tensor([[[1.0, 2.0], [5.0, 6.0]]])
lrp = LRP(model)
relevance = lrp.attribute(input, target=1)
assertTensorAlmostEqual(self, relevance, torch.Tensor([[[0.0, 0.0], [10, 12]]]))
def test_lrp_maxpool2D(self) -> None:
class MaxPoolModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.maxpool = nn.MaxPool2d(2)
def forward(self, input: Tensor) -> Tensor:
return self.maxpool(input)
model = MaxPoolModel()
input = torch.tensor([[[[1.0, 2.0], [5.0, 6.0]]]])
lrp = LRP(model)
relevance = lrp.attribute(input)
assertTensorAlmostEqual(
self, relevance, torch.Tensor([[[[0.0, 0.0], [0.0, 6.0]]]])
)
def test_lrp_maxpool3D(self) -> None:
class MaxPoolModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.maxpool = nn.MaxPool3d(2)
def forward(self, input: Tensor) -> Tensor:
return self.maxpool(input)
model = MaxPoolModel()
input = torch.tensor([[[[[1.0, 2.0], [5.0, 6.0]], [[3.0, 4.0], [7.0, 8.0]]]]])
lrp = LRP(model)
relevance = lrp.attribute(input)
assertTensorAlmostEqual(
self,
relevance,
torch.Tensor([[[[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 8.0]]]]]),
)
def test_lrp_multi(self) -> None:
model = BasicModel_MultiLayer()
input = torch.Tensor([[1, 2, 3]])
add_input = 0
output = model(input)
output_add = model(input, add_input=add_input)
self.assertTrue(torch.equal(output, output_add))
lrp = LRP(model)
attributions = lrp.attribute(input, target=0)
attributions_add_input = lrp.attribute(
input, target=0, additional_forward_args=(add_input,)
)
self.assertTrue(
torch.equal(attributions, attributions_add_input) # type: ignore
) # type: ignore
def test_lrp_multi_inputs(self) -> None:
model = BasicModel_MultiLayer()
input = torch.Tensor([[1, 2, 3]])
input = (input, 3 * input)
lrp = LRP(model)
attributions, delta = lrp.attribute(
input, target=0, return_convergence_delta=True
)
self.assertEqual(len(input), 2)
assertTensorAlmostEqual(self, attributions[0], torch.Tensor([[16, 32, 48]]))
assertTensorAlmostEqual(self, delta, torch.Tensor([-104.0]))
def test_lrp_ixg_equivalency(self) -> None:
model, inputs = _get_simple_model()
lrp = LRP(model)
attributions_lrp = lrp.attribute(inputs)
ixg = InputXGradient(model)
attributions_ixg = ixg.attribute(inputs)
assertTensorAlmostEqual(
self, attributions_lrp, attributions_ixg
) # LRP with epsilon rules reduces to input x gradient for this model.
def test_lrp_repeated_module(self) -> None:
model = BasicModelWithReusedLinear()
inp = torch.ones(2, 3)
lrp = LRP(model)
with self.assertRaisesRegex(RuntimeError, "more than once"):
lrp.attribute(inp, target=0)
|
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from typing import Any, cast, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._utils.attribution import Attribution
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel,
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
BasicModelBoolInput,
BasicModelWithSparseInputs,
)
from torch import Tensor
class Test(BaseTest):
r"""
The following conversion tests check assumptions that the rest of
the tests in this file rely on.
We test them explicitly so that, if the underlying behaviour ever
changes, it will be easier to pinpoint and update the affected tests.
"""
def test_python_float_conversion(self) -> None:
x = torch.tensor(3, dtype=cast(torch.dtype, float))
self.assertEqual(x.dtype, torch.float64)
def test_python_int_conversion(self) -> None:
x = torch.tensor(5, dtype=cast(torch.dtype, int))
self.assertEqual(x.dtype, torch.int64)
def test_float32_tensor_item_conversion(self) -> None:
x = torch.tensor(5, dtype=torch.float32)
y = torch.tensor(x.item()) # .item() returns a python float
# torch.tensor infers the default dtype (torch.float32) from a python
# float; it is only torch.float64 if you explicitly pass dtype=float
self.assertEqual(y.dtype, torch.float32)
def test_int32_tensor_item_conversion(self) -> None:
x = torch.tensor(5, dtype=torch.int32)
y = torch.tensor(x.item()) # .item() returns a python int
self.assertEqual(y.dtype, torch.int64)
def test_simple_ablation(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
ablation_algo, inp, [[80.0, 200.0, 120.0]], perturbations_per_eval=(1, 2, 3)
)
def test_simple_ablation_int_to_int(self) -> None:
ablation_algo = FeatureAblation(BasicModel())
inp = torch.tensor([[-3, 1, 2]])
self._ablation_test_assert(
ablation_algo, inp, [[-3, 0, 0]], perturbations_per_eval=(1, 2, 3)
)
def test_simple_ablation_int_to_int_nt(self) -> None:
ablation_algo = NoiseTunnel(FeatureAblation(BasicModel()))
inp = torch.tensor([[-3, 1, 2]]).float()
self._ablation_test_assert(
ablation_algo,
inp,
[[-3.0, 0.0, 0.0]],
perturbations_per_eval=(1, 2, 3),
stdevs=1e-10,
)
def test_simple_ablation_int_to_float(self) -> None:
net = BasicModel()
def wrapper_func(inp):
return net(inp).float()
ablation_algo = FeatureAblation(wrapper_func)
inp = torch.tensor([[-3, 1, 2]])
self._ablation_test_assert(
ablation_algo, inp, [[-3.0, 0.0, 0.0]], perturbations_per_eval=(1, 2, 3)
)
def test_simple_ablation_with_mask(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
ablation_algo,
inp,
[[280.0, 280.0, 120.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_ablation_with_baselines(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
ablation_algo,
inp,
[[248.0, 248.0, 104.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=4,
perturbations_per_eval=(1, 2, 3),
)
def test_simple_ablation_boolean(self) -> None:
ablation_algo = FeatureAblation(BasicModelBoolInput())
inp = torch.tensor([[True, False, True]])
self._ablation_test_assert(
ablation_algo,
inp,
[[40.0, 40.0, 40.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_ablation_boolean_with_baselines(self) -> None:
ablation_algo = FeatureAblation(BasicModelBoolInput())
inp = torch.tensor([[True, False, True]])
self._ablation_test_assert(
ablation_algo,
inp,
[[-40.0, -40.0, 0.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=True,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_sample_ablation(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
ablation_algo,
inp,
[[8.0, 35.0, 12.0], [80.0, 200.0, 120.0]],
perturbations_per_eval=(1, 2, 3),
)
def test_multi_sample_ablation_with_mask(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
self._ablation_test_assert(
ablation_algo,
inp,
[[41.0, 41.0, 12.0], [280.0, 280.0, 120.0]],
feature_mask=mask,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_ablation_with_mask(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer_MultiInput())
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[492.0, 492.0, 492.0], [200.0, 200.0, 200.0]],
[[80.0, 200.0, 120.0], [0.0, 400.0, 0.0]],
[[0.0, 400.0, 40.0], [60.0, 60.0, 60.0]],
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2),
expected[0:1],
additional_input=(inp3, 1),
feature_mask=(mask1, mask2),
perturbations_per_eval=(1, 2, 3),
)
expected_with_baseline = (
[[468.0, 468.0, 468.0], [184.0, 192.0, 184.0]],
[[68.0, 188.0, 108.0], [-12.0, 388.0, -12.0]],
[[-16.0, 384.0, 24.0], [12.0, 12.0, 12.0]],
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_ablation_with_mask_nt(self) -> None:
ablation_algo = NoiseTunnel(FeatureAblation(BasicModel_MultiLayer_MultiInput()))
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[492.0, 492.0, 492.0], [200.0, 200.0, 200.0]],
[[80.0, 200.0, 120.0], [0.0, 400.0, 0.0]],
[[0.0, 400.0, 40.0], [60.0, 60.0, 60.0]],
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
stdevs=1e-10,
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2),
expected[0:1],
additional_input=(inp3, 1),
feature_mask=(mask1, mask2),
perturbations_per_eval=(1, 2, 3),
stdevs=1e-10,
)
expected_with_baseline = (
[[468.0, 468.0, 468.0], [184.0, 192.0, 184.0]],
[[68.0, 188.0, 108.0], [-12.0, 388.0, -12.0]],
[[-16.0, 384.0, 24.0], [12.0, 12.0, 12.0]],
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
stdevs=1e-10,
)
def test_multi_input_ablation(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer_MultiInput())
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
baseline1 = torch.tensor([[3.0, 0.0, 0.0]])
baseline2 = torch.tensor([[0.0, 1.0, 0.0]])
baseline3 = torch.tensor([[1.0, 2.0, 3.0]])
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
(
[[80.0, 400.0, 0.0], [68.0, 200.0, 120.0]],
[[80.0, 196.0, 120.0], [0.0, 396.0, 0.0]],
[[-4.0, 392.0, 28.0], [4.0, 32.0, 0.0]],
),
additional_input=(1,),
baselines=(baseline1, baseline2, baseline3),
perturbations_per_eval=(1, 2, 3),
)
baseline1_exp = torch.tensor([[3.0, 0.0, 0.0], [3.0, 0.0, 2.0]])
baseline2_exp = torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 4.0]])
baseline3_exp = torch.tensor([[3.0, 2.0, 4.0], [1.0, 2.0, 3.0]])
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
(
[[80.0, 400.0, 0.0], [68.0, 200.0, 112.0]],
[[80.0, 196.0, 120.0], [0.0, 396.0, -16.0]],
[[-12.0, 392.0, 24.0], [4.0, 32.0, 0.0]],
),
additional_input=(1,),
baselines=(baseline1_exp, baseline2_exp, baseline3_exp),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_multi_input_conv(self) -> None:
ablation_algo = FeatureAblation(BasicModel_ConvNet_One_Conv())
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
self._ablation_test_assert(
ablation_algo,
(inp, inp2),
(67 * torch.ones_like(inp), 13 * torch.ones_like(inp2)),
feature_mask=(torch.tensor(0), torch.tensor(1)),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
)
self._ablation_test_assert(
ablation_algo,
(inp, inp2),
(
[
[
[
[0.0, 2.0, 4.0, 3.0],
[4.0, 9.0, 10.0, 7.0],
[4.0, 13.0, 14.0, 11.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
[
[
[
[1.0, 2.0, 2.0, 1.0],
[1.0, 2.0, 2.0, 1.0],
[1.0, 2.0, 2.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
),
perturbations_per_eval=(1, 3, 7, 14),
)
# Remaining tests are for cases where forward function returns a scalar
# per batch, as either a float, integer, 0d tensor or 1d tensor.
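# Since a single scalar is returned for the whole batch, the batch is
# treated as one example for attribution purposes, so, as the error tests
# below assert, perturbations_per_eval > 1 is rejected in this mode.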
def test_error_perturbations_per_eval_limit_batch_scalar(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
ablation = FeatureAblation(lambda inp: torch.sum(net(inp)).item())
with self.assertRaises(AssertionError):
_ = ablation.attribute(inp, perturbations_per_eval=2)
def test_error_agg_mode_arbitrary_output(self) -> None:
net = BasicModel_MultiLayer()
# output 3 numbers for the entire batch
# note that the batch size == 2
def forward_func(inp):
pred = net(inp)
return torch.stack([pred.sum(), pred.max(), pred.min()])
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
ablation = FeatureAblation(forward_func)
with self.assertRaises(AssertionError):
_ = ablation.attribute(inp, perturbations_per_eval=2)
def test_empty_sparse_features(self) -> None:
ablation_algo = FeatureAblation(BasicModelWithSparseInputs())
inp1 = torch.tensor([[1.0, -2.0, 3.0], [2.0, -1.0, 3.0]])
inp2 = torch.tensor([])
exp: Tuple[List[List[float]], List[float]] = ([[9.0, -3.0, 12.0]], [0.0])
self._ablation_test_assert(ablation_algo, (inp1, inp2), exp, target=None)
def test_sparse_features(self) -> None:
ablation_algo = FeatureAblation(BasicModelWithSparseInputs())
inp1 = torch.tensor([[1.0, -2.0, 3.0], [2.0, -1.0, 3.0]])
# Length of sparse index list may not match # of examples
inp2 = torch.tensor([1, 7, 2, 4, 5, 3, 6])
self._ablation_test_assert(
ablation_algo, (inp1, inp2), ([[9.0, -3.0, 12.0]], [2.0]), target=None
)
def test_single_ablation_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)).item())
self._single_input_one_sample_batch_scalar_ablation_assert(
ablation_algo, dtype=torch.float64
)
def test_single_ablation_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)))
self._single_input_one_sample_batch_scalar_ablation_assert(ablation_algo)
def test_single_ablation_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)).reshape(1))
self._single_input_one_sample_batch_scalar_ablation_assert(ablation_algo)
def test_single_ablation_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: int(torch.sum(net(inp)).item()))
self._single_input_one_sample_batch_scalar_ablation_assert(
ablation_algo, dtype=torch.int64
)
def test_multi_sample_ablation_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)).item())
self._single_input_multi_sample_batch_scalar_ablation_assert(
ablation_algo,
dtype=torch.float64,
)
def test_multi_sample_ablation_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)))
self._single_input_multi_sample_batch_scalar_ablation_assert(ablation_algo)
def test_multi_sample_ablation_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)).reshape(1))
self._single_input_multi_sample_batch_scalar_ablation_assert(ablation_algo)
def test_multi_sample_ablation_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: int(torch.sum(net(inp)).item()))
self._single_input_multi_sample_batch_scalar_ablation_assert(
ablation_algo, dtype=torch.int64
)
def test_multi_inp_ablation_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
ablation_algo = FeatureAblation(lambda *inp: torch.sum(net(*inp)).item())
self._multi_input_batch_scalar_ablation_assert(
ablation_algo,
dtype=torch.float64,
)
def test_multi_inp_ablation_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
ablation_algo = FeatureAblation(lambda *inp: torch.sum(net(*inp)))
self._multi_input_batch_scalar_ablation_assert(ablation_algo)
def test_multi_inp_ablation_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
ablation_algo = FeatureAblation(lambda *inp: torch.sum(net(*inp)).reshape(1))
self._multi_input_batch_scalar_ablation_assert(ablation_algo)
def test_multi_inp_ablation_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
ablation_algo = FeatureAblation(lambda *inp: int(torch.sum(net(*inp)).item()))
self._multi_input_batch_scalar_ablation_assert(ablation_algo, dtype=torch.int64)
def test_unassociated_output_3d_tensor(self) -> None:
def forward_func(inp):
return torch.ones(1, 5, 3, 2)
inp = torch.randn(10, 5)
mask = torch.arange(5).unsqueeze(0)
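# forward_func ignores the input entirely and always returns a constant
# (1, 5, 3, 2) tensor, i.e. 5 * 3 * 2 = 30 scalar outputs, so every one
# of the 30 per-output attribution rows below is expected to be zero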
self._ablation_test_assert(
ablation_algo=FeatureAblation(forward_func),
test_input=inp,
baselines=None,
target=None,
feature_mask=mask,
perturbations_per_eval=(1,),
expected_ablation=torch.zeros((5 * 3 * 2,) + inp[0].shape),
)
def test_single_inp_ablation_multi_output_aggr(self) -> None:
def forward_func(inp):
return inp[0].unsqueeze(0)
inp = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
mask = torch.tensor([[0, 1, 2]])
self._ablation_test_assert(
ablation_algo=FeatureAblation(forward_func),
test_input=inp,
feature_mask=mask,
baselines=None,
target=None,
perturbations_per_eval=(1,),
# should just be the first input spread across each feature
expected_ablation=[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]],
)
def test_single_inp_ablation_multi_output_aggr_mask_none(self) -> None:
def forward_func(inp):
return inp[0].unsqueeze(0)
inp = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
self._ablation_test_assert(
ablation_algo=FeatureAblation(forward_func),
test_input=inp,
feature_mask=None,
baselines=None,
target=None,
perturbations_per_eval=(1,),
# should just be the first input spread across each feature
expected_ablation=[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]],
)
def test_single_inp_ablation_multi_output_aggr_non_standard(self) -> None:
def forward_func(inp):
return inp[0].unsqueeze(0)
inp = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
mask = torch.tensor([[0, 0, 1]])
self._ablation_test_assert(
ablation_algo=FeatureAblation(forward_func),
test_input=inp,
feature_mask=mask,
baselines=None,
target=None,
perturbations_per_eval=(1,),
expected_ablation=[[1.0, 1.0, 0.0], [2.0, 2.0, 0.0], [0.0, 0.0, 3.0]],
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_ablation_with_show_progress(self, mock_stderr) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._ablation_test_assert(
ablation_algo,
inp,
[[80.0, 200.0, 120.0]],
perturbations_per_eval=(bsz,),
show_progress=True,
)
output = mock_stderr.getvalue()
# to test if progress calculation aligns with the actual iteration
# all perturbations_per_eval should reach progress of 100%
assert (
"Feature Ablation attribution: 100%" in output
), f"Error progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_ablation_with_mask_and_show_progress(self, mock_stderr) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._ablation_test_assert(
ablation_algo,
inp,
[[280.0, 280.0, 120.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(bsz,),
show_progress=True,
)
output = mock_stderr.getvalue()
# to test if progress calculation aligns with the actual iteration
# all perturbations_per_eval should reach progress of 100%
assert (
"Feature Ablation attribution: 100%" in output
), f"Error progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
def _single_input_one_sample_batch_scalar_ablation_assert(
self, ablation_algo: Attribution, dtype: torch.dtype = torch.float32
) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._ablation_test_assert(
ablation_algo,
inp,
torch.tensor([[82.0, 82.0, 24.0]], dtype=torch.float32).to(dtype),
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def _single_input_multi_sample_batch_scalar_ablation_assert(
self,
ablation_algo: Attribution,
dtype: torch.dtype = torch.float32,
) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._ablation_test_assert(
ablation_algo,
inp,
torch.tensor([[642.0, 642.0, 264.0]], dtype=torch.float32).to(dtype),
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def _multi_input_batch_scalar_ablation_assert(
self,
ablation_algo: Attribution,
dtype: torch.dtype = torch.float32,
) -> None:
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2]])
expected = (
torch.tensor([[1784, 1784, 1784]], dtype=dtype),
torch.tensor([[160, 1200, 240]], dtype=dtype),
torch.tensor([[16, 880, 104]], dtype=dtype),
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
perturbations_per_eval=(1,),
target=None,
)
def _ablation_test_assert(
self,
ablation_algo: Attribution,
test_input: TensorOrTupleOfTensorsGeneric,
expected_ablation: Union[
Tensor,
Tuple[Tensor, ...],
# NOTE: mypy doesn't support recursive types
# we would use List[NestedList[Union[int, float]]]
# or Tuple[NestedList[Union[int, float]], ...]
# but... we can't.
#
# See https://github.com/python/mypy/issues/731
List[Any],
Tuple[List[Any], ...],
],
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: TargetType = 0,
**kwargs: Any,
) -> None:
for batch_size in perturbations_per_eval:
self.assertTrue(ablation_algo.multiplies_by_inputs)
attributions = ablation_algo.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
**kwargs,
)
if isinstance(expected_ablation, tuple):
for i in range(len(expected_ablation)):
expected = expected_ablation[i]
if not isinstance(expected, torch.Tensor):
expected = torch.tensor(expected)
self.assertEqual(attributions[i].shape, expected.shape)
self.assertEqual(attributions[i].dtype, expected.dtype)
assertTensorAlmostEqual(self, attributions[i], expected)
else:
if not isinstance(expected_ablation, torch.Tensor):
expected_ablation = torch.tensor(expected_ablation)
self.assertEqual(attributions.shape, expected_ablation.shape)
self.assertEqual(attributions.dtype, expected_ablation.dtype)
assertTensorAlmostEqual(self, attributions, expected_ablation)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env fbpython
import math
from typing import cast
from unittest.mock import Mock, patch
import torch
from captum.attr._core.dataloader_attr import DataLoaderAttribution, InputRole
from captum.attr._core.feature_ablation import FeatureAblation
from parameterized import parameterized
from tests.helpers.basic import (
assertAttributionComparision,
assertTensorAlmostEqual,
BaseTest,
)
from torch import Tensor
from torch.utils.data import DataLoader, TensorDataset
def sum_forward(*inps):
inps = [torch.flatten(inp, start_dim=1) for inp in inps]
return torch.cat(inps, dim=1).sum(1)
class Linear(torch.nn.Module):
def __init__(self, n):
super().__init__()
self.linear = torch.nn.Linear(n, 1)
def forward(self, *inps):
inps = [torch.flatten(inp, start_dim=1) for inp in inps]
return self.linear(torch.cat(inps, dim=1))
mock_dataset = TensorDataset(
# 1D feature
torch.tensor(
[
[0.0, 0.1],
[0.3, 0.4],
[0.6, 0.7],
[0.9, 1.0],
[1.2, 1.3],
]
),
# 2D feature
torch.tensor(
[
[[0.1, 0.2], [0.3, 0.2]],
[[0.4, 0.5], [0.3, 0.2]],
[[0.8, 0.1], [0.2, 0.5]],
[[1.1, 0.7], [0.1, 0.7]],
[[0.6, 1.4], [1.2, 0.4]],
]
),
# scalar feature or label
torch.tensor(
[
[0],
[1],
[0],
[0],
[1],
]
),
)
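# Flattened, the three features above have 2 + 4 + 1 = 7 columns per
# example, matching Linear(7) and the n_features = 7 used in the tests.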
class Test(BaseTest):
@parameterized.expand(
[
(sum_forward,),
(Linear(7),),
]
)
def test_dl_attr(self, forward) -> None:
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attributions = dl_fa.attribute(dataloader)
# the default reduce of DataLoaderAttribution is equivalent to concatenating all batches
attr_list = []
for batch in dataloader:
batch_attr = fa.attribute(tuple(batch))
attr_list.append(batch_attr)
expected_attr = tuple(
torch.cat(feature_attrs, dim=0) for feature_attrs in zip(*attr_list)
)
assertAttributionComparision(self, dl_attributions, expected_attr)
@parameterized.expand(
[
(sum_forward,),
(Linear(7),),
]
)
def test_dl_attr_with_mask(self, forward) -> None:
# FeatureAblation does not support grouping across tensors for now;
# add such test cases once FeatureAblation supports grouping across tensors
masks = (
torch.tensor([[0, 0]]),
torch.tensor([[[1, 2], [3, 2]]]),
torch.tensor([[4]]),
)
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attributions = dl_fa.attribute(dataloader, feature_mask=masks)
# the default reduce of DataLoaderAttribution is equivalent to concatenating all batches
attr_list = []
for batch in dataloader:
batch_attr = fa.attribute(tuple(batch), feature_mask=masks)
attr_list.append(batch_attr)
expected_attr = tuple(
torch.cat(feature_attrs, dim=0) for feature_attrs in zip(*attr_list)
)
assertAttributionComparision(self, dl_attributions, expected_attr)
@parameterized.expand(
[
(sum_forward,),
(Linear(7),),
]
)
def test_dl_attr_with_baseline(self, forward) -> None:
baselines = (
torch.tensor([[0, -1]]),
1,
0.1,
)
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attributions = dl_fa.attribute(dataloader, baselines=baselines)
# the default reduce of DataLoaderAttribution is equivalent to concatenating all batches
attr_list = []
for batch in dataloader:
batch_attr = fa.attribute(tuple(batch), baselines=baselines)
attr_list.append(batch_attr)
expected_attr = tuple(
torch.cat(feature_attrs, dim=0) for feature_attrs in zip(*attr_list)
)
assertAttributionComparision(self, dl_attributions, expected_attr)
def test_dl_attr_with_reduce_and_to_metric(self) -> None:
forward = sum_forward
func_call_counts = {
"reduce": 0,
"to_metric": 0,
}
def reduce(accum, cur_output, cur_inputs):
func_call_counts["reduce"] += 1
accum = {"sum": 0, "count": 0} if accum is None else accum
accum["sum"] += cur_output.sum()
accum["count"] += len(cur_output)
return accum
def to_metric(accum):
func_call_counts["to_metric"] += 1
self.assertIsInstance(accum, dict)
return torch.tensor(
[
accum["sum"] / accum["count"],
accum["sum"],
]
)
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
batch_size = 2
dataloader = DataLoader(mock_dataset, batch_size=batch_size)
dl_attribution = dl_fa.attribute(
dataloader,
reduce=reduce,
to_metric=to_metric,
return_input_shape=False,
)
n_iters = len(dataloader)
n_features = 7
# once other attribution methods are supported, this can differ from n_features
n_perturbations = 7
n_passes = n_perturbations + 1 # +1 for base forward without perturbation
n_outputs = 2 # [mean, sum]
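# e.g. with 5 examples and batch_size 2, n_iters = 3; reduce is then
# called once per batch per pass (3 * 8 = 24 times), while to_metric is
# called once per pass (8 times), as asserted below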
self.assertEqual(func_call_counts["reduce"], n_iters * n_passes)
self.assertEqual(func_call_counts["to_metric"], n_passes)
expected_attr_shape = (n_outputs, n_features)
self.assertEqual(type(dl_attribution), Tensor)
dl_attribution = cast(Tensor, dl_attribution)
self.assertEqual(dl_attribution.shape, expected_attr_shape)
@parameterized.expand(
[
([0, 0, 0],),
([0, 1, 0],),
([0, 1, 1],),
([0, 1, 2],),
([0, 2, 2],),
]
)
def test_dl_attr_with_input_roles(self, input_roles) -> None:
n_inputs = len(input_roles)
n_forward_inputs = sum(1 for r in input_roles if r != InputRole.no_forward)
n_attr_inputs = sum(1 for r in input_roles if r == InputRole.need_attr)
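# the integer role lists in the parameterization above correspond to
# InputRole members (need_attr, need_forward, no_forward)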
def reduce(accum, cur_output, cur_inputs):
# all inputs from dataloader should be given to reduce
self.assertEqual(len(cur_inputs), n_inputs)
return cur_output if accum is None else torch.cat([accum, cur_output])
def forward(*forward_inputs):
# inputs of InputRole.no_forward should not be passed to forward
self.assertEqual(len(forward_inputs), n_forward_inputs)
return sum_forward(*forward_inputs)
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
batch_size = 2
dataloader = DataLoader(mock_dataset, batch_size=batch_size)
dl_attributions = dl_fa.attribute(
dataloader,
input_roles=input_roles,
reduce=reduce,
)
# only inputs with InputRole.need_attr should receive attributions
self.assertEqual(len(dl_attributions), n_attr_inputs)
# the default reduce of DataLoaderAttribution is equivalent to concatenating all batches
attr_list = []
for batch in dataloader:
attr_inputs = tuple(
_ for _, role in zip(batch, input_roles) if role == InputRole.need_attr
)
additional_forward_args = tuple(
_
for _, role in zip(batch, input_roles)
if role == InputRole.need_forward
)
batch_attr = fa.attribute(
attr_inputs, additional_forward_args=additional_forward_args
)
attr_list.append(batch_attr)
expected_attr = tuple(
torch.cat(feature_attrs, dim=0) for feature_attrs in zip(*attr_list)
)
assertAttributionComparision(self, dl_attributions, expected_attr)
def test_dl_attr_not_return_input_shape(self) -> None:
forward = sum_forward
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attribution = dl_fa.attribute(dataloader, return_input_shape=False)
expected_attr_shape = (len(mock_dataset), 7)
self.assertEqual(type(dl_attribution), Tensor)
dl_attribution = cast(Tensor, dl_attribution)
self.assertEqual(dl_attribution.shape, expected_attr_shape)
# the default reduce of DataLoaderAttribution is equivalent to concatenating all batches
attr_list = []
for batch in dataloader:
batch_attr = fa.attribute(tuple(batch))
attr_list.append(batch_attr)
expected_attr = torch.cat(
[
# flatten feature dim
torch.cat(feature_attrs, dim=0).flatten(start_dim=1)
for feature_attrs in zip(*attr_list)
],
dim=1,
)
assertTensorAlmostEqual(self, dl_attribution, expected_attr)
def test_dl_attr_with_mask_not_return_input_shape(self) -> None:
forward = sum_forward
masks = (
torch.tensor([[0, 0]]),
torch.tensor([[[1, 2], [3, 2]]]),
torch.tensor([[4]]),
)
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attribution = dl_fa.attribute(
dataloader, feature_mask=masks, return_input_shape=False
)
expected_attr_shape = (len(mock_dataset), 5)
self.assertEqual(type(dl_attribution), Tensor)
dl_attribution = cast(Tensor, dl_attribution)
self.assertEqual(dl_attribution.shape, expected_attr_shape)
@parameterized.expand([(2,), (3,), (4,)])
def test_dl_attr_with_perturb_per_pass(self, perturb_per_pass) -> None:
forward = sum_forward
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
mock_dl_iter = Mock(wraps=DataLoader.__iter__)
with patch.object(DataLoader, "__iter__", lambda self: mock_dl_iter(self)):
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attributions = dl_fa.attribute(
dataloader, perturbations_per_pass=perturb_per_pass
)
n_features = 7
# 2 extra __iter__ calls: one to fetch a batch to infer the input
# format, one to compute the unperturbed output
n_iter_overhead = 2
self.assertEqual(
mock_dl_iter.call_count,
math.ceil(n_features / perturb_per_pass) + n_iter_overhead,
)
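# e.g. with perturb_per_pass=2: ceil(7 / 2) = 4 attribution passes plus
# the 2 overhead calls gives 6 expected __iter__ invocations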
# the default reduce of DataLoaderAttribution is equivalent to concatenating all batches
attr_list = []
for batch in dataloader:
batch_attr = fa.attribute(tuple(batch))
attr_list.append(batch_attr)
expected_attr = tuple(
torch.cat(feature_attrs, dim=0) for feature_attrs in zip(*attr_list)
)
assertAttributionComparision(self, dl_attributions, expected_attr)
|
#!/usr/bin/env python3
import unittest
from typing import List
import torch
from captum.attr._utils.approximation_methods import Riemann, riemann_builders
from tests.helpers.basic import assertTensorAlmostEqual
class Test(unittest.TestCase):
def __init__(self, methodName: str = "runTest") -> None:
super().__init__(methodName)
def test_riemann_0(self) -> None:
with self.assertRaises(AssertionError):
step_sizes, alphas = riemann_builders()
step_sizes(0)
alphas(0)
def test_riemann_2(self) -> None:
expected_step_sizes_lrm = [0.5, 0.5]
expected_step_sizes_trapezoid = [0.25, 0.25]
expected_left = [0.0, 0.5]
expected_right = [0.5, 1.0]
expected_middle = [0.25, 0.75]
expected_trapezoid = [0.0, 1.0]
self._assert_steps_and_alphas(
2,
expected_step_sizes_lrm,
expected_step_sizes_trapezoid,
expected_left,
expected_right,
expected_middle,
expected_trapezoid,
)
def test_riemann_3(self) -> None:
expected_step_sizes = [1 / 3] * 3
expected_step_sizes_trapezoid = [1 / 6, 1 / 3, 1 / 6]
expected_left = [0.0, 1 / 3, 2 / 3]
expected_right = [1 / 3, 2 / 3, 1.0]
expected_middle = [1 / 6, 0.5, 1 - 1 / 6]
expected_trapezoid = [0.0, 0.5, 1.0]
self._assert_steps_and_alphas(
3,
expected_step_sizes,
expected_step_sizes_trapezoid,
expected_left,
expected_right,
expected_middle,
expected_trapezoid,
)
def test_riemann_4(self) -> None:
expected_step_sizes = [1 / 4] * 4
expected_step_sizes_trapezoid = [1 / 8, 1 / 4, 1 / 4, 1 / 8]
expected_left = [0.0, 0.25, 0.5, 0.75]
expected_right = [0.25, 0.5, 0.75, 1.0]
expected_middle = [0.125, 0.375, 0.625, 0.875]
expected_trapezoid = [0.0, 1 / 3, 2 / 3, 1.0]
self._assert_steps_and_alphas(
4,
expected_step_sizes,
expected_step_sizes_trapezoid,
expected_left,
expected_right,
expected_middle,
expected_trapezoid,
)
def _assert_steps_and_alphas(
self,
n: int,
expected_step_sizes: List[float],
expected_step_sizes_trapezoid: List[float],
expected_left: List[float],
expected_right: List[float],
expected_middle: List[float],
expected_trapezoid: List[float],
) -> None:
step_sizes_left, alphas_left = riemann_builders(Riemann.left)
step_sizes_right, alphas_right = riemann_builders(Riemann.right)
step_sizes_middle, alphas_middle = riemann_builders(Riemann.middle)
step_sizes_trapezoid, alphas_trapezoid = riemann_builders(Riemann.trapezoid)
assertTensorAlmostEqual(
self,
torch.tensor(expected_step_sizes),
step_sizes_left(n),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
torch.tensor(expected_step_sizes),
step_sizes_right(n),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
torch.tensor(expected_step_sizes),
step_sizes_middle(n),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
torch.tensor(expected_step_sizes_trapezoid),
step_sizes_trapezoid(n),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self, torch.tensor(expected_left), alphas_left(n), delta=0.05, mode="max"
)
assertTensorAlmostEqual(
self, torch.tensor(expected_right), alphas_right(n), delta=0.05, mode="max"
)
assertTensorAlmostEqual(
self,
torch.tensor(expected_middle),
alphas_middle(n),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
torch.tensor(expected_trapezoid),
alphas_trapezoid(n),
delta=0.05,
mode="max",
)
# TODO write a test case for gauss-legendre
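# A minimal sketch of such a test (an assumption, not part of the original
# suite): it relies on gauss_legendre_builders from
# captum.attr._utils.approximation_methods, which mirrors riemann_builders
# but rescales the Gauss-Legendre nodes and weights to the [0, 1] interval.
def test_gauss_legendre_sketch(self) -> None:
from captum.attr._utils.approximation_methods import (
gauss_legendre_builders,
)
step_sizes, alphas = gauss_legendre_builders()
for n in (2, 3, 4):
# rescaled Gauss-Legendre weights should sum to ~1 over [0, 1]
self.assertAlmostEqual(sum(step_sizes(n)), 1.0, delta=0.01)
# all nodes must lie strictly inside the open interval (0, 1)
self.assertTrue(all(0.0 < a < 1.0 for a in alphas(n)))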
|
#!/usr/bin/env python3
import unittest
from typing import Any, Callable, cast, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.layer.layer_conductance import LayerConductance
from captum.attr._core.neuron.neuron_conductance import NeuronConductance
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_conductance_input_linear2(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._conductance_input_test_assert(
net, net.linear2, inp, (0,), [0.0, 390.0, 0.0]
)
def test_simple_conductance_input_linear2_wo_mult_by_inputs(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[100.0, 100.0, 100.0]], requires_grad=True)
self._conductance_input_test_assert(
net,
net.linear2,
inp,
(0,),
[3.96, 3.96, 3.96],
multiply_by_inputs=False,
)
def test_simple_conductance_input_linear1(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_input_test_assert(net, net.linear1, inp, 0, [0.0, 90.0, 0.0])
def test_simple_conductance_input_linear1_selector_fn(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_input_test_assert(
net, net.linear1, inp, lambda x: x[:, 0], [0.0, 90.0, 0.0]
)
def test_simple_conductance_input_relu(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 70.0, 30.0]], requires_grad=True)
self._conductance_input_test_assert(net, net.relu, inp, (3,), [0.0, 70.0, 30.0])
def test_simple_conductance_multi_input_linear2(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._conductance_input_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
(0,),
([[0.0, 156.0, 0.0]], [[0.0, 156.0, 0.0]], [[0.0, 78.0, 0.0]]),
(4,),
)
def test_simple_conductance_multi_input_relu(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._conductance_input_test_assert(
net,
net.model.relu,
(inp1, inp2),
(3,),
([[0.0, 50.0, 5.0]], [[0.0, 20.0, 25.0]]),
(inp3, 5),
)
def test_simple_conductance_multi_input_batch_relu(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0], [0.0, 0.0, 10.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0], [0.0, 0.0, 10.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 5.0]])
self._conductance_input_test_assert(
net,
net.model.relu,
(inp1, inp2),
(3,),
(
[[0.0, 50.0, 5.0], [0.0, 0.0, 50.0]],
[[0.0, 20.0, 25.0], [0.0, 0.0, 50.0]],
),
(inp3, 5),
)
def test_layer_tuple_selector_fn(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._conductance_input_test_assert(
net, net.multi_relu, inp, lambda x: x[0][:, 1], [0.0, 6.0, 0.0]
)
def test_matching_conv2_multi_input_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(2, 1, 10, 10)
self._conductance_input_sum_test_assert(net, net.conv2, inp, 0.0)
# trying different baseline
self._conductance_input_sum_test_assert(net, net.conv2, inp, 0.000001)
def test_matching_relu2_multi_input_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(3, 1, 10, 10, requires_grad=True)
baseline = 20 * torch.randn(3, 1, 10, 10, requires_grad=True)
self._conductance_input_sum_test_assert(net, net.relu2, inp, baseline)
def test_matching_relu2_with_scalar_base_multi_input_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(3, 1, 10, 10, requires_grad=True)
self._conductance_input_sum_test_assert(net, net.relu2, inp, 0.0)
def test_matching_pool2_multi_input_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(1, 1, 10, 10)
baseline = 20 * torch.randn(1, 1, 10, 10, requires_grad=True)
self._conductance_input_sum_test_assert(net, net.pool2, inp, baseline)
def test_matching_layer_tuple_selector_fn(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
lc = LayerConductance(net, net.multi_relu)
layer_attr = lc.attribute(inp, target=0, n_steps=500, method="gausslegendre")
nc = NeuronConductance(net, net.multi_relu)
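# Completeness check: for each tuple element i and neuron j, the neuron
# conductance summed over input features should match the corresponding
# layer conductance entry.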
for i in range(len(layer_attr)):
for j in range(layer_attr[i].shape[1]):
neuron_attr = nc.attribute(
inp,
lambda x: x[i][:, j],
target=0,
n_steps=500,
method="gausslegendre",
)
self.assertAlmostEqual(
neuron_attr.sum().item(),
layer_attr[i][0][j].item(),
delta=0.005,
)
def _conductance_input_test_assert(
self,
model: Module,
target_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
test_neuron: Union[int, Tuple[int, ...], Callable],
expected_input_conductance: Union[List[float], Tuple[List[List[float]], ...]],
additional_input: Any = None,
multiply_by_inputs: bool = True,
) -> None:
for internal_batch_size in (None, 5, 20):
cond = NeuronConductance(
model,
target_layer,
multiply_by_inputs=multiply_by_inputs,
)
self.assertEqual(cond.multiplies_by_inputs, multiply_by_inputs)
attributions = cond.attribute(
test_input,
test_neuron,
target=0,
n_steps=500,
method="gausslegendre",
additional_forward_args=additional_input,
internal_batch_size=internal_batch_size,
)
if isinstance(expected_input_conductance, tuple):
for i in range(len(expected_input_conductance)):
for j in range(len(expected_input_conductance[i])):
assertTensorAlmostEqual(
self,
attributions[i][j : j + 1].squeeze(0),
expected_input_conductance[i][j],
delta=0.1,
mode="max",
)
else:
if isinstance(attributions, Tensor):
assertTensorAlmostEqual(
self,
attributions.squeeze(0),
expected_input_conductance,
delta=0.1,
mode="max",
)
else:
raise AssertionError(
"Attributions not returning a Tensor when expected."
)
def _conductance_input_sum_test_assert(
self,
model: Module,
target_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
test_baseline: BaselineType = None,
):
layer_cond = LayerConductance(model, target_layer)
attributions = cast(
Tensor,
layer_cond.attribute(
test_input,
baselines=test_baseline,
target=0,
n_steps=500,
method="gausslegendre",
),
)
neuron_cond = NeuronConductance(model, target_layer)
attr_shape = cast(Tuple[int, ...], attributions.shape)
for i in range(attr_shape[1]):
for j in range(attr_shape[2]):
for k in range(attr_shape[3]):
neuron_vals = neuron_cond.attribute(
test_input,
(i, j, k),
baselines=test_baseline,
target=0,
n_steps=500,
)
for n in range(attributions.shape[0]):
self.assertAlmostEqual(
torch.sum(neuron_vals[n]).item(),
attributions[n, i, j, k].item(),
delta=0.005,
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
from __future__ import print_function
from typing import Tuple, Union
import torch
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.neuron.neuron_deep_lift import NeuronDeepLift, NeuronDeepLiftShap
from tests.attr.layer.test_layer_deeplift import (
_create_inps_and_base_for_deeplift_neuron_layer_testing,
_create_inps_and_base_for_deepliftshap_neuron_layer_testing,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_ConvNet_MaxPool3d,
LinearMaxPoolLinearModel,
ReLULinearModel,
)
from torch import Tensor
class Test(BaseTest):
def test_relu_neuron_deeplift(self) -> None:
model = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
inputs = (x1, x2)
neuron_dl = NeuronDeepLift(model, model.relu)
attributions = neuron_dl.attribute(inputs, 0, attribute_to_neuron_input=False)
assertTensorAlmostEqual(self, attributions[0], [[0.0, 0.0, 0.0]])
assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])
def test_deeplift_compare_with_and_without_inplace(self) -> None:
model1 = ReLULinearModel(inplace=True)
model2 = ReLULinearModel()
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
inputs = (x1, x2)
neuron_dl1 = NeuronDeepLift(model1, model1.relu)
attributions1 = neuron_dl1.attribute(inputs, 0, attribute_to_neuron_input=False)
neuron_dl2 = NeuronDeepLift(model2, model2.relu)
attributions2 = neuron_dl2.attribute(inputs, 0, attribute_to_neuron_input=False)
assertTensorAlmostEqual(self, attributions1[0], attributions2[0])
assertTensorAlmostEqual(self, attributions1[1], attributions2[1])
def test_linear_neuron_deeplift(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
neuron_dl = NeuronDeepLift(model, model.l3)
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=True
)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=False
)
self.assertTrue(neuron_dl.multiplies_by_inputs)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[6.0, 9.0, 0.0]])
def test_linear_neuron_deeplift_wo_inp_marginal_effects(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
neuron_dl = NeuronDeepLift(model, model.l3, multiply_by_inputs=False)
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=False
)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[2.0, 3.0, 0.0]])
def test_relu_deeplift_with_custom_attr_func(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
neuron_dl = NeuronDeepLift(model, model.l3)
expected = ([[0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0]])
self._relu_custom_attr_func_assert(neuron_dl, inputs, baselines, expected)
def test_relu_neuron_deeplift_shap(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
neuron_dl = NeuronDeepLiftShap(model, model.relu)
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=False
)
assertTensorAlmostEqual(self, attributions[0], [[0.0, 0.0, 0.0]])
assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])
def test_linear_neuron_deeplift_shap(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
neuron_dl = NeuronDeepLiftShap(model, model.l3)
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=True
)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=False
)
self.assertTrue(neuron_dl.multiplies_by_inputs)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[6.0, 9.0, 0.0]])
def test_linear_neuron_deeplift_shap_wo_inp_marginal_effects(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
neuron_dl = NeuronDeepLiftShap(model, model.l3, multiply_by_inputs=False)
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=False
)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[2.0, 3.0, 0.0]])
attributions = neuron_dl.attribute(
inputs, lambda x: x[:, 0], baselines, attribute_to_neuron_input=False
)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[2.0, 3.0, 0.0]])
def test_relu_deepliftshap_with_custom_attr_func(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
neuron_dl = NeuronDeepLiftShap(model, model.l3)
expected = (torch.zeros(1, 3), torch.zeros(1, 3))
self._relu_custom_attr_func_assert(neuron_dl, inputs, baselines, expected)
def _relu_custom_attr_func_assert(
self,
attr_method: Union[NeuronDeepLift, NeuronDeepLiftShap],
inputs: TensorOrTupleOfTensorsGeneric,
baselines,
expected,
) -> None:
def custom_attr_func(
multipliers: Tuple[Tensor, ...],
inputs: Tuple[Tensor, ...],
baselines: Union[None, Tuple[Union[Tensor, int, float], ...]] = None,
) -> Tuple[Tensor, ...]:
return tuple(multiplier * 0.0 for multiplier in multipliers)
attr = attr_method.attribute(
inputs, 0, baselines, custom_attribution_func=custom_attr_func
)
assertTensorAlmostEqual(self, attr[0], expected[0], 0.0)
assertTensorAlmostEqual(self, attr[1], expected[1], 0.0)
def test_lin_maxpool_lin_classification(self) -> None:
inputs = torch.ones(2, 4)
baselines = torch.tensor([[1, 2, 3, 9], [4, 8, 6, 7]]).float()
model = LinearMaxPoolLinearModel()
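# pool1's output feeds lin2 directly, so attributing to pool1's output
# should match attributing to lin2's input (asserted below)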
ndl = NeuronDeepLift(model, model.pool1)
attr = ndl.attribute(inputs, neuron_selector=0, baselines=baselines)
ndl2 = NeuronDeepLift(model, model.lin2)
attr2 = ndl2.attribute(
inputs,
neuron_selector=0,
baselines=baselines,
attribute_to_neuron_input=True,
)
assertTensorAlmostEqual(self, attr, attr2)
def test_convnet_maxpool2d_classification(self) -> None:
inputs = 100 * torch.randn(2, 1, 10, 10)
model = BasicModel_ConvNet()
ndl = NeuronDeepLift(model, model.pool1)
attr = ndl.attribute(inputs, neuron_selector=(0, 0, 0))
ndl2 = NeuronDeepLift(model, model.conv2)
attr2 = ndl2.attribute(
inputs, neuron_selector=(0, 0, 0), attribute_to_neuron_input=True
)
assertTensorAlmostEqual(self, attr.sum(), attr2.sum())
def test_convnet_maxpool3d_classification(self) -> None:
inputs = 100 * torch.randn(2, 1, 10, 10, 10)
model = BasicModel_ConvNet_MaxPool3d()
ndl = NeuronDeepLift(model, model.pool1)
attr = ndl.attribute(inputs, neuron_selector=(0, 0, 0, 0))
ndl2 = NeuronDeepLift(model, model.conv2)
attr2 = ndl2.attribute(
inputs, neuron_selector=(0, 0, 0, 0), attribute_to_neuron_input=True
)
assertTensorAlmostEqual(self, attr.sum(), attr2.sum())
|
#!/usr/bin/env python3
import unittest
from typing import Any, Callable, Tuple, Union
import torch
from captum._utils.typing import TensorLikeList, TensorOrTupleOfTensorsGeneric
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.neuron.neuron_integrated_gradients import (
NeuronIntegratedGradients,
)
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_ig_input_linear2(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._ig_input_test_assert(net, net.linear2, inp, 0, [[0.0, 390.0, 0.0]])
def test_simple_ig_input_linear2_wo_mult_by_inputs(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[100.0, 100.0, 100.0]])
self._ig_input_test_assert(
net, net.linear2, inp, 0, [[3.96, 3.96, 3.96]], multiply_by_inputs=False
)
def test_simple_ig_input_linear1(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._ig_input_test_assert(net, net.linear1, inp, (0,), [[0.0, 100.0, 0.0]])
def test_simple_ig_input_relu(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 6.0, 14.0]], requires_grad=True)
self._ig_input_test_assert(net, net.relu, inp, (0,), [[0.0, 3.0, 7.0]])
def test_simple_ig_input_relu2(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._ig_input_test_assert(net, net.relu, inp, 1, [[0.0, 5.0, 4.0]])
def test_simple_ig_input_relu_selector_fn(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._ig_input_test_assert(
net, net.relu, inp, lambda x: torch.sum(x[:, 2:]), [[0.0, 10.0, 8.0]]
)
def test_simple_ig_input_relu2_agg_neurons(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
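# a tuple containing a slice selects a range of neurons; captum
# aggregates (sums) the selected neurons into one scalar for attribution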
self._ig_input_test_assert(
net, net.relu, inp, (slice(0, 2, 1),), [[0.0, 5.0, 4.0]]
)
def test_simple_ig_multi_input_linear2(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._ig_input_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
(0,),
([[0.0, 156.0, 0.0]], [[0.0, 156.0, 0.0]], [[0.0, 78.0, 0.0]]),
(4,),
)
def test_simple_ig_multi_input_relu(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 6.0, 14.0]])
inp2 = torch.tensor([[0.0, 6.0, 14.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._ig_input_test_assert(
net,
net.model.relu,
(inp1, inp2),
(0,),
([[0.0, 1.5, 3.5]], [[0.0, 1.5, 3.5]]),
(inp3, 0.5),
)
def test_simple_ig_multi_input_relu_batch(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 80.0, 0.0]])
inp2 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 20.0, 0.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 20.0, 0.0]])
self._ig_input_test_assert(
net,
net.model.relu,
(inp1, inp2),
(0,),
([[0.0, 1.5, 3.5], [0.0, 40.0, 0.0]], [[0.0, 1.5, 3.5], [0.0, 10.0, 0.0]]),
(inp3, 0.5),
)
def test_simple_ig_multi_input_relu_batch_selector_fn(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 80.0, 0.0]])
inp2 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 20.0, 0.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 20.0, 0.0]])
self._ig_input_test_assert(
net,
net.model.relu,
(inp1, inp2),
lambda x: torch.sum(x),
(
[[0.0, 10.5, 24.5], [0.0, 160.0, 0.0]],
[[0.0, 10.5, 24.5], [0.0, 40.0, 0.0]],
),
(inp3, 0.5),
)
def test_matching_output_gradient(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(2, 1, 10, 10, requires_grad=True)
baseline = 20 * torch.randn(2, 1, 10, 10, requires_grad=True)
self._ig_matching_test_assert(net, net.softmax, inp, baseline)
def _ig_input_test_assert(
self,
model: Module,
target_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
test_neuron: Union[int, Tuple[Union[int, slice], ...], Callable],
expected_input_ig: Union[TensorLikeList, Tuple[TensorLikeList, ...]],
additional_input: Any = None,
multiply_by_inputs: bool = True,
) -> None:
for internal_batch_size in [None, 5, 20]:
grad = NeuronIntegratedGradients(
model, target_layer, multiply_by_inputs=multiply_by_inputs
)
self.assertEqual(grad.multiplies_by_inputs, multiply_by_inputs)
attributions = grad.attribute(
test_input,
test_neuron,
n_steps=200,
method="gausslegendre",
additional_forward_args=additional_input,
internal_batch_size=internal_batch_size,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_input_ig, delta=0.1
)
def _ig_matching_test_assert(
self,
model: Module,
output_layer: Module,
test_input: Tensor,
baseline: Union[None, Tensor] = None,
) -> None:
out = model(test_input)
input_attrib = IntegratedGradients(model)
ig_attrib = NeuronIntegratedGradients(model, output_layer)
for i in range(out.shape[1]):
ig_vals = input_attrib.attribute(test_input, target=i, baselines=baseline)
neuron_ig_vals = ig_attrib.attribute(test_input, (i,), baselines=baseline)
assertTensorAlmostEqual(
self, ig_vals, neuron_ig_vals, delta=0.001, mode="max"
)
self.assertEqual(neuron_ig_vals.shape, test_input.shape)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from typing import Any, Callable, cast, List, Tuple, Union
import torch
from captum._utils.gradient import _forward_layer_eval
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.neuron.neuron_gradient import NeuronGradient
from captum.attr._core.saliency import Saliency
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_gradient_input_linear2(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._gradient_input_test_assert(net, net.linear2, inp, (0,), [[4.0, 4.0, 4.0]])
def test_simple_gradient_input_linear1(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._gradient_input_test_assert(net, net.linear1, inp, (0,), [[1.0, 1.0, 1.0]])
def test_simple_gradient_input_relu_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._gradient_input_test_assert(
net, net.relu, inp, (0,), [[1.0, 1.0, 1.0]], attribute_to_neuron_input=True
)
def test_simple_gradient_input_linear1_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._gradient_input_test_assert(net, net.linear1, inp, (0,), [[1.0, 1.0, 1.0]])
def test_simple_gradient_input_relu(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]], requires_grad=True)
self._gradient_input_test_assert(net, net.relu, inp, 0, [[0.0, 0.0, 0.0]])
def test_simple_gradient_input_relu2(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._gradient_input_test_assert(net, net.relu, inp, 1, [[1.0, 1.0, 1.0]])
def test_simple_gradient_input_relu_selector_fn(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._gradient_input_test_assert(
net, net.relu, inp, lambda x: torch.sum(x), [[3.0, 3.0, 3.0]]
)
def test_simple_gradient_input_relu2_agg_neurons(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._gradient_input_test_assert(
net, net.relu, inp, (slice(0, 2, 1),), [[1.0, 1.0, 1.0]]
)
def test_simple_gradient_multi_input_linear2(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 100.0, 0.0]])
inp2 = torch.tensor([[0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 0.0]])
self._gradient_input_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
(0,),
([[12.0, 12.0, 12.0]], [[12.0, 12.0, 12.0]], [[12.0, 12.0, 12.0]]),
(3,),
)
def test_simple_gradient_multi_input_linear1(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 100.0, 0.0]])
inp2 = torch.tensor([[0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 0.0]])
self._gradient_input_test_assert(
net,
net.model.linear1,
(inp1, inp2),
(0,),
([[5.0, 5.0, 5.0]], [[5.0, 5.0, 5.0]]),
(inp3, 5),
)
def test_matching_output_gradient(self) -> None:
net = BasicModel_ConvNet()
inp = torch.randn(2, 1, 10, 10, requires_grad=True)
self._gradient_matching_test_assert(net, net.softmax, inp)
def test_matching_intermediate_gradient(self) -> None:
net = BasicModel_ConvNet()
inp = torch.randn(3, 1, 10, 10)
self._gradient_matching_test_assert(net, net.relu2, inp)
def _gradient_input_test_assert(
self,
model: Module,
target_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
test_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
expected_input_gradient: Union[
List[List[float]], Tuple[List[List[float]], ...]
],
additional_input: Any = None,
attribute_to_neuron_input: bool = False,
) -> None:
grad = NeuronGradient(model, target_layer)
attributions = grad.attribute(
test_input,
test_neuron_selector,
additional_forward_args=additional_input,
attribute_to_neuron_input=attribute_to_neuron_input,
)
assertTensorTuplesAlmostEqual(self, attributions, expected_input_gradient)
def _gradient_matching_test_assert(
self, model: Module, output_layer: Module, test_input: Tensor
) -> None:
out = _forward_layer_eval(model, test_input, output_layer)
# Select first element of tuple
out = out[0]
gradient_attrib = NeuronGradient(model, output_layer)
self.assertFalse(gradient_attrib.multiplies_by_inputs)
for i in range(cast(Tuple[int, ...], out.shape)[1]):
neuron: Tuple[int, ...] = (i,)
while len(neuron) < len(out.shape) - 1:
neuron = neuron + (0,)
input_attrib = Saliency(
lambda x: _forward_layer_eval(
model, x, output_layer, grad_enabled=True
)[0][(slice(None), *neuron)]
)
sal_vals = input_attrib.attribute(test_input, abs=False)
grad_vals = gradient_attrib.attribute(test_input, neuron)
# Verify matching sizes
self.assertEqual(grad_vals.shape, sal_vals.shape)
self.assertEqual(grad_vals.shape, test_input.shape)
assertTensorAlmostEqual(self, sal_vals, grad_vals, delta=0.001, mode="max")
if __name__ == "__main__":
unittest.main()
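# Sketch (hypothetical two-layer model, not a test fixture) of what the slice
# and callable selectors above compute: assuming, as the tests suggest, that a
# multi-neuron selector aggregates the selected neurons by summation before
# differentiating, a slice and an equivalent summing callable should agree.
def _demo_slice_selector_equals_sum() -> None:
    model = torch.nn.Sequential(torch.nn.Linear(3, 4), torch.nn.ReLU())
    inp = torch.randn(2, 3)
    grad = NeuronGradient(model, model[1])
    via_slice = grad.attribute(inp, (slice(0, 2, 1),))
    via_callable = grad.attribute(inp, lambda out: out[:, 0:2].sum(dim=1))
    assert torch.allclose(via_slice, via_callable, atol=1e-6)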
|
#!/usr/bin/env python3
import unittest
from typing import Any, Callable, Tuple, Union
import torch
from captum._utils.typing import (
BaselineType,
TensorLikeList,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._core.neuron.neuron_feature_ablation import NeuronFeatureAblation
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_ablation_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
net,
net.linear2,
inp,
[[280.0, 280.0, 120.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
)
def test_multi_sample_ablation_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
self._ablation_test_assert(
net,
net.linear2,
inp,
[[41.0, 41.0, 12.0], [280.0, 280.0, 120.0]],
feature_mask=mask,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_sample_ablation_with_selector_fn(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
self._ablation_test_assert(
net,
net.linear2,
inp,
[[82.0, 82.0, 24.0], [560.0, 560.0, 240.0]],
feature_mask=mask,
perturbations_per_eval=(1, 2, 3),
neuron_selector=lambda x: torch.sum(x, dim=1),
)
def test_multi_sample_ablation_with_slice(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
self._ablation_test_assert(
net,
net.linear2,
inp,
[[82.0, 82.0, 24.0], [560.0, 560.0, 240.0]],
feature_mask=mask,
perturbations_per_eval=(1, 2, 3),
neuron_selector=(slice(0, 2, 1),),
)
def test_multi_input_ablation_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[492.0, 492.0, 492.0], [200.0, 200.0, 200.0]],
[[80.0, 200.0, 120.0], [0.0, 400.0, 0.0]],
[[0.0, 400.0, 40.0], [60.0, 60.0, 60.0]],
)
self._ablation_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
self._ablation_test_assert(
net,
net.model.linear2,
(inp1, inp2),
expected[0:1],
additional_input=(inp3, 1),
feature_mask=(mask1, mask2),
perturbations_per_eval=(1, 2, 3),
)
expected_with_baseline = (
[[468.0, 468.0, 468.0], [184.0, 192.0, 184.0]],
[[68.0, 188.0, 108.0], [-12.0, 388.0, -12.0]],
[[-16.0, 384.0, 24.0], [12.0, 12.0, 12.0]],
)
self._ablation_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_ablation(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
baseline1 = torch.tensor([[3.0, 0.0, 0.0]])
baseline2 = torch.tensor([[0.0, 1.0, 0.0]])
baseline3 = torch.tensor([[1.0, 2.0, 3.0]])
self._ablation_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
(
[[80.0, 400.0, 0.0], [68.0, 200.0, 120.0]],
[[80.0, 196.0, 120.0], [0.0, 396.0, 0.0]],
[[-4.0, 392.0, 28.0], [4.0, 32.0, 0.0]],
),
additional_input=(1,),
baselines=(baseline1, baseline2, baseline3),
perturbations_per_eval=(1, 2, 3),
)
baseline1_exp = torch.tensor([[3.0, 0.0, 0.0], [3.0, 0.0, 2.0]])
baseline2_exp = torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 4.0]])
baseline3_exp = torch.tensor([[3.0, 2.0, 4.0], [1.0, 2.0, 3.0]])
self._ablation_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
(
[[80.0, 400.0, 0.0], [68.0, 200.0, 112.0]],
[[80.0, 196.0, 120.0], [0.0, 396.0, -16.0]],
[[-12.0, 392.0, 24.0], [4.0, 32.0, 0.0]],
),
additional_input=(1,),
baselines=(baseline1_exp, baseline2_exp, baseline3_exp),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
self._ablation_test_assert(
net,
net.relu2,
(inp, inp2),
(67 * torch.ones_like(inp), 13 * torch.ones_like(inp2)),
feature_mask=(torch.tensor(0), torch.tensor(1)),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
)
self._ablation_test_assert(
net,
net.relu2,
(inp, inp2),
(
[
[
[
[0.0, 2.0, 4.0, 3.0],
[4.0, 9.0, 10.0, 7.0],
[4.0, 13.0, 14.0, 11.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
[
[
[
[1.0, 2.0, 2.0, 1.0],
[1.0, 2.0, 2.0, 1.0],
[1.0, 2.0, 2.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
),
perturbations_per_eval=(1, 3, 7, 14),
)
def test_simple_multi_input_conv_intermediate(self) -> None:
net = BasicModel_ConvNet_One_Conv(inplace=True)
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
(torch.zeros_like(inp), torch.zeros_like(inp2)),
feature_mask=(torch.tensor(0), torch.tensor(1)),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
neuron_selector=(1, 0, 0),
)
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
(45 * torch.ones_like(inp), 9 * torch.ones_like(inp2)),
feature_mask=(torch.tensor(0), torch.tensor(1)),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
neuron_selector=(1, 0, 0),
attribute_to_neuron_input=True,
)
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
(
[
[
[
[0.0, 1.0, 2.0, 0.0],
[4.0, 5.0, 6.0, 0.0],
[8.0, 9.0, 10.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
[
[
[
[1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
),
perturbations_per_eval=(1, 3, 7, 14),
neuron_selector=(1, 0, 0),
attribute_to_neuron_input=True,
)
def _ablation_test_assert(
self,
model: Module,
layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
expected_ablation: Union[
TensorLikeList,
Tuple[TensorLikeList, ...],
Tuple[Tensor, ...],
],
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable] = 0,
attribute_to_neuron_input: bool = False,
) -> None:
for batch_size in perturbations_per_eval:
ablation = NeuronFeatureAblation(model, layer)
self.assertTrue(ablation.multiplies_by_inputs)
attributions = ablation.attribute(
test_input,
neuron_selector=neuron_selector,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
attribute_to_neuron_input=attribute_to_neuron_input,
)
if isinstance(expected_ablation, tuple):
for i in range(len(expected_ablation)):
assertTensorAlmostEqual(self, attributions[i], expected_ablation[i])
else:
assertTensorAlmostEqual(self, attributions, expected_ablation)
if __name__ == "__main__":
unittest.main()
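# Hand-rolled sketch (hypothetical linear layer, not a fixture) of the quantity
# NeuronFeatureAblation reports above: for each feature (or mask group), the
# drop in the selected neuron's output when that feature is replaced by its
# baseline. For a linear layer this reduces to weight * (input - baseline).
def _demo_manual_neuron_ablation() -> None:
    torch.manual_seed(0)
    lin = torch.nn.Linear(3, 2)
    inp = torch.tensor([[1.0, 2.0, 3.0]])
    neuron, baseline = 0, 0.0
    with torch.no_grad():
        orig = lin(inp)[0, neuron]
        attribs = torch.zeros_like(inp)
        for j in range(inp.shape[1]):
            ablated = inp.clone()
            ablated[0, j] = baseline
            attribs[0, j] = orig - lin(ablated)[0, neuron]
        expected = lin.weight[neuron] * (inp[0] - baseline)
        assert torch.allclose(attribs[0], expected, atol=1e-6)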
|
#!/usr/bin/env python3
from typing import Callable, Tuple, Union
import torch
from captum.attr._core.neuron.neuron_gradient_shap import NeuronGradientShap
from captum.attr._core.neuron.neuron_integrated_gradients import (
NeuronIntegratedGradients,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel_MultiLayer
from tests.helpers.classification_models import SoftmaxModel
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_basic_multilayer(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, 20.0, 10.0]])
baselines = torch.zeros(2, 3)
ngs = NeuronGradientShap(model, model.linear1, multiply_by_inputs=False)
attr = ngs.attribute(inputs, 0, baselines=baselines, stdevs=0.0)
self.assertFalse(ngs.multiplies_by_inputs)
assertTensorAlmostEqual(self, attr, [[1.0, 1.0, 1.0]])
def test_basic_multilayer_wo_mult_by_inputs(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, 20.0, 10.0]])
baselines = torch.randn(2, 3)
self._assert_attributions(model, model.linear1, inputs, baselines, 0, 60)
def test_basic_multilayer_wo_mult_by_inputs_agg_neurons(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, 20.0, 10.0]])
baselines = torch.randn(2, 3)
self._assert_attributions(
model, model.linear1, inputs, baselines, (slice(0, 1, 1),), 60
)
self._assert_attributions(
model, model.linear1, inputs, baselines, lambda x: x[:, 0:1], 60
)
def test_classification(self) -> None:
def custom_baseline_fn(inputs: Tensor) -> Tensor:
num_in = inputs.shape[1] # type: ignore
return torch.arange(0.0, num_in * 5.0).reshape(5, num_in)
num_in = 40
n_samples = 100
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
model.eval()
inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
baselines = custom_baseline_fn
self._assert_attributions(model, model.relu1, inputs, baselines, 1, n_samples)
def _assert_attributions(
self,
model: Module,
layer: Module,
inputs: Tensor,
baselines: Union[Tensor, Callable[..., Tensor]],
neuron_ind: Union[int, Tuple[Union[int, slice], ...], Callable],
n_samples: int = 5,
) -> None:
ngs = NeuronGradientShap(model, layer)
nig = NeuronIntegratedGradients(model, layer)
attrs_gs = ngs.attribute(
inputs, neuron_ind, baselines=baselines, n_samples=n_samples, stdevs=0.09
)
if callable(baselines):
baselines = baselines(inputs)
attrs_ig = []
for baseline in torch.unbind(baselines):
attrs_ig.append(
nig.attribute(inputs, neuron_ind, baselines=baseline.unsqueeze(0))
)
combined_attrs_ig = torch.stack(attrs_ig, dim=0).mean(dim=0)
self.assertTrue(ngs.multiplies_by_inputs)
assertTensorAlmostEqual(self, attrs_gs, combined_attrs_ig, 0.5)
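# A compact sketch of the estimator behind _assert_attributions above: gradient
# SHAP averages gradient * (input - baseline) over randomly drawn baselines and
# interpolation points, which approaches the per-baseline average of integrated
# gradients. forward_func is assumed to return one scalar per example; all
# names here are illustrative, not Captum APIs.
def _demo_gradient_shap_estimator(forward_func, inputs, baselines, n_samples=100):
    total = torch.zeros_like(inputs)
    for _ in range(n_samples):
        base = baselines[torch.randint(len(baselines), (1,))]
        alpha = torch.rand(1)
        point = (base + alpha * (inputs - base)).detach().requires_grad_()
        (grads,) = torch.autograd.grad(forward_func(point).sum(), point)
        total = total + grads * (inputs - base)
    return total / n_samples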
|
#!/usr/bin/env python3
from __future__ import print_function
import os
import tempfile
import unittest
from typing import Dict, List
import torch
HAS_PYTEXT = True
try:
from captum.attr._models.pytext import (
BaselineGenerator,
configure_model_integ_grads_embeddings,
)
from pytext.common.constants import DatasetFieldName
from pytext.config.component import create_featurizer, create_model
from pytext.config.doc_classification import ModelInputConfig, TargetConfig
from pytext.config.field_config import FeatureConfig, WordFeatConfig
from pytext.data import CommonMetadata
from pytext.data.doc_classification_data_handler import DocClassificationDataHandler
from pytext.data.featurizer import SimpleFeaturizer
from pytext.fields import FieldMeta
from pytext.models.decoders.mlp_decoder import MLPDecoder
from pytext.models.doc_model import DocModel_Deprecated
from pytext.models.embeddings.word_embedding import WordEmbedding
from pytext.models.representations.bilstm_doc_attention import BiLSTMDocAttention
except ImportError:
HAS_PYTEXT = False
class VocabStub:
def __init__(self) -> None:
self.itos: List = []
self.stoi: Dict = {}
# TODO add more test cases for dict features
class TestWordEmbeddings(unittest.TestCase):
def setUp(self):
if not HAS_PYTEXT:
return self.skipTest("Skip the test since PyText is not installed")
self.embedding_file, self.embedding_path = tempfile.mkstemp()
self.word_embedding_file, self.word_embedding_path = tempfile.mkstemp()
self.decoder_file, self.decoder_path = tempfile.mkstemp()
self.representation_file, self.representation_path = tempfile.mkstemp()
self.model = self._create_dummy_model()
self.data_handler = self._create_dummy_data_handler()
def tearDown(self) -> None:
for f in (
self.embedding_file,
self.word_embedding_file,
self.decoder_file,
self.representation_file,
):
os.close(f)
for p in (
self.embedding_path,
self.word_embedding_path,
self.decoder_path,
self.representation_path,
):
os.remove(p)
def test_word_embeddings(self) -> None:
embedding_list = configure_model_integ_grads_embeddings(self.model)
integrated_gradients_embedding = embedding_list[0]
input = torch.arange(0, 300).unsqueeze(0).unsqueeze(0)
self.assertEqual(integrated_gradients_embedding.embedding_dim, 300)
self.assertEqual(embedding_list.embedding_dim[0], 300)
self.assertEqual(embedding_list(input).shape[2], input.shape[2])
self.assertTrue(
torch.allclose(
integrated_gradients_embedding.get_attribution_map(input)["word"], input
)
)
def test_baseline_generation(self) -> None:
baseline_generator = BaselineGenerator(self.model, self.data_handler, "cpu")
embedding_list = configure_model_integ_grads_embeddings(self.model)
integrated_gradients_embedding = embedding_list[0]
self.assertTrue(
torch.allclose(
baseline_generator.generate_baseline(integrated_gradients_embedding, 5)[
0
],
torch.tensor([[1, 1, 1, 1, 1]]),
)
)
def _create_dummy_data_handler(self):
feat = WordFeatConfig(
vocab_size=4,
vocab_from_all_data=True,
vocab_from_train_data=True,
vocab_from_pretrained_embeddings=False,
pretrained_embeddings_path=None,
)
featurizer = create_featurizer(
SimpleFeaturizer.Config(), FeatureConfig(word_feat=feat)
)
data_handler = DocClassificationDataHandler.from_config(
DocClassificationDataHandler.Config(),
ModelInputConfig(word_feat=feat),
TargetConfig(),
featurizer=featurizer,
)
train_data = data_handler.gen_dataset(
[{"text": "<pad>"}], include_label_fields=False
)
eval_data = data_handler.gen_dataset(
[{"text": "<pad>"}], include_label_fields=False
)
test_data = data_handler.gen_dataset(
[{"text": "<pad>"}], include_label_fields=False
)
data_handler.init_feature_metadata(train_data, eval_data, test_data)
return data_handler
def _create_dummy_model(self):
return create_model(
DocModel_Deprecated.Config(
representation=BiLSTMDocAttention.Config(
save_path=self.representation_path
),
decoder=MLPDecoder.Config(save_path=self.decoder_path),
),
FeatureConfig(
word_feat=WordEmbedding.Config(
embed_dim=300, save_path=self.word_embedding_path
),
save_path=self.embedding_path,
),
self._create_dummy_meta_data(),
)
def _create_dummy_meta_data(self):
text_field_meta = FieldMeta()
text_field_meta.vocab = VocabStub()
text_field_meta.vocab_size = 4
text_field_meta.unk_token_idx = 1
text_field_meta.pad_token_idx = 0
text_field_meta.pretrained_embeds_weight = None
label_meta = FieldMeta()
label_meta.vocab = VocabStub()
label_meta.vocab_size = 3
metadata = CommonMetadata()
metadata.features = {DatasetFieldName.TEXT_FIELD: text_field_meta}
metadata.target = label_meta
return metadata
|
#!/usr/bin/env python3
from __future__ import print_function
import unittest
import torch
from captum.attr._models.base import (
configure_interpretable_embedding_layer,
InterpretableEmbeddingBase,
remove_interpretable_embedding_layer,
)
from tests.helpers.basic import assertTensorAlmostEqual
from tests.helpers.basic_models import BasicEmbeddingModel, TextModule
from torch.nn import Embedding
class Test(unittest.TestCase):
def test_interpretable_embedding_base(self) -> None:
input1 = torch.tensor([2, 5, 0, 1])
input2 = torch.tensor([3, 0, 0, 2])
model = BasicEmbeddingModel()
output = model(input1, input2)
interpretable_embedding1 = configure_interpretable_embedding_layer(
model, "embedding1"
)
self.assertEqual(model.embedding1, interpretable_embedding1)
self._assert_embeddings_equal(
input1,
output,
interpretable_embedding1,
model.embedding1.embedding_dim,
model.embedding1.num_embeddings,
)
interpretable_embedding2 = configure_interpretable_embedding_layer(
model, "embedding2.inner_embedding"
)
self.assertEqual(model.embedding2.inner_embedding, interpretable_embedding2)
self._assert_embeddings_equal(
input2,
output,
interpretable_embedding2,
model.embedding2.inner_embedding.embedding_dim,
model.embedding2.inner_embedding.num_embeddings,
)
# configure another embedding when one is already configured
with self.assertRaises(AssertionError):
configure_interpretable_embedding_layer(model, "embedding2.inner_embedding")
with self.assertRaises(AssertionError):
configure_interpretable_embedding_layer(model, "embedding1")
# remove interpretable embedding base
self.assertTrue(
model.embedding2.inner_embedding.__class__ is InterpretableEmbeddingBase
)
remove_interpretable_embedding_layer(model, interpretable_embedding2)
self.assertTrue(model.embedding2.inner_embedding.__class__ is Embedding)
self.assertTrue(model.embedding1.__class__ is InterpretableEmbeddingBase)
remove_interpretable_embedding_layer(model, interpretable_embedding1)
self.assertTrue(model.embedding1.__class__ is Embedding)
def test_custom_module(self) -> None:
input1 = torch.tensor([[3, 2, 0], [1, 2, 4]])
input2 = torch.tensor([[0, 1, 0], [1, 2, 3]])
model = BasicEmbeddingModel()
output = model(input1, input2)
expected = model.embedding2(input=input2)
# in this case we make the custom embedding layer, TextModule, interpretable
interpretable_embedding = configure_interpretable_embedding_layer(
model, "embedding2"
)
actual = interpretable_embedding.indices_to_embeddings(input=input2)
output_interpretable_models = model(input1, actual)
assertTensorAlmostEqual(
self, output, output_interpretable_models, delta=0.05, mode="max"
)
assertTensorAlmostEqual(self, expected, actual, delta=0.0, mode="max")
self.assertTrue(model.embedding2.__class__ is InterpretableEmbeddingBase)
remove_interpretable_embedding_layer(model, interpretable_embedding)
self.assertTrue(model.embedding2.__class__ is TextModule)
self._assert_embeddings_equal(input2, output, interpretable_embedding)
def test_nested_multi_embeddings(self) -> None:
input1 = torch.tensor([[3, 2, 0], [1, 2, 4]])
input2 = torch.tensor([[0, 1, 0], [2, 6, 8]])
input3 = torch.tensor([[4, 1, 0], [2, 2, 8]])
model = BasicEmbeddingModel(nested_second_embedding=True)
output = model(input1, input2, input3)
expected = model.embedding2(input=input2, another_input=input3)
# in this case we make the custom embedding layer, TextModule, interpretable
interpretable_embedding2 = configure_interpretable_embedding_layer(
model, "embedding2"
)
actual = interpretable_embedding2.indices_to_embeddings(
input=input2, another_input=input3
)
output_interpretable_models = model(input1, actual)
assertTensorAlmostEqual(
self, output, output_interpretable_models, delta=0.05, mode="max"
)
assertTensorAlmostEqual(self, expected, actual, delta=0.0, mode="max")
self.assertTrue(model.embedding2.__class__ is InterpretableEmbeddingBase)
remove_interpretable_embedding_layer(model, interpretable_embedding2)
self.assertTrue(model.embedding2.__class__ is TextModule)
self._assert_embeddings_equal(input2, output, interpretable_embedding2)
def _assert_embeddings_equal(
self,
input,
output,
interpretable_embedding,
embedding_dim=None,
num_embeddings=None,
):
if interpretable_embedding.embedding_dim is not None:
self.assertEqual(embedding_dim, interpretable_embedding.embedding_dim)
self.assertEqual(num_embeddings, interpretable_embedding.num_embeddings)
# indices_to_embeddings output shape: [num_indices, embedding_dim], e.g. [4, 100]
emb_shape = interpretable_embedding.indices_to_embeddings(input).shape
self.assertEqual(emb_shape[0], input.shape[0])
if interpretable_embedding.embedding_dim is not None:
self.assertEqual(emb_shape[1], interpretable_embedding.embedding_dim)
self.assertEqual(input.shape[0], output.shape[0])
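# Condensed sketch of the monkey-patching round trip exercised above: wrap an
# embedding layer so the model can be fed precomputed embeddings, then restore
# the original layer. Reuses this file's imports; whether a particular model
# accepts embeddings positionally is an assumption that holds for
# BasicEmbeddingModel.
def _demo_interpretable_embedding_roundtrip() -> None:
    model = BasicEmbeddingModel()
    input1 = torch.tensor([2, 5, 0, 1])
    input2 = torch.tensor([3, 0, 0, 2])
    wrapped = configure_interpretable_embedding_layer(model, "embedding1")
    emb1 = wrapped.indices_to_embeddings(input1)
    # while patched, the model accepts embeddings in place of indices
    model(emb1, input2)
    remove_interpretable_embedding_layer(model, wrapped)
    assert model.embedding1.__class__ is Embedding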
|
#!/usr/bin/env python3
from typing import Any, cast, List, Tuple, Union
import torch
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.layer.layer_activation import LayerActivation
from captum.attr._core.layer.layer_conductance import LayerConductance
from captum.attr._core.layer.layer_integrated_gradients import LayerIntegratedGradients
from captum.attr._models.base import (
configure_interpretable_embedding_layer,
remove_interpretable_embedding_layer,
)
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicEmbeddingModel,
BasicModel_MultiLayer,
BasicModel_MultiLayer_TrueMultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_compare_with_emb_patching(self) -> None:
input1 = torch.tensor([[2, 5, 0, 1]])
baseline1 = torch.tensor([[0, 0, 0, 0]])
# these will be used as additional forward args
input2 = torch.tensor([[0, 2, 4, 1]])
input3 = torch.tensor([[2, 3, 0, 1]])
self._assert_compare_with_emb_patching(
input1, baseline1, additional_args=(input2, input3)
)
def test_compare_with_emb_patching_wo_mult_by_inputs(self) -> None:
input1 = torch.tensor([[2, 5, 0, 1]])
baseline1 = torch.tensor([[0, 0, 0, 0]])
# these will be used as additional forward args
input2 = torch.tensor([[0, 2, 4, 1]])
input3 = torch.tensor([[2, 3, 0, 1]])
self._assert_compare_with_emb_patching(
input1,
baseline1,
additional_args=(input2, input3),
multiply_by_inputs=False,
)
def test_compare_with_emb_patching_batch(self) -> None:
input1 = torch.tensor([[2, 5, 0, 1], [3, 1, 1, 0]])
baseline1 = torch.tensor([[0, 0, 0, 0]])
# these will be used as additional forward args
input2 = torch.tensor([[0, 2, 4, 1], [2, 3, 5, 7]])
input3 = torch.tensor([[3, 5, 6, 7], [2, 3, 0, 1]])
self._assert_compare_with_emb_patching(
input1, baseline1, additional_args=(input2, input3)
)
def test_compare_with_layer_conductance_attr_to_outputs(self) -> None:
model = BasicModel_MultiLayer()
input = torch.tensor([[50.0, 50.0, 50.0]], requires_grad=True)
self._assert_compare_with_layer_conductance(model, input)
def test_compare_with_layer_conductance_attr_to_inputs(self) -> None:
# Note that Layer Conductance and Layer Integrated Gradients (IG) aren't
# exactly the same. Layer IG computes the partial derivatives of the output
# with respect to the layer and sums them along the straight-line path.
# Layer Conductance computes the same partial derivatives but follows a
# path defined by F(i) - F(i - 1) rather than the straight line.
# However, when that path becomes close to a straight line, Layer IG and
# Layer Conductance become numerically very close.
model = BasicModel_MultiLayer()
input = torch.tensor([[50.0, 50.0, 50.0]], requires_grad=True)
self._assert_compare_with_layer_conductance(model, input, True)
def test_multiple_tensors_compare_with_expected(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._assert_compare_with_expected(
net,
net.multi_relu,
inp,
([[90.0, 100.0, 100.0, 100.0]], [[90.0, 100.0, 100.0, 100.0]]),
)
def test_multiple_layers_single_inputs(self) -> None:
input1 = torch.tensor([[2, 5, 0, 1], [3, 1, 1, 0]])
input2 = torch.tensor([[0, 2, 4, 1], [2, 3, 5, 7]])
input3 = torch.tensor([[3, 5, 6, 7], [2, 3, 0, 1]])
inputs = (input1, input2, input3)
baseline = tuple(torch.zeros_like(inp) for inp in inputs)
self._assert_compare_with_emb_patching(
inputs,
baseline,
multiple_emb=True,
additional_args=None,
)
def test_multiple_layers_multiple_inputs_shared_input(self) -> None:
input1 = torch.randn(5, 3)
input2 = torch.randn(5, 3)
input3 = torch.randn(5, 3)
inputs = (input1, input2, input3)
baseline = tuple(torch.zeros_like(inp) for inp in inputs)
net = BasicModel_MultiLayer_TrueMultiInput()
lig = LayerIntegratedGradients(net, layer=[net.m1, net.m234])
ig = IntegratedGradients(net)
# test layer inputs
attribs_inputs = lig.attribute(
inputs, baseline, target=0, attribute_to_layer_input=True
)
attribs_inputs_regular_ig = ig.attribute(inputs, baseline, target=0)
self.assertIsInstance(attribs_inputs, list)
self.assertEqual(len(attribs_inputs), 2)
self.assertIsInstance(attribs_inputs[0], Tensor)
self.assertIsInstance(attribs_inputs[1], tuple)
self.assertEqual(len(attribs_inputs[1]), 3)
assertTensorTuplesAlmostEqual(
self,
# the last input to the second layer is the first model input,
# so its attributions are added to the first layer's
(attribs_inputs[0] + attribs_inputs[1][-1],) + attribs_inputs[1][0:-1],
attribs_inputs_regular_ig,
delta=1e-5,
)
# test layer outputs
attribs = lig.attribute(inputs, baseline, target=0)
ig = IntegratedGradients(lambda x, y: x + y)
attribs_ig = ig.attribute(
(net.m1(input1), net.m234(input2, input3, input1, 1)),
(net.m1(baseline[0]), net.m234(baseline[1], baseline[2], baseline[1], 1)),
target=0,
)
assertTensorTuplesAlmostEqual(self, attribs, attribs_ig, delta=1e-5)
def test_multiple_layers_multiple_input_outputs(self) -> None:
# test with multiple layers, where one layer accepts multiple inputs
input1 = torch.randn(5, 3)
input2 = torch.randn(5, 3)
input3 = torch.randn(5, 3)
input4 = torch.randn(5, 3)
inputs = (input1, input2, input3, input4)
baseline = tuple(torch.zeros_like(inp) for inp in inputs)
net = BasicModel_MultiLayer_TrueMultiInput()
lig = LayerIntegratedGradients(net, layer=[net.m1, net.m234])
ig = IntegratedGradients(net)
# test layer inputs
attribs_inputs = lig.attribute(
inputs, baseline, target=0, attribute_to_layer_input=True
)
attribs_inputs_regular_ig = ig.attribute(inputs, baseline, target=0)
self.assertIsInstance(attribs_inputs, list)
self.assertEqual(len(attribs_inputs), 2)
self.assertIsInstance(attribs_inputs[0], Tensor)
self.assertIsInstance(attribs_inputs[1], tuple)
self.assertEqual(len(attribs_inputs[1]), 3)
assertTensorTuplesAlmostEqual(
self,
(attribs_inputs[0],) + attribs_inputs[1],
attribs_inputs_regular_ig,
delta=1e-7,
)
# test layer outputs
attribs = lig.attribute(inputs, baseline, target=0)
ig = IntegratedGradients(lambda x, y: x + y)
attribs_ig = ig.attribute(
(net.m1(input1), net.m234(input2, input3, input4, 1)),
(net.m1(baseline[0]), net.m234(baseline[1], baseline[2], baseline[3], 1)),
target=0,
)
assertTensorTuplesAlmostEqual(self, attribs, attribs_ig, delta=1e-7)
def test_multiple_tensors_compare_with_exp_wo_mult_by_inputs(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 100.0, 0.0]])
base = torch.tensor([[0.0, 0.0, 0.0]])
target_layer = net.multi_relu
layer_ig = LayerIntegratedGradients(net, target_layer)
layer_ig_wo_mult_by_inputs = LayerIntegratedGradients(
net, target_layer, multiply_by_inputs=False
)
layer_act = LayerActivation(net, target_layer)
attributions = layer_ig.attribute(inp, target=0)
attributions_wo_mult_by_inputs = layer_ig_wo_mult_by_inputs.attribute(
inp, target=0
)
inp_minus_baseline_activ = tuple(
inp_act - base_act
for inp_act, base_act in zip(
layer_act.attribute(inp), layer_act.attribute(base)
)
)
assertTensorTuplesAlmostEqual(
self,
tuple(
attr_wo_mult * inp_min_base
for attr_wo_mult, inp_min_base in zip(
attributions_wo_mult_by_inputs, inp_minus_baseline_activ
)
),
attributions,
)
def _assert_compare_with_layer_conductance(
self, model: Module, input: Tensor, attribute_to_layer_input: bool = False
):
lc = LayerConductance(model, cast(Module, model.linear2))
# For a large number of steps, layer conductance and layer integrated
# gradients become very close
attribution, delta = lc.attribute(
input,
target=0,
n_steps=1500,
return_convergence_delta=True,
attribute_to_layer_input=attribute_to_layer_input,
)
lig = LayerIntegratedGradients(model, cast(Module, model.linear2))
attributions2, delta2 = lig.attribute(
input,
target=0,
n_steps=1500,
return_convergence_delta=True,
attribute_to_layer_input=attribute_to_layer_input,
)
assertTensorAlmostEqual(
self, attribution, attributions2, delta=0.01, mode="max"
)
assertTensorAlmostEqual(self, delta, delta2, delta=0.5, mode="max")
def _assert_compare_with_emb_patching(
self,
input: Union[Tensor, Tuple[Tensor, ...]],
baseline: Union[Tensor, Tuple[Tensor, ...]],
additional_args: Union[None, Tuple[Tensor, ...]],
multiply_by_inputs: bool = True,
multiple_emb: bool = False,
):
model = BasicEmbeddingModel(nested_second_embedding=True)
if multiple_emb:
module_list: List[Module] = [model.embedding1, model.embedding2]
lig = LayerIntegratedGradients(
model,
module_list,
multiply_by_inputs=multiply_by_inputs,
)
else:
lig = LayerIntegratedGradients(
model, model.embedding1, multiply_by_inputs=multiply_by_inputs
)
attributions, delta = lig.attribute(
input,
baselines=baseline,
additional_forward_args=additional_args,
return_convergence_delta=True,
)
# now interpret with standard integrated gradients, monkey-patching
# the embedding layers so the model consumes embeddings directly
e1 = configure_interpretable_embedding_layer(model, "embedding1")
e1_input_emb = e1.indices_to_embeddings(input[0] if multiple_emb else input)
e1_baseline_emb = e1.indices_to_embeddings(
baseline[0] if multiple_emb else baseline
)
input_emb = e1_input_emb
baseline_emb = e1_baseline_emb
e2 = None
if multiple_emb:
e2 = configure_interpretable_embedding_layer(model, "embedding2")
e2_input_emb = e2.indices_to_embeddings(*input[1:])
e2_baseline_emb = e2.indices_to_embeddings(*baseline[1:])
input_emb = (e1_input_emb, e2_input_emb)
baseline_emb = (e1_baseline_emb, e2_baseline_emb)
ig = IntegratedGradients(model, multiply_by_inputs=multiply_by_inputs)
attributions_with_ig, delta_with_ig = ig.attribute(
input_emb,
baselines=baseline_emb,
additional_forward_args=additional_args,
target=0,
return_convergence_delta=True,
)
remove_interpretable_embedding_layer(model, e1)
if e2 is not None:
remove_interpretable_embedding_layer(model, e2)
self.assertEqual(
isinstance(attributions_with_ig, tuple), isinstance(attributions, list)
)
self.assertTrue(
isinstance(attributions_with_ig, tuple)
if multiple_emb
else not isinstance(attributions_with_ig, tuple)
)
# convert to tuple for comparison
if not isinstance(attributions_with_ig, tuple):
attributions = (attributions,)
attributions_with_ig = (attributions_with_ig,)
else:
# convert list to tuple
self.assertIsInstance(attributions, list)
attributions = tuple(attributions)
for attr_lig, attr_ig in zip(attributions, attributions_with_ig):
self.assertEqual(cast(Tensor, attr_lig).shape, cast(Tensor, attr_ig).shape)
assertTensorAlmostEqual(self, attr_lig, attr_ig, delta=0.05, mode="max")
if multiply_by_inputs:
assertTensorAlmostEqual(self, delta, delta_with_ig, delta=0.05, mode="max")
def _assert_compare_with_expected(
self,
model: Module,
target_layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_ig: Tuple[List[List[float]], ...],
additional_input: Any = None,
):
layer_ig = LayerIntegratedGradients(model, target_layer)
attributions = layer_ig.attribute(
test_input, target=0, additional_forward_args=additional_input
)
assertTensorTuplesAlmostEqual(self, attributions, expected_ig, delta=0.01)
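# Quick sketch of the completeness property behind return_convergence_delta in
# the comparisons above: integrated-gradients attributions sum, per example, to
# model(input) - model(baseline) for the chosen target. The small model is
# illustrative.
def _demo_ig_completeness() -> None:
    model = torch.nn.Sequential(
        torch.nn.Linear(4, 4), torch.nn.Tanh(), torch.nn.Linear(4, 2)
    )
    inp, base = torch.randn(3, 4), torch.zeros(3, 4)
    attrs = IntegratedGradients(model).attribute(
        inp, baselines=base, target=0, n_steps=500
    )
    diff = (model(inp)[:, 0] - model(base)[:, 0]).detach()
    assert torch.allclose(attrs.sum(dim=1), diff, atol=1e-2)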
|
#!/usr/bin/env python3
import unittest
from typing import Any, Tuple, Union
import torch
from captum._utils.typing import TensorLikeList
from captum.attr._core.layer.grad_cam import LayerGradCam
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_non_conv(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._grad_cam_test_assert(net, net.linear0, inp, [[400.0]])
def test_simple_multi_input_non_conv(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]], requires_grad=True)
self._grad_cam_test_assert(net, net.multi_relu, inp, ([[21.0]], [[21.0]]))
def test_simple_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
self._grad_cam_test_assert(
net, net.conv1, inp, [[[[11.25, 13.5], [20.25, 22.5]]]]
)
def test_simple_input_conv_split_channels(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
expected_result = [
[
[[-3.7500, 3.0000], [23.2500, 30.0000]],
[[15.0000, 10.5000], [-3.0000, -7.5000]],
]
]
self._grad_cam_test_assert(
net,
net.conv1,
inp,
expected_activation=expected_result,
attr_dim_summation=False,
)
def test_simple_input_conv_no_grad(self) -> None:
net = BasicModel_ConvNet_One_Conv()
# this deactivates requires_grad; some models explicitly
# do that before being interpreted.
for param in net.parameters():
param.requires_grad = False
inp = torch.arange(16).view(1, 1, 4, 4).float()
self._grad_cam_test_assert(
net, net.conv1, inp, [[[[11.25, 13.5], [20.25, 22.5]]]]
)
def test_simple_input_conv_relu(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
self._grad_cam_test_assert(net, net.relu1, inp, [[[[0.0, 4.0], [28.0, 32.5]]]])
def test_simple_input_conv_without_final_relu(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
# Add a negative value to verify the final ReLU is not applied by default
inp[0, 0, 1, 1] = -4.0
inp.requires_grad_()
self._grad_cam_test_assert(
net, net.conv1, inp, 0.5625 * inp, attribute_to_layer_input=True
)
def test_simple_input_conv_fc_with_final_relu(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
# Add a negative value to verify the final ReLU is applied
inp[0, 0, 1, 1] = -4.0
inp.requires_grad_()
exp = 0.5625 * inp
exp[0, 0, 1, 1] = 0.0
self._grad_cam_test_assert(
net,
net.conv1,
inp,
exp,
attribute_to_layer_input=True,
relu_attributions=True,
)
def test_simple_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
inp2 = torch.ones((1, 1, 4, 4))
self._grad_cam_test_assert(
net, net.conv1, (inp, inp2), [[[[14.5, 19.0], [32.5, 37.0]]]]
)
def _grad_cam_test_assert(
self,
model: Module,
target_layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_activation: Union[
TensorLikeList,
Tuple[TensorLikeList, ...],
Tensor,
Tuple[Tensor, ...],
],
additional_input: Any = None,
attribute_to_layer_input: bool = False,
relu_attributions: bool = False,
attr_dim_summation: bool = True,
):
layer_gc = LayerGradCam(model, target_layer)
self.assertFalse(layer_gc.multiplies_by_inputs)
attributions = layer_gc.attribute(
test_input,
target=0,
additional_forward_args=additional_input,
attribute_to_layer_input=attribute_to_layer_input,
relu_attributions=relu_attributions,
attr_dim_summation=attr_dim_summation,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_activation, delta=0.01
)
if __name__ == "__main__":
unittest.main()
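# Manual restatement (illustrative network, not a fixture) of the quantity the
# assertions above encode: GradCAM pools the target gradient over the spatial
# dimensions, weights the layer activations with the pooled gradient, and by
# default sums over the channel dimension.
def _demo_manual_grad_cam() -> None:
    class TinyNet(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 2, 3)
            self.head = torch.nn.Linear(2 * 2 * 2, 2)
        def forward(self, x: Tensor) -> Tensor:
            return self.head(self.conv(x).flatten(1))
    net = TinyNet()
    inp = torch.randn(1, 1, 4, 4)
    acts = net.conv(inp)
    (grads,) = torch.autograd.grad(net.head(acts.flatten(1))[:, 0].sum(), acts)
    cam = (grads.mean(dim=(2, 3), keepdim=True) * acts).sum(dim=1, keepdim=True)
    captum_cam = LayerGradCam(net, net.conv).attribute(inp, target=0)
    assert torch.allclose(cam.detach(), captum_cam, atol=1e-5)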
|
#!/usr/bin/env python3
from __future__ import print_function
from typing import cast, List, Tuple, Union
import torch
from captum.attr._core.layer.layer_deep_lift import LayerDeepLift, LayerDeepLiftShap
from tests.helpers.basic import (
assert_delta,
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_ConvNet_MaxPool3d,
BasicModel_MaxPool_ReLU,
BasicModel_MultiLayer,
LinearMaxPoolLinearModel,
ReLULinearModel,
)
from torch import Tensor
class TestDeepLift(BaseTest):
def test_relu_layer_deeplift(self) -> None:
model = ReLULinearModel(inplace=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
layer_dl = LayerDeepLift(model, model.relu)
attributions, delta = layer_dl.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 15.0])
assert_delta(self, delta)
def test_relu_layer_deeplift_wo_multiplying_by_inputs(self) -> None:
model = ReLULinearModel(inplace=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
layer_dl = LayerDeepLift(model, model.relu, multiply_by_inputs=False)
attributions = layer_dl.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 1.0])
def test_relu_layer_deeplift_multiple_output(self) -> None:
model = BasicModel_MultiLayer(multi_input_module=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
layer_dl = LayerDeepLift(model, model.multi_relu)
attributions, delta = layer_dl.attribute(
inputs[0],
baselines[0],
target=0,
attribute_to_layer_input=False,
return_convergence_delta=True,
)
assertTensorTuplesAlmostEqual(
self, attributions, ([[0.0, -1.0, -1.0, -1.0]], [[0.0, -1.0, -1.0, -1.0]])
)
assert_delta(self, delta)
def test_relu_layer_deeplift_add_args(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
layer_dl = LayerDeepLift(model, model.relu)
attributions, delta = layer_dl.attribute(
inputs,
baselines,
additional_forward_args=3.0,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 45.0])
assert_delta(self, delta)
def test_linear_layer_deeplift(self) -> None:
model = ReLULinearModel(inplace=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
layer_dl = LayerDeepLift(model, model.l3)
attributions, delta = layer_dl.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 15.0])
assert_delta(self, delta)
def test_relu_deeplift_with_custom_attr_func(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
attr_method = LayerDeepLift(model, model.l3)
self._relu_custom_attr_func_assert(attr_method, inputs, baselines, [[2.0]])
def test_inplace_maxpool_relu_with_custom_attr_func(self) -> None:
model = BasicModel_MaxPool_ReLU(inplace=True)
inp = torch.tensor([[[1.0, 2.0, -4.0], [-3.0, -2.0, -1.0]]])
dl = LayerDeepLift(model, model.maxpool)
def custom_att_func(mult, inp, baseline):
assertTensorAlmostEqual(self, mult[0], [[[1.0], [0.0]]])
assertTensorAlmostEqual(self, inp[0], [[[2.0], [-1.0]]])
assertTensorAlmostEqual(self, baseline[0], [[[0.0], [0.0]]])
return mult
dl.attribute(inp, custom_attribution_func=custom_att_func)
def test_linear_layer_deeplift_batch(self) -> None:
model = ReLULinearModel(inplace=True)
_, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
x1 = torch.tensor(
[[-10.0, 1.0, -5.0], [-10.0, 1.0, -5.0], [-10.0, 1.0, -5.0]],
requires_grad=True,
)
x2 = torch.tensor(
[[3.0, 3.0, 1.0], [3.0, 3.0, 1.0], [3.0, 3.0, 1.0]], requires_grad=True
)
inputs = (x1, x2)
layer_dl = LayerDeepLift(model, model.l3)
attributions, delta = layer_dl.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 15.0])
assert_delta(self, delta)
attributions, delta = layer_dl.attribute(
inputs,
baselines,
attribute_to_layer_input=False,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions, [[15.0], [15.0], [15.0]])
assert_delta(self, delta)
def test_relu_layer_deepliftshap(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
layer_dl_shap = LayerDeepLiftShap(model, model.relu)
attributions, delta = layer_dl_shap.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 15.0])
assert_delta(self, delta)
def test_relu_layer_deepliftshap_wo_multiplying_by_inputs(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
layer_dl_shap = LayerDeepLiftShap(model, model.relu, multiply_by_inputs=False)
attributions = layer_dl_shap.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 1.0])
def test_relu_layer_deepliftshap_multiple_output(self) -> None:
model = BasicModel_MultiLayer(multi_input_module=True)
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
layer_dl = LayerDeepLiftShap(model, model.multi_relu)
attributions, delta = layer_dl.attribute(
inputs[0],
baselines[0],
target=0,
attribute_to_layer_input=False,
return_convergence_delta=True,
)
assertTensorTuplesAlmostEqual(
self, attributions, ([[0.0, -1.0, -1.0, -1.0]], [[0.0, -1.0, -1.0, -1.0]])
)
assert_delta(self, delta)
def test_linear_layer_deepliftshap(self) -> None:
model = ReLULinearModel(inplace=True)
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
layer_dl_shap = LayerDeepLiftShap(model, model.l3)
attributions, delta = layer_dl_shap.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 15.0])
assert_delta(self, delta)
attributions, delta = layer_dl_shap.attribute(
inputs,
baselines,
attribute_to_layer_input=False,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions, [[15.0]])
assert_delta(self, delta)
def test_relu_deepliftshap_with_custom_attr_func(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
attr_method = LayerDeepLiftShap(model, model.l3)
self._relu_custom_attr_func_assert(attr_method, inputs, baselines, [[2.0]])
def test_lin_maxpool_lin_classification(self) -> None:
inputs = torch.ones(2, 4)
baselines = torch.tensor([[1, 2, 3, 9], [4, 8, 6, 7]]).float()
model = LinearMaxPoolLinearModel()
dl = LayerDeepLift(model, model.pool1)
attrs, delta = dl.attribute(
inputs, baselines, target=0, return_convergence_delta=True
)
expected = [[[-8.0]], [[-7.0]]]
expected_delta = [0.0, 0.0]
assertTensorAlmostEqual(self, cast(Tensor, attrs), expected, 0.0001, "max")
assertTensorAlmostEqual(self, delta, expected_delta, 0.0001, "max")
def test_convnet_maxpool2d_classification(self) -> None:
inputs = 100 * torch.randn(2, 1, 10, 10)
model = BasicModel_ConvNet()
model.eval()
dl = LayerDeepLift(model, model.pool1)
dl2 = LayerDeepLift(model, model.conv2)
attr = dl.attribute(inputs, target=0)
attr2 = dl2.attribute(inputs, target=0, attribute_to_layer_input=True)
self.assertTrue(cast(Tensor, attr).sum() == cast(Tensor, attr2).sum())
def test_convnet_maxpool3d_classification(self) -> None:
inputs = 100 * torch.randn(2, 1, 10, 10, 10)
model = BasicModel_ConvNet_MaxPool3d()
model.eval()
dl = LayerDeepLift(model, model.pool1)
dl2 = LayerDeepLift(model, model.conv2)
# using with self.assertRaises(AssertionError) doesn't work on Circle CI:
# the error is converted into a RuntimeError there
attr = dl.attribute(inputs, target=0, attribute_to_layer_input=False)
attr2 = dl2.attribute(inputs, target=0, attribute_to_layer_input=True)
self.assertTrue(cast(Tensor, attr).sum() == cast(Tensor, attr2).sum())
def _relu_custom_attr_func_assert(
self,
attr_method: Union[LayerDeepLift, LayerDeepLiftShap],
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: Union[Tensor, Tuple[Tensor, ...]],
expected: List[List[float]],
) -> None:
def custom_attr_func(multipliers, inputs, baselines):
return tuple(multiplier * 2 for multiplier in multipliers)
attr = attr_method.attribute(
inputs,
baselines,
custom_attribution_func=custom_attr_func,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attr[0], expected, 1e-19)
def _create_inps_and_base_for_deeplift_neuron_layer_testing() -> Tuple[
Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]
]:
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
b1 = torch.tensor([[0.0, 0.0, 0.0]], requires_grad=True)
b2 = torch.tensor([[0.0, 0.0, 0.0]], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
return inputs, baselines
def _create_inps_and_base_for_deepliftshap_neuron_layer_testing() -> Tuple[
Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]
]:
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
b1 = torch.tensor(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], requires_grad=True
)
b2 = torch.tensor(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], requires_grad=True
)
inputs = (x1, x2)
baselines = (b1, b2)
return inputs, baselines
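# Sketch of the multiply_by_inputs relationship the tests above rely on:
# "global" attributions (multipliers) times the difference between layer
# activations on inputs and on baselines should recover the default "local"
# attributions. Reuses this file's helpers; LayerActivation is imported locally
# just for the demonstration, and the exact equality is an assumption based on
# the expected values asserted above.
def _demo_layer_deeplift_global_times_delta() -> None:
    from captum.attr import LayerActivation
    model = ReLULinearModel()
    inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
    local = LayerDeepLift(model, model.relu).attribute(inputs, baselines)
    glob = LayerDeepLift(model, model.relu, multiply_by_inputs=False).attribute(
        inputs, baselines
    )
    act = LayerActivation(model, model.relu)
    delta_act = act.attribute(inputs) - act.attribute(baselines)
    assert torch.allclose(local, glob * delta_act, atol=1e-6)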
|
#!/usr/bin/env python3
import unittest
from typing import Any, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType
from captum.attr._core.layer.layer_feature_ablation import LayerFeatureAblation
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_ablation_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
net,
net.linear0,
inp,
([280.0, 280.0, 120.0],),
layer_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
attribute_to_layer_input=True,
)
def test_multi_input_ablation(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
baseline = torch.tensor([[1.0, 2.0, 3.0]])
self._ablation_test_assert(
net,
net.model.linear1,
(inp1, inp2, inp3),
[[168.0, 992.0, 148.0], [84.0, 632.0, 120.0]],
additional_input=(1,),
baselines=baseline,
perturbations_per_eval=(1, 2, 3),
attribute_to_layer_input=True,
)
self._ablation_test_assert(
net,
net.model.linear0,
(inp1, inp2, inp3),
[[168.0, 992.0, 148.0], [84.0, 632.0, 120.0]],
additional_input=(1,),
baselines=baseline,
perturbations_per_eval=(1, 2, 3),
attribute_to_layer_input=False,
)
def test_multi_input_ablation_with_layer_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
baseline = torch.tensor([[1.0, 2.0, 3.0]])
layer_mask = torch.tensor([[0, 1, 0], [0, 1, 2]])
self._ablation_test_assert(
net,
net.model.linear1,
(inp1, inp2, inp3),
[[316.0, 992.0, 316.0], [84.0, 632.0, 120.0]],
additional_input=(1,),
baselines=baseline,
perturbations_per_eval=(1, 2, 3),
layer_mask=layer_mask,
attribute_to_layer_input=True,
)
self._ablation_test_assert(
net,
net.model.linear0,
(inp1, inp2, inp3),
[[316.0, 992.0, 316.0], [84.0, 632.0, 120.0]],
additional_input=(1,),
baselines=baseline,
layer_mask=layer_mask,
perturbations_per_eval=(1, 2, 3),
)
def test_simple_multi_input_conv_intermediate(self) -> None:
net = BasicModel_ConvNet_One_Conv(inplace=True)
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
[[[[4.0, 13.0], [40.0, 49.0]], [[0, 0], [-15.0, -24.0]]]],
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
)
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
([[[4.0, 13.0], [40.0, 49.0]], [[0, 0], [-15.0, -24.0]]],),
baselines=torch.tensor(
[[[-4.0, -13.0], [-2.0, -2.0]], [[0, 0], [0.0, 0.0]]]
),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
attribute_to_layer_input=True,
)
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
[[[[17.0, 17.0], [67.0, 67.0]], [[0, 0], [-39.0, -39.0]]]],
perturbations_per_eval=(1, 2, 4),
layer_mask=torch.tensor([[[[0, 0], [1, 1]], [[2, 2], [3, 3]]]]),
)
def test_simple_multi_output_ablation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._ablation_test_assert(
net, net.multi_relu, inp, ([[0.0, 7.0, 7.0, 7.0]], [[0.0, 7.0, 7.0, 7.0]])
)
def test_simple_multi_output_input_ablation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._ablation_test_assert(
net,
net.multi_relu,
inp,
([[0.0, 7.0, 7.0, 7.0]], [[0.0, 7.0, 7.0, 7.0]]),
attribute_to_layer_input=True,
)
def _ablation_test_assert(
self,
model: Module,
layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_ablation: Union[List, Tuple],
layer_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: Union[None, int] = 0,
attribute_to_layer_input: bool = False,
) -> None:
for batch_size in perturbations_per_eval:
ablation = LayerFeatureAblation(model, layer)
attributions = ablation.attribute(
test_input,
target=target,
layer_mask=layer_mask,
additional_forward_args=additional_input,
layer_baselines=baselines,
perturbations_per_eval=batch_size,
attribute_to_layer_input=attribute_to_layer_input,
)
assertTensorTuplesAlmostEqual(self, attributions, expected_ablation)
if __name__ == "__main__":
unittest.main()
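# Sketch of how a mask groups features during ablation, mirroring the
# layer_mask arguments above: entries sharing an id are ablated together and
# receive identical attribution. The one-layer forward function is
# illustrative; FeatureAblation is imported locally just for the demo.
def _demo_mask_grouping() -> None:
    from captum.attr import FeatureAblation
    lin = torch.nn.Linear(3, 1)
    abl = FeatureAblation(lambda x: lin(x).sum(dim=1))
    inp = torch.tensor([[1.0, 2.0, 3.0]])
    mask = torch.tensor([[0, 0, 1]])  # features 0 and 1 form one group
    attr = abl.attribute(inp, feature_mask=mask)
    assert torch.equal(attr[0, 0], attr[0, 1])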
|
#!/usr/bin/env python3
import torch
import torch.nn as nn
from captum.attr import LayerLRP
from captum.attr._utils.lrp_rules import Alpha1_Beta0_Rule, EpsilonRule, GammaRule
from ...helpers.basic import assertTensorAlmostEqual, BaseTest
from ...helpers.basic_models import BasicModel_ConvNet_One_Conv, SimpleLRPModel
def _get_basic_config():
input = torch.arange(16).view(1, 1, 4, 4).float()
return BasicModel_ConvNet_One_Conv(), input
def _get_simple_model(inplace=False):
model = SimpleLRPModel(inplace)
inputs = torch.tensor([[1.0, 2.0, 3.0]])
return model, inputs
def _get_simple_model2(inplace=False):
class MyModel(nn.Module):
def __init__(self, inplace) -> None:
super().__init__()
self.lin = nn.Linear(2, 2)
self.lin.weight = nn.Parameter(torch.ones(2, 2))
self.relu = torch.nn.ReLU(inplace=inplace)
def forward(self, input):
return self.relu(self.lin(input))[0].unsqueeze(0)
input = torch.tensor([[1.0, 2.0], [1.0, 3.0]])
model = MyModel(inplace)
return model, input
class Test(BaseTest):
def test_lrp_creator(self) -> None:
model, _ = _get_basic_config()
model.conv1.rule = 1
self.assertRaises(TypeError, LayerLRP, model, model.conv1)
def test_lrp_creator_activation(self) -> None:
model, inputs = _get_basic_config()
model.add_module("sigmoid", nn.Sigmoid())
lrp = LayerLRP(model, model.conv1)
self.assertRaises(TypeError, lrp.attribute, inputs)
def test_lrp_basic_attributions(self):
model, inputs = _get_basic_config()
logits = model(inputs)
score, classIndex = torch.max(logits, 1)
lrp = LayerLRP(model, model.conv1)
relevance, delta = lrp.attribute(
inputs, classIndex.item(), return_convergence_delta=True
)
assertTensorAlmostEqual(
self, relevance[0], torch.Tensor([[[0, 4], [31, 40]], [[0, 0], [-6, -15]]])
)
assertTensorAlmostEqual(self, delta, torch.Tensor([0]))
def test_lrp_simple_attributions(self):
model, inputs = _get_simple_model(inplace=False)
model.eval()
model.linear.rule = EpsilonRule()
model.linear2.rule = EpsilonRule()
lrp_upper = LayerLRP(model, model.linear2)
relevance_upper, delta = lrp_upper.attribute(
inputs, attribute_to_layer_input=True, return_convergence_delta=True
)
lrp_lower = LayerLRP(model, model.linear)
relevance_lower = lrp_lower.attribute(inputs)
assertTensorAlmostEqual(self, relevance_lower[0], relevance_upper[0])
self.assertEqual(delta.item(), 0)
def test_lrp_simple_repeat_attributions(self) -> None:
model, inputs = _get_simple_model()
model.eval()
model.linear.rule = GammaRule()
model.linear2.rule = Alpha1_Beta0_Rule()
output = model(inputs)
lrp = LayerLRP(model, model.linear)
_ = lrp.attribute(inputs)
output_after = model(inputs)
assertTensorAlmostEqual(self, output, output_after)
def test_lrp_simple_inplaceReLU(self) -> None:
model_default, inputs = _get_simple_model()
model_inplace, _ = _get_simple_model(inplace=True)
for model in [model_default, model_inplace]:
model.eval()
model.linear.rule = EpsilonRule()
model.linear2.rule = EpsilonRule()
lrp_default = LayerLRP(model_default, model_default.linear2)
lrp_inplace = LayerLRP(model_inplace, model_inplace.linear2)
relevance_default = lrp_default.attribute(inputs, attribute_to_layer_input=True)
relevance_inplace = lrp_inplace.attribute(inputs, attribute_to_layer_input=True)
assertTensorAlmostEqual(self, relevance_default[0], relevance_inplace[0])
def test_lrp_simple_tanh(self) -> None:
class Model(nn.Module):
def __init__(self) -> None:
super(Model, self).__init__()
self.linear = nn.Linear(3, 3, bias=False)
self.linear.weight.data.fill_(0.1)
self.tanh = torch.nn.Tanh()
self.linear2 = nn.Linear(3, 1, bias=False)
self.linear2.weight.data.fill_(0.1)
def forward(self, x):
return self.linear2(self.tanh(self.linear(x)))
model = Model()
_, inputs = _get_simple_model()
lrp = LayerLRP(model, model.linear)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(
self, relevance[0], torch.Tensor([0.0537, 0.0537, 0.0537])
) # expected result when tanh is skipped during relevance propagation
def test_lrp_simple_attributions_GammaRule(self) -> None:
model, inputs = _get_simple_model()
with torch.no_grad():
model.linear.weight.data[0][0] = -2
model.eval()
model.linear.rule = GammaRule(gamma=1)
model.linear2.rule = GammaRule()
lrp = LayerLRP(model, model.linear)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(self, relevance[0], torch.tensor([24.0, 36.0, 36.0]))
def test_lrp_simple_attributions_AlphaBeta(self) -> None:
model, inputs = _get_simple_model()
with torch.no_grad():
model.linear.weight.data[0][0] = -2
model.eval()
model.linear.rule = Alpha1_Beta0_Rule()
model.linear2.rule = Alpha1_Beta0_Rule()
lrp = LayerLRP(model, model.linear)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(self, relevance[0], torch.tensor([24.0, 36.0, 36.0]))
def test_lrp_simple_attributions_all_layers(self) -> None:
model, inputs = _get_simple_model(inplace=False)
model.eval()
model.linear.rule = EpsilonRule()
model.linear2.rule = EpsilonRule()
layers = [model.linear, model.linear2]
lrp = LayerLRP(model, layers)
relevance = lrp.attribute(inputs, attribute_to_layer_input=True)
self.assertEqual(len(relevance), 2)
assertTensorAlmostEqual(self, relevance[0][0], torch.tensor([18.0, 36.0, 54.0]))
def test_lrp_simple_attributions_all_layers_delta(self) -> None:
model, inputs = _get_simple_model(inplace=False)
model.eval()
model.linear.rule = EpsilonRule()
model.linear2.rule = EpsilonRule()
layers = [model.linear, model.linear2]
lrp = LayerLRP(model, layers)
inputs = torch.cat((inputs, 2 * inputs))
relevance, delta = lrp.attribute(
inputs, attribute_to_layer_input=True, return_convergence_delta=True
)
self.assertEqual(len(relevance), len(delta))
assertTensorAlmostEqual(
self,
relevance[0],
torch.tensor([[18.0, 36.0, 54.0], [36.0, 72.0, 108.0]]),
)
|
#!/usr/bin/env python3
import unittest
from typing import Any, List, Tuple, Union
import torch
from captum._utils.typing import ModuleOrModuleList
from captum.attr._core.layer.layer_activation import LayerActivation
from captum.attr._core.layer.layer_gradient_x_activation import LayerGradientXActivation
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicEmbeddingModel,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_gradient_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._layer_activation_test_assert(net, net.linear0, inp, [[0.0, 400.0, 0.0]])
def test_simple_input_gradient_activation_no_grad(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
with torch.no_grad():
self._layer_activation_test_assert(
net, net.linear0, inp, [[0.0, 400.0, 0.0]]
)
def test_simple_linear_gradient_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._layer_activation_test_assert(
net, net.linear1, inp, [[90.0, 101.0, 101.0, 101.0]]
)
def test_multi_layer_linear_gradient_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
module_list: List[Module] = [net.linear0, net.linear1]
self._layer_activation_test_assert(
net,
module_list,
inp,
([[0.0, 400.0, 0.0]], [[90.0, 101.0, 101.0, 101.0]]),
)
def test_simple_linear_gradient_activation_no_grad(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
        # this deactivates requires_grad on all parameters; some models
        # explicitly do that before being interpreted.
for param in net.parameters():
param.requires_grad = False
self._layer_activation_test_assert(
net, net.linear1, inp, [[90.0, 101.0, 101.0, 101.0]]
)
def test_simple_multi_gradient_activation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[3.0, 4.0, 0.0]])
self._layer_activation_test_assert(
net, net.multi_relu, inp, ([[0.0, 8.0, 8.0, 8.0]], [[0.0, 8.0, 8.0, 8.0]])
)
def test_simple_relu_gradient_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[3.0, 4.0, 0.0]], requires_grad=True)
self._layer_activation_test_assert(net, net.relu, inp, [[0.0, 8.0, 8.0, 8.0]])
def test_multi_layer_multi_gradient_activation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[3.0, 4.0, 0.0]])
module_list: List[Module] = [net.multi_relu, net.linear0]
self._layer_activation_test_assert(
net,
module_list,
inp,
[([[0.0, 8.0, 8.0, 8.0]], [[0.0, 8.0, 8.0, 8.0]]), [[9.0, 12.0, 0.0]]],
)
def test_simple_output_gradient_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._layer_activation_test_assert(net, net.linear2, inp, [[392.0, 0.0]])
def test_simple_gradient_activation_multi_input_linear2(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._layer_activation_test_assert(
net, net.model.linear2, (inp1, inp2, inp3), [[392.0, 0.0]], (4,)
)
def test_simple_gradient_activation_multi_input_relu(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._layer_activation_test_assert(
net, net.model.relu, (inp1, inp2), [[90.0, 101.0, 101.0, 101.0]], (inp3, 5)
)
def test_gradient_activation_embedding(self) -> None:
input1 = torch.tensor([2, 5, 0, 1])
input2 = torch.tensor([3, 0, 0, 2])
model = BasicEmbeddingModel()
layer_act = LayerGradientXActivation(model, model.embedding1)
self.assertEqual(
list(layer_act.attribute(inputs=(input1, input2)).shape), [4, 100]
)
def test_gradient_activation_embedding_no_grad(self) -> None:
input1 = torch.tensor([2, 5, 0, 1])
input2 = torch.tensor([3, 0, 0, 2])
model = BasicEmbeddingModel()
for param in model.parameters():
param.requires_grad = False
with torch.no_grad():
layer_act = LayerGradientXActivation(model, model.embedding1)
self.assertEqual(
list(layer_act.attribute(inputs=(input1, input2)).shape), [4, 100]
)
def _layer_activation_test_assert(
self,
model: Module,
target_layer: ModuleOrModuleList,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_activation: Union[List, Tuple[List[List[float]], ...]],
additional_input: Any = None,
) -> None:
layer_act = LayerGradientXActivation(model, target_layer)
self.assertTrue(layer_act.multiplies_by_inputs)
attributions = layer_act.attribute(
test_input, target=0, additional_forward_args=additional_input
)
if isinstance(target_layer, Module):
assertTensorTuplesAlmostEqual(
self, attributions, expected_activation, delta=0.01
)
else:
for i in range(len(target_layer)):
assertTensorTuplesAlmostEqual(
self, attributions[i], expected_activation[i], delta=0.01
)
# test Layer Gradient without multiplying with activations
layer_grads = LayerGradientXActivation(
model, target_layer, multiply_by_inputs=False
)
layer_act = LayerActivation(model, target_layer)
self.assertFalse(layer_grads.multiplies_by_inputs)
grads = layer_grads.attribute(
test_input, target=0, additional_forward_args=additional_input
)
acts = layer_act.attribute(test_input, additional_forward_args=additional_input)
if isinstance(target_layer, Module):
assertTensorTuplesAlmostEqual(
self,
attributions,
tuple(act * grad for act, grad in zip(acts, grads)),
delta=0.01,
)
else:
for i in range(len(target_layer)):
assertTensorTuplesAlmostEqual(
self,
attributions[i],
tuple(act * grad for act, grad in zip(acts[i], grads[i])),
delta=0.01,
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from typing import Any, cast, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType
from captum.attr._core.layer.layer_conductance import LayerConductance
from tests.attr.helpers.conductance_reference import ConductanceReference
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_conductance(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_test_assert(net, net.linear0, inp, [[0.0, 390.0, 0.0]])
def test_simple_input_multi_conductance(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_test_assert(
net,
net.multi_relu,
inp,
([[90.0, 100.0, 100.0, 100.0]], [[90.0, 100.0, 100.0, 100.0]]),
)
def test_simple_input_with_scalar_baseline_conductance(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_test_assert(
net, net.linear0, inp, [[0.0, 390.0, 0.0]], baselines=0.0
)
def test_simple_linear_conductance(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._conductance_test_assert(
net, net.linear1, inp, [[90.0, 100.0, 100.0, 100.0]]
)
def test_simple_relu_conductance(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_test_assert(net, net.relu, inp, [[90.0, 100.0, 100.0, 100.0]])
def test_simple_output_conductance(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._conductance_test_assert(net, net.linear2, inp, [[390.0, 0.0]])
def test_simple_multi_input_linear2_conductance(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._conductance_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
[[390.0, 0.0]],
additional_args=(4,),
)
def test_simple_multi_input_relu_conductance(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._conductance_test_assert(
net,
net.model.relu,
(inp1, inp2),
[[90.0, 100.0, 100.0, 100.0]],
additional_args=(inp3, 5),
)
def test_simple_multi_input_relu_conductance_batch(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0], [0.0, 0.0, 10.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0], [0.0, 0.0, 10.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 5.0]])
self._conductance_test_assert(
net,
net.model.relu,
(inp1, inp2),
[[90.0, 100.0, 100.0, 100.0], [100.0, 100.0, 100.0, 100.0]],
additional_args=(inp3, 5),
)
def test_matching_conv1_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(1, 1, 10, 10, requires_grad=True)
self._conductance_reference_test_assert(net, net.conv1, inp, n_steps=100)
def test_matching_pool1_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(1, 1, 10, 10)
self._conductance_reference_test_assert(net, net.pool1, inp)
def test_matching_conv2_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(1, 1, 10, 10, requires_grad=True)
self._conductance_reference_test_assert(net, net.conv2, inp)
def test_matching_pool2_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(1, 1, 10, 10)
self._conductance_reference_test_assert(net, net.pool2, inp)
def test_matching_conv_multi_input_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(4, 1, 10, 10, requires_grad=True)
self._conductance_reference_test_assert(net, net.relu3, inp)
def test_matching_conv_with_baseline_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(3, 1, 10, 10)
baseline = 100 * torch.randn(3, 1, 10, 10, requires_grad=True)
self._conductance_reference_test_assert(net, net.fc1, inp, baseline)
def _conductance_test_assert(
self,
model: Module,
target_layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_conductance: Union[List[List[float]], Tuple[List[List[float]], ...]],
baselines: BaselineType = None,
additional_args: Any = None,
) -> None:
cond = LayerConductance(model, target_layer)
self.assertTrue(cond.multiplies_by_inputs)
for internal_batch_size in (None, 4, 20):
attributions, delta = cond.attribute(
test_input,
baselines=baselines,
target=0,
n_steps=500,
method="gausslegendre",
additional_forward_args=additional_args,
internal_batch_size=internal_batch_size,
return_convergence_delta=True,
)
delta_condition = (delta.abs() < 0.01).all()
self.assertTrue(
delta_condition,
"Sum of attributions does {}"
" not match the difference of endpoints.".format(delta),
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_conductance, delta=0.1
)
def _conductance_reference_test_assert(
self,
model: Module,
target_layer: Module,
test_input: Tensor,
test_baseline: Union[None, Tensor] = None,
        n_steps: int = 300,
) -> None:
layer_output = None
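        # Register a temporary forward hook to capture the target layer's
        # output; its shape is compared against the attribution shape below.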
def forward_hook(module, inp, out):
nonlocal layer_output
layer_output = out
hook = target_layer.register_forward_hook(forward_hook)
final_output = model(test_input)
layer_output = cast(Tensor, layer_output)
hook.remove()
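        # Attribute w.r.t. the output index with the largest activation summed
        # across the batch.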
target_index = torch.argmax(torch.sum(final_output, 0))
cond = LayerConductance(model, target_layer)
cond_ref = ConductanceReference(model, target_layer)
attributions, delta = cast(
Tuple[Tensor, Tensor],
cond.attribute(
test_input,
baselines=test_baseline,
target=target_index,
n_steps=n_steps,
method="gausslegendre",
return_convergence_delta=True,
),
)
delta_condition = (delta.abs() < 0.005).all()
self.assertTrue(
delta_condition,
"Sum of attribution values does {} "
" not match the difference of endpoints.".format(delta),
)
attributions_reference = cond_ref.attribute(
test_input,
baselines=test_baseline,
target=target_index,
n_steps=n_steps,
method="gausslegendre",
)
# Check that layer output size matches conductance size.
self.assertEqual(layer_output.shape, attributions.shape)
# Check that reference implementation output matches standard implementation.
assertTensorAlmostEqual(
self,
attributions,
attributions_reference,
delta=0.07,
mode="max",
)
# Test if batching is working correctly for inputs with multiple examples
if test_input.shape[0] > 1:
for i in range(test_input.shape[0]):
single_attributions = cast(
Tensor,
cond.attribute(
test_input[i : i + 1],
baselines=test_baseline[i : i + 1]
if test_baseline is not None
else None,
target=target_index,
n_steps=n_steps,
method="gausslegendre",
),
)
# Verify that attributions when passing example independently
# matches corresponding attribution of batched input.
assertTensorAlmostEqual(
self,
attributions[i : i + 1],
single_attributions,
delta=0.01,
mode="max",
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from typing import Any, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType
from captum.attr._core.layer.internal_influence import InternalInfluence
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._internal_influence_test_assert(net, net.linear0, inp, [[3.9, 3.9, 3.9]])
def test_simple_input_multi_internal_inf(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._internal_influence_test_assert(
net,
net.multi_relu,
inp,
([[0.9, 1.0, 1.0, 1.0]], [[0.9, 1.0, 1.0, 1.0]]),
attribute_to_layer_input=True,
)
def test_simple_linear_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._internal_influence_test_assert(
net, net.linear1, inp, [[0.9, 1.0, 1.0, 1.0]]
)
def test_simple_relu_input_internal_inf_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._internal_influence_test_assert(
net, net.relu, inp, ([0.9, 1.0, 1.0, 1.0],), attribute_to_layer_input=True
)
def test_simple_linear_internal_inf_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._internal_influence_test_assert(
net, net.linear1, inp, [[0.9, 1.0, 1.0, 1.0]]
)
def test_simple_relu_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[3.0, 4.0, 0.0]], requires_grad=True)
self._internal_influence_test_assert(net, net.relu, inp, [[1.0, 1.0, 1.0, 1.0]])
def test_simple_output_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._internal_influence_test_assert(net, net.linear2, inp, [[1.0, 0.0]])
def test_simple_with_baseline_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 80.0, 0.0]])
base = torch.tensor([[0.0, -20.0, 0.0]])
self._internal_influence_test_assert(
net, net.linear1, inp, [[0.7, 0.8, 0.8, 0.8]], base
)
def test_simple_multi_input_linear2_internal_inf(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._internal_influence_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
[[1.0, 0.0]],
additional_args=(4,),
)
def test_simple_multi_input_relu_internal_inf(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._internal_influence_test_assert(
net,
net.model.relu,
(inp1, inp2),
[[1.0, 1.0, 1.0, 1.0]],
additional_args=(inp3, 5),
)
def test_simple_multi_input_batch_relu_internal_inf(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 80.0, 0.0]])
inp2 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 20.0, 0.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 20.0, 0.0]])
self._internal_influence_test_assert(
net,
net.model.linear1,
(inp1, inp2),
[[0.95, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]],
additional_args=(inp3, 5),
)
def test_multiple_linear_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor(
[
[0.0, 100.0, 0.0],
[0.0, 100.0, 0.0],
[0.0, 100.0, 0.0],
[0.0, 100.0, 0.0],
],
requires_grad=True,
)
self._internal_influence_test_assert(
net,
net.linear1,
inp,
[
[0.9, 1.0, 1.0, 1.0],
[0.9, 1.0, 1.0, 1.0],
[0.9, 1.0, 1.0, 1.0],
[0.9, 1.0, 1.0, 1.0],
],
)
def test_multiple_with_baseline_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 80.0, 0.0], [30.0, 30.0, 0.0]], requires_grad=True)
base = torch.tensor(
[[0.0, -20.0, 0.0], [-20.0, -20.0, 0.0]], requires_grad=True
)
self._internal_influence_test_assert(
net, net.linear1, inp, [[0.7, 0.8, 0.8, 0.8], [0.5, 0.6, 0.6, 0.6]], base
)
def _internal_influence_test_assert(
self,
model: Module,
target_layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_activation: Union[
float,
List[List[float]],
Tuple[List[float], ...],
Tuple[List[List[float]], ...],
],
baseline: BaselineType = None,
additional_args: Any = None,
attribute_to_layer_input: bool = False,
    ) -> None:
for internal_batch_size in [None, 5, 20]:
int_inf = InternalInfluence(model, target_layer)
self.assertFalse(int_inf.multiplies_by_inputs)
attributions = int_inf.attribute(
test_input,
baselines=baseline,
target=0,
n_steps=500,
method="riemann_trapezoid",
additional_forward_args=additional_args,
internal_batch_size=internal_batch_size,
attribute_to_layer_input=attribute_to_layer_input,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_activation, delta=0.01, mode="max"
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from typing import Any, List, Tuple, Union
import torch
import torch.nn as nn
from captum.attr._core.layer.layer_activation import LayerActivation
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
Conv1dSeqModel,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._layer_activation_test_assert(net, net.linear0, inp, [[0.0, 100.0, 0.0]])
def test_simple_linear_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._layer_activation_test_assert(
net, net.linear1, inp, [[90.0, 101.0, 101.0, 101.0]]
)
def test_simple_multi_linear_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._multiple_layer_activation_test_assert(
net,
[net.linear1, net.linear0],
inp,
([[90.0, 101.0, 101.0, 101.0]], [[0.0, 100.0, 0.0]]),
)
def test_simple_relu_activation_input_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[2.0, -5.0, 4.0]])
self._layer_activation_test_assert(
net, net.relu, inp, ([-9.0, 2.0, 2.0, 2.0],), attribute_to_layer_input=True
)
def test_simple_linear_activation_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[2.0, -5.0, 4.0]])
self._layer_activation_test_assert(
net, net.linear1, inp, [[-9.0, 2.0, 2.0, 2.0]]
)
def test_simple_relu_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[3.0, 4.0, 0.0]], requires_grad=True)
self._layer_activation_test_assert(net, net.relu, inp, [[0.0, 8.0, 8.0, 8.0]])
def test_simple_output_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._layer_activation_test_assert(net, net.linear2, inp, [[392.0, 394.0]])
def test_simple_multi_output_activation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._layer_activation_test_assert(
net, net.multi_relu, inp, ([[0.0, 7.0, 7.0, 7.0]], [[0.0, 7.0, 7.0, 7.0]])
)
def test_simple_multi_layer_multi_output_activation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._multiple_layer_activation_test_assert(
net,
[net.multi_relu, net.linear0, net.linear1],
inp,
[
([[0.0, 7.0, 7.0, 7.0]], [[0.0, 7.0, 7.0, 7.0]]),
[[0.0, 6.0, 0.0]],
[[-4.0, 7.0, 7.0, 7.0]],
],
)
def test_simple_multi_input_activation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._layer_activation_test_assert(
net,
net.multi_relu,
inp,
([[-4.0, 7.0, 7.0, 7.0]], [[-4.0, 7.0, 7.0, 7.0]]),
attribute_to_layer_input=True,
)
def test_simple_multi_input_linear2_activation(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._layer_activation_test_assert(
net, net.model.linear2, (inp1, inp2, inp3), [[392.0, 394.0]], (4,)
)
def test_simple_multi_input_relu_activation(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._layer_activation_test_assert(
net, net.model.relu, (inp1, inp2), [[90.0, 101.0, 101.0, 101.0]], (inp3, 5)
)
def test_sequential_in_place(self) -> None:
model = nn.Sequential(nn.Conv2d(3, 4, 3), nn.ReLU(inplace=True))
layer_act = LayerActivation(model, model[0])
input = torch.randn(1, 3, 5, 5)
assertTensorAlmostEqual(self, layer_act.attribute(input), model[0](input))
def test_sequential_module(self) -> None:
model = Conv1dSeqModel()
layer_act = LayerActivation(model, model.seq)
input = torch.randn(2, 4, 1000)
out = model(input)
assertTensorAlmostEqual(self, layer_act.attribute(input), out)
def _layer_activation_test_assert(
self,
model: Module,
target_layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_activation: Union[
List[List[float]], Tuple[List[float], ...], Tuple[List[List[float]], ...]
],
additional_input: Any = None,
attribute_to_layer_input: bool = False,
    ) -> None:
layer_act = LayerActivation(model, target_layer)
self.assertTrue(layer_act.multiplies_by_inputs)
attributions = layer_act.attribute(
test_input,
additional_forward_args=additional_input,
attribute_to_layer_input=attribute_to_layer_input,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_activation, delta=0.01
)
def _multiple_layer_activation_test_assert(
self,
model: Module,
target_layers: List[Module],
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_activation: Union[
List, Tuple[List[float], ...], Tuple[List[List[float]], ...]
],
additional_input: Any = None,
attribute_to_layer_input: bool = False,
    ) -> None:
layer_act = LayerActivation(model, target_layers)
self.assertTrue(layer_act.multiplies_by_inputs)
attributions = layer_act.attribute(
test_input,
additional_forward_args=additional_input,
attribute_to_layer_input=attribute_to_layer_input,
)
for i in range(len(target_layers)):
assertTensorTuplesAlmostEqual(
self, attributions[i], expected_activation[i], delta=0.01
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.gradient_shap import GradientShap
from captum.attr._core.layer.layer_gradient_shap import LayerGradientShap
from tests.attr.test_gradient_shap import _assert_attribution_delta
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from tests.helpers.classification_models import SoftmaxModel
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_basic_multilayer(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, -20.0, 10.0]])
baselines = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
expected = [[-8.4, 0.0]]
self._assert_attributions(model, model.linear2, inputs, baselines, 0, expected)
def test_basic_multilayer_wo_multiplying_by_inputs(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, -20.0, 10.0]])
baselines = torch.zeros(3, 3)
lgs = LayerGradientShap(model, model.linear2, multiply_by_inputs=False)
attrs = lgs.attribute(
inputs,
baselines,
target=0,
stdevs=0.0,
)
assertTensorAlmostEqual(self, attrs, torch.tensor([[1.0, 0.0]]))
def test_basic_multi_tensor_output(self) -> None:
model = BasicModel_MultiLayer(multi_input_module=True)
model.eval()
inputs = torch.tensor([[0.0, 100.0, 0.0]])
expected = ([[90.0, 100.0, 100.0, 100.0]], [[90.0, 100.0, 100.0, 100.0]])
self._assert_attributions(
model,
model.multi_relu,
inputs,
torch.zeros_like(inputs),
0,
expected,
n_samples=5,
)
def test_basic_multilayer_with_add_args(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, -20.0, 10.0]])
add_args = torch.ones(1, 3)
baselines = torch.randn(30, 3)
expected = [[-13.9510, 0.0]]
self._assert_attributions(
model, model.linear2, inputs, baselines, 0, expected, add_args=add_args
)
def test_basic_multilayer_compare_w_inp_features(self) -> None:
model = BasicModel_MultiLayer()
model.eval()
inputs = torch.tensor([[10.0, 20.0, 10.0]])
baselines = torch.randn(30, 3)
gs = GradientShap(model)
expected, delta = gs.attribute(
inputs, baselines, target=0, return_convergence_delta=True
)
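        # Re-run setUp to reset the fixed random seeds (BaseTest.setUp seeds
        # the RNGs), so that LayerGradientShap below draws the same baseline
        # samples and noise as the GradientShap call above.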
self.setUp()
self._assert_attributions(
model,
model.linear0,
inputs,
baselines,
0,
expected,
expected_delta=delta,
attribute_to_layer_input=True,
)
def test_classification(self) -> None:
def custom_baseline_fn(inputs):
num_in = inputs.shape[1]
return torch.arange(0.0, num_in * 4.0).reshape(4, num_in)
num_in = 40
n_samples = 10
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
model.eval()
inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
baselines = custom_baseline_fn
expected = torch.zeros(2, 20)
self._assert_attributions(
model, model.relu1, inputs, baselines, 1, expected, n_samples=n_samples
)
def test_basic_multi_input(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inputs = (torch.tensor([[10.0, 20.0, 10.0]]), torch.tensor([[1.0, 2.0, 1.0]]))
add_args = (torch.tensor([[1.0, 2.0, 3.0]]), 1.0)
baselines = (torch.randn(30, 3), torch.randn(30, 3))
expected = torch.tensor([[171.6841, 0.0]])
self._assert_attributions(
net, net.model.linear2, inputs, baselines, 0, expected, add_args=add_args
)
def _assert_attributions(
self,
model: Module,
layer: Module,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[TensorOrTupleOfTensorsGeneric, Callable],
target: TargetType,
expected: Union[
Tensor,
Tuple[Tensor, ...],
List[float],
List[List[float]],
Tuple[List[float], ...],
Tuple[List[List[float]], ...],
],
        expected_delta: Union[Tensor, None] = None,
n_samples: int = 5,
attribute_to_layer_input: bool = False,
add_args: Any = None,
) -> None:
lgs = LayerGradientShap(model, layer)
attrs, delta = lgs.attribute(
inputs,
baselines,
target=target,
additional_forward_args=add_args,
n_samples=n_samples,
stdevs=0.0009,
return_convergence_delta=True,
attribute_to_layer_input=attribute_to_layer_input,
)
assertTensorTuplesAlmostEqual(self, attrs, expected, delta=0.005)
if expected_delta is None:
_assert_attribution_delta(
self, inputs, attrs, n_samples, delta, is_layer=True
)
else:
for delta_i, expected_delta_i in zip(delta, expected_delta):
assertTensorAlmostEqual(self, delta_i, expected_delta_i, delta=0.01)
|
#!/usr/bin/env python3
import torch
from captum.attr._core.deep_lift import DeepLift, DeepLiftShap
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._core.feature_permutation import FeaturePermutation
from captum.attr._core.gradient_shap import GradientShap
from captum.attr._core.guided_backprop_deconvnet import Deconvolution, GuidedBackprop
from captum.attr._core.guided_grad_cam import GuidedGradCam
from captum.attr._core.input_x_gradient import InputXGradient
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.kernel_shap import KernelShap
from captum.attr._core.layer.grad_cam import LayerGradCam
from captum.attr._core.layer.internal_influence import InternalInfluence
from captum.attr._core.layer.layer_activation import LayerActivation
from captum.attr._core.layer.layer_conductance import LayerConductance
from captum.attr._core.layer.layer_deep_lift import LayerDeepLift, LayerDeepLiftShap
from captum.attr._core.layer.layer_feature_ablation import LayerFeatureAblation
from captum.attr._core.layer.layer_gradient_shap import LayerGradientShap
from captum.attr._core.layer.layer_gradient_x_activation import LayerGradientXActivation
from captum.attr._core.layer.layer_integrated_gradients import LayerIntegratedGradients
from captum.attr._core.layer.layer_lrp import LayerLRP
from captum.attr._core.lime import Lime
from captum.attr._core.lrp import LRP
from captum.attr._core.neuron.neuron_conductance import NeuronConductance
from captum.attr._core.neuron.neuron_deep_lift import NeuronDeepLift, NeuronDeepLiftShap
from captum.attr._core.neuron.neuron_feature_ablation import NeuronFeatureAblation
from captum.attr._core.neuron.neuron_gradient import NeuronGradient
from captum.attr._core.neuron.neuron_gradient_shap import NeuronGradientShap
from captum.attr._core.neuron.neuron_guided_backprop_deconvnet import (
NeuronDeconvolution,
NeuronGuidedBackprop,
)
from captum.attr._core.neuron.neuron_integrated_gradients import (
NeuronIntegratedGradients,
)
from captum.attr._core.occlusion import Occlusion
from captum.attr._core.saliency import Saliency
from captum.attr._core.shapley_value import ShapleyValueSampling
from captum.attr._utils.input_layer_wrapper import ModelInputWrapper
from tests.helpers.basic import set_all_random_seeds
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
BasicModel_MultiLayer_TrueMultiInput,
ReLULinearModel,
)
"""
This file defines a test configuration for attribution methods, particularly
defining valid input parameters for attribution methods. These test cases are
utilized for DataParallel tests, JIT tests, and target tests. Generally, these
tests follow a consistent structure of running the identified algorithm(s) in
two different ways, e.g. with a DataParallel or JIT wrapped model versus a
standard model, and verifying that the results match. New tests for additional
model variants or features can be built using this config.
The current schema for each test case (each element in the config list) includes
the following information:
* "name": String defining name for test config
* "algorithms": List of algorithms (Attribution classes) which are applicable for
the given test case
* "model": nn.Module model for given test
* "attribute_args": Arguments to be passed to attribute call of algorithm
* "layer": nn.Module corresponding to layer for Layer or Neuron attribution
* "noise_tunnel": True or False, based on whether to apply NoiseTunnel to the algorithm.
If True, "attribute_args" corresponds to arguments for NoiseTunnel.attribute.
* "baseline_distr": True or False based on whether baselines in "attribute_args" are
provided as a distribution or per-example.
* "target_delta": Delta for comparison in test_targets
* "dp_delta": Delta for comparison in test_data_parallel
To add tests for a new algorithm, simply add it to the algorithms list of any
existing test case with applicable parameters. If the algorithm has particular
arguments not covered by existing
test cases, add a new test case following the config schema described above. For
targets tests, ensure that the new test cases include cases with tensor or list
targets. If the new algorithm works with JIT models, make sure to also
add the method to the whitelist in test_jit.
To create new tests for all methods, follow the same structure as test_jit,
test_targets, or test_data_parallel. Each of these iterates through the test
config and creates relevant test cases based on the config.
"""
# Set random seeds to ensure deterministic behavior
set_all_random_seeds(1234)
config = [
# Attribution Method Configs
# Primary Methods (Generic Configs)
{
"name": "basic_single_target",
"algorithms": [
IntegratedGradients,
InputXGradient,
FeatureAblation,
DeepLift,
Saliency,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {"inputs": torch.randn(4, 3), "target": 1},
},
{
"name": "basic_multi_input",
"algorithms": [
IntegratedGradients,
InputXGradient,
FeatureAblation,
DeepLift,
Saliency,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer_MultiInput(),
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
},
"dp_delta": 0.001,
},
{
"name": "basic_multi_target",
"algorithms": [
IntegratedGradients,
InputXGradient,
FeatureAblation,
DeepLift,
Saliency,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {"inputs": torch.randn(4, 3), "target": [0, 1, 1, 0]},
},
{
"name": "basic_multi_input_multi_target",
"algorithms": [
IntegratedGradients,
InputXGradient,
FeatureAblation,
DeepLift,
Saliency,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer_MultiInput(),
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
},
"dp_delta": 0.0005,
},
{
"name": "basic_multiple_tuple_target",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
},
},
{
"name": "basic_tensor_single_target",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {"inputs": torch.randn(4, 3), "target": torch.tensor([0])},
},
{
"name": "basic_tensor_multi_target",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([1, 1, 0, 0]),
},
},
# Primary Configs with Baselines
{
"name": "basic_multiple_tuple_target_with_baselines",
"algorithms": [
IntegratedGradients,
FeatureAblation,
DeepLift,
ShapleyValueSampling,
Lime,
KernelShap,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
},
},
{
"name": "basic_tensor_single_target_with_baselines",
"algorithms": [
IntegratedGradients,
FeatureAblation,
DeepLift,
ShapleyValueSampling,
Lime,
KernelShap,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": torch.tensor([0]),
},
},
# Primary Configs with Internal Batching
{
"name": "basic_multiple_tuple_target_with_internal_batching",
"algorithms": [IntegratedGradients],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"internal_batch_size": 2,
},
},
# NoiseTunnel
{
"name": "basic_multi_input_multi_target_nt",
"algorithms": [
IntegratedGradients,
InputXGradient,
FeatureAblation,
DeepLift,
Saliency,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer_MultiInput(),
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
"nt_samples": 20,
"stdevs": 0.0,
},
"noise_tunnel": True,
"dp_delta": 0.01,
},
{
"name": "basic_multiple_target_with_baseline_nt",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [0, 1, 1, 0],
"nt_samples": 20,
"stdevs": 0.0,
},
"noise_tunnel": True,
},
{
"name": "basic_multiple_tuple_target_nt",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"nt_samples": 20,
"stdevs": 0.0,
},
"noise_tunnel": True,
},
{
"name": "basic_single_tensor_target_nt",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([0]),
"nt_samples": 20,
"stdevs": 0.0,
},
"noise_tunnel": True,
},
{
"name": "basic_multi_tensor_target_nt",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([0, 1, 1, 0]),
"nt_samples": 20,
"stdevs": 0.0,
},
"noise_tunnel": True,
},
{
"name": "basic_multi_tensor_target_batched_nt",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([0, 1, 1, 0]),
"nt_samples": 20,
"nt_samples_batch_size": 2,
"stdevs": 0.0,
},
"noise_tunnel": True,
},
# DeepLift SHAP
{
"name": "basic_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(6, 3),
"target": 0,
},
"baseline_distr": True,
},
{
"name": "basic_multi_input_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer_MultiInput(),
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"baselines": (torch.randn(4, 3), torch.randn(4, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
},
"baseline_distr": True,
},
{
"name": "basic_multiple_target_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": [0, 1, 1, 0],
},
"baseline_distr": True,
},
{
"name": "basic_multiple_tuple_target_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
},
"baseline_distr": True,
},
{
"name": "basic_single_tensor_targe_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": torch.tensor([0]),
},
"baseline_distr": True,
},
{
"name": "basic_multi_tensor_target_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": torch.tensor([0, 1, 1, 0]),
},
"baseline_distr": True,
},
# Gradient SHAP
{
"name": "basic_multi_inp_with_single_baseline_grad_shap",
"algorithms": [GradientShap],
"model": BasicModel_MultiLayer_MultiInput(),
"attribute_args": {
"inputs": (torch.randn(6, 3), torch.randn(6, 3)),
"baselines": (torch.randn(1, 3), torch.randn(1, 3)),
"additional_forward_args": (torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
"stdevs": 0.0,
"n_samples": 2000,
},
"target_delta": 1.0,
"dp_delta": 0.005,
"baseline_distr": True,
},
{
"name": "basic_multiple_target_with_single_baseline_grad_shap",
"algorithms": [GradientShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(1, 3),
"target": [0, 1, 1, 0],
"n_samples": 800,
"stdevs": 0.0,
},
"target_delta": 0.6,
"baseline_distr": True,
},
{
"name": "basic_multiple_tuple_target_with_single_baseline_grad_shap",
"algorithms": [GradientShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.15 * torch.randn(1, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"n_samples": 2000,
"stdevs": 0.0,
},
"target_delta": 0.6,
"dp_delta": 0.003,
"baseline_distr": True,
},
{
"name": "basic_single_tensor_target_with_single_baseline_grad_shap",
"algorithms": [GradientShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(1, 3),
"target": torch.tensor([0]),
"n_samples": 500,
"stdevs": 0.0,
},
"target_delta": 0.6,
"baseline_distr": True,
},
{
"name": "basic_multi_tensor_target_with_single_baseline_grad_shap",
"algorithms": [GradientShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(1, 3),
"target": torch.tensor([0, 1, 1, 0]),
"n_samples": 500,
"stdevs": 0.0,
},
"target_delta": 0.6,
"baseline_distr": True,
},
# Perturbation-Specific Configs
{
"name": "conv_with_perturbations_per_eval",
"algorithms": [
FeatureAblation,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
],
"model": BasicModel_ConvNet(),
"attribute_args": {
"inputs": torch.arange(400).view(4, 1, 10, 10).float(),
"target": 0,
"perturbations_per_eval": 20,
},
"dp_delta": 0.008,
},
{
"name": "basic_multiple_tuple_target_with_perturbations_per_eval",
"algorithms": [
FeatureAblation,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"perturbations_per_eval": 2,
},
},
{
"name": "conv_occlusion_with_perturbations_per_eval",
"algorithms": [Occlusion],
"model": BasicModel_ConvNet(),
"attribute_args": {
"inputs": torch.arange(400).view(4, 1, 10, 10).float(),
"perturbations_per_eval": 8,
"sliding_window_shapes": (1, 4, 2),
"target": 0,
},
},
{
"name": "basic_multi_input_with_perturbations_per_eval_occlusion",
"algorithms": [Occlusion],
"model": ReLULinearModel(),
"attribute_args": {
"inputs": (torch.randn(4, 3), torch.randn(4, 3)),
"perturbations_per_eval": 2,
"sliding_window_shapes": ((2,), (1,)),
},
},
{
"name": "basic_multiple_tuple_target_occlusion",
"algorithms": [Occlusion],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"sliding_window_shapes": (2,),
},
},
# Layer Attribution Method Configs
{
"name": "conv_layer_single_target",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
GuidedGradCam,
],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {"inputs": 100 * torch.randn(4, 1, 10, 10), "target": 1},
},
{
"name": "basic_layer_in_place",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer(inplace=True),
"layer": "relu",
"attribute_args": {"inputs": torch.randn(4, 3), "target": 0},
},
{
"name": "basic_layer_multi_output",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": "multi_relu",
"attribute_args": {"inputs": torch.randn(4, 3), "target": 0},
},
{
"name": "basic_layer_multi_input",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
},
},
{
"name": "basic_layer_multiple_target",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {"inputs": torch.randn(4, 3), "target": [0, 1, 1, 0]},
},
{
"name": "basic_layer_tensor_multiple_target",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([0, 1, 1, 0]),
},
},
{
"name": "basic_layer_multiple_tuple_target",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
},
},
{
"name": "basic_layer_multiple_tuple_target_with_internal_batching",
"algorithms": [LayerConductance, InternalInfluence, LayerIntegratedGradients],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"internal_batch_size": 2,
},
},
{
"name": "basic_layer_multi_input_with_internal_batching",
"algorithms": [LayerConductance, InternalInfluence, LayerIntegratedGradients],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
"internal_batch_size": 2,
},
},
{
"name": "basic_layer_multi_output_with_internal_batching",
"algorithms": [LayerConductance, InternalInfluence, LayerIntegratedGradients],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": "multi_relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": 0,
"internal_batch_size": 2,
},
},
# Layer Perturbation
{
"name": "basic_layer_multi_input_with_perturbations_per_eval",
"algorithms": [LayerFeatureAblation],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
"perturbations_per_eval": 2,
},
},
{
"name": "basic_layer_multi_output_perturbations_per_eval",
"algorithms": [LayerFeatureAblation],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": "multi_relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": 0,
"perturbations_per_eval": 2,
},
},
{
"name": "conv_layer_with_perturbations_per_eval",
"algorithms": [LayerFeatureAblation],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {
"inputs": 100 * torch.randn(4, 1, 10, 10),
"target": 1,
"perturbations_per_eval": 20,
},
},
# Layer DeepLiftSHAP
{
"name": "relu_layer_multi_inp_dl_shap",
"algorithms": [LayerDeepLiftShap],
"model": ReLULinearModel(),
"layer": "l3",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"baselines": (2 * torch.randn(2, 3), 6 * torch.randn(2, 3)),
},
"baseline_distr": True,
},
{
"name": "basic_layer_multi_output_dl_shap",
"algorithms": [LayerDeepLiftShap],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": "multi_relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": torch.randn(2, 3),
"target": 0,
},
"baseline_distr": True,
},
{
"name": "basic_layer_multi_inp_multi_target_dl_shap",
"algorithms": [LayerDeepLiftShap],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"baselines": (2 * torch.randn(11, 3), 6 * torch.randn(11, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
},
"baseline_distr": True,
},
{
"name": "basic_layer_multiple_target_dl_shap",
"algorithms": [LayerDeepLiftShap],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(6, 3),
"target": [0, 1, 1, 0],
},
"baseline_distr": True,
},
# Layer Gradient SHAP
{
"name": "relu_layer_multi_inp_grad_shap",
"algorithms": [LayerGradientShap],
"model": ReLULinearModel(),
"layer": "l3",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"baselines": (2 * torch.randn(2, 3), 6 * torch.randn(2, 3)),
},
"baseline_distr": True,
},
{
"name": "basic_layer_multi_output_grad_shap",
"algorithms": [LayerGradientShap],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": "multi_relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": torch.randn(2, 3),
"target": 0,
},
"baseline_distr": True,
},
{
"name": "basic_layer_multi_inp_multi_target_grad_shap",
"algorithms": [LayerGradientShap],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (torch.randn(6, 3), torch.randn(6, 3)),
"baselines": (torch.randn(2, 3), torch.randn(2, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
"n_samples": 1000,
},
"baseline_distr": True,
"target_delta": 0.6,
},
# Neuron Attribution Method Configs
{
"name": "basic_neuron",
"algorithms": [
NeuronGradient,
NeuronIntegratedGradients,
NeuronGuidedBackprop,
NeuronDeconvolution,
NeuronDeepLift,
NeuronFeatureAblation,
],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {"inputs": torch.randn(4, 3), "neuron_selector": 3},
},
{
"name": "conv_neuron",
"algorithms": [
NeuronGradient,
NeuronIntegratedGradients,
NeuronGuidedBackprop,
NeuronDeconvolution,
NeuronDeepLift,
NeuronFeatureAblation,
],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {
"inputs": 100 * torch.randn(4, 1, 10, 10),
"neuron_selector": (0, 1, 0),
},
},
{
"name": "basic_neuron_multi_input",
"algorithms": [
NeuronGradient,
NeuronIntegratedGradients,
NeuronGuidedBackprop,
NeuronDeconvolution,
NeuronDeepLift,
NeuronFeatureAblation,
],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"neuron_selector": (3,),
},
},
# Neuron Conductance (with target)
{
"name": "basic_neuron_single_target",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": 1,
"neuron_selector": 3,
},
},
{
"name": "basic_neuron_multiple_target",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [0, 1, 1, 0],
"neuron_selector": 3,
},
},
{
"name": "conv_neuron_single_target",
"algorithms": [NeuronConductance],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {
"inputs": 100 * torch.randn(4, 1, 10, 10),
"target": 1,
"neuron_selector": (0, 1, 0),
},
},
{
"name": "basic_neuron_multi_input_multi_target",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
"neuron_selector": 3,
},
},
{
"name": "basic_neuron_tensor_multiple_target",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([0, 1, 1, 0]),
"neuron_selector": 3,
},
},
{
"name": "basic_neuron_multiple_tuple_target",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"neuron_selector": 3,
},
},
# Neuron Conductance with Internal Batching
{
"name": "basic_neuron_multiple_tuple_target_with_internal_batching",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"internal_batch_size": 2,
"neuron_selector": 3,
},
},
{
"name": "basic_neuron_multi_input_multi_target_with_internal_batching",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
"internal_batch_size": 2,
"neuron_selector": 3,
},
},
# Neuron Gradient SHAP
{
"name": "basic_neuron_grad_shap",
"algorithms": [NeuronGradientShap],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": torch.randn(1, 3),
"neuron_selector": 3,
},
"target_delta": 0.6,
"baseline_distr": True,
},
{
"name": "basic_neuron_multi_inp_grad_shap",
"algorithms": [NeuronGradientShap],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"baselines": (10 * torch.randn(1, 3), 5 * torch.randn(1, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"neuron_selector": 3,
},
"target_delta": 0.6,
"baseline_distr": True,
},
# Neuron DeepLift SHAP
{
"name": "basic_neuron_dl_shap",
"algorithms": [NeuronDeepLiftShap],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(6, 3),
"neuron_selector": (3,),
},
"baseline_distr": True,
},
{
"name": "basic_neuron_multi_input_dl_shap",
"algorithms": [NeuronDeepLiftShap],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"baselines": (torch.randn(4, 3), torch.randn(4, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"neuron_selector": 3,
},
"baseline_distr": True,
},
# Neuron Feature Ablation
{
"name": "conv_neuron_with_perturbations_per_eval",
"algorithms": [NeuronFeatureAblation],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {
"inputs": torch.arange(400).view(4, 1, 10, 10).float(),
"perturbations_per_eval": 20,
"neuron_selector": (0, 1, 0),
},
},
{
"name": "basic_neuron_multiple_input_with_baselines_and_perturbations_per_eval",
"algorithms": [NeuronFeatureAblation],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"baselines": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"neuron_selector": (3,),
"perturbations_per_eval": 2,
},
},
# Neuron Attribution with Functional Selector
{
"name": "basic_neuron_multi_input_function_selector",
"algorithms": [
NeuronGradient,
NeuronIntegratedGradients,
NeuronGuidedBackprop,
NeuronDeconvolution,
NeuronDeepLift,
NeuronFeatureAblation,
],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"neuron_selector": lambda x: torch.sum(x, 1),
},
},
# Neuron Attribution with slice Selector
{
"name": "conv_neuron_slice_selector",
"algorithms": [
NeuronGradient,
NeuronIntegratedGradients,
NeuronGuidedBackprop,
NeuronDeconvolution,
NeuronDeepLift,
NeuronFeatureAblation,
],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {
"inputs": 100 * torch.randn(4, 1, 10, 10),
"neuron_selector": (slice(0, 2, 1), 1, slice(0, 2, 1)),
},
},
# Layer Attribution with Multiple Layers
{
"name": "basic_activation_multi_layer_multi_output",
"algorithms": [LayerActivation],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": ["multi_relu", "linear1", "linear0"],
"attribute_args": {"inputs": torch.randn(4, 3)},
},
{
"name": "basic_gradient_multi_layer_multi_output",
"algorithms": [LayerGradientXActivation],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": ["multi_relu", "linear1", "linear0"],
"attribute_args": {"inputs": torch.randn(4, 3), "target": 0},
},
{
"name": "basic_layer_ig_multi_layer_multi_output",
"algorithms": [LayerIntegratedGradients],
"model": BasicModel_MultiLayer_TrueMultiInput(),
"layer": ["m1", "m234"],
"attribute_args": {
"inputs": (
torch.randn(5, 3),
torch.randn(5, 3),
torch.randn(5, 3),
torch.randn(5, 3),
),
"target": 0,
},
},
{
"name": "basic_layer_ig_multi_layer_multi_output_with_input_wrapper",
"algorithms": [LayerIntegratedGradients],
"model": ModelInputWrapper(BasicModel_MultiLayer_TrueMultiInput()),
"layer": ["module.m1", "module.m234"],
"attribute_args": {
"inputs": (
torch.randn(5, 3),
torch.randn(5, 3),
torch.randn(5, 3),
torch.randn(5, 3),
),
"target": 0,
},
},
# Layer LRP
{
"name": "basic_layer_lrp",
"algorithms": [
LayerLRP,
],
"model": BasicModel_MultiLayer(),
"layer": "linear2",
"attribute_args": {"inputs": torch.randn(4, 3), "target": 0},
},
{
"name": "basic_layer_lrp_multi_input",
"algorithms": [
LayerLRP,
],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.linear1",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
},
"dp_delta": 0.0002,
},
]
|
#!/usr/bin/env python3
import typing
from typing import Any, cast, Dict, List, Tuple, Type, Union
from captum.attr._core.lime import Lime
from captum.attr._models.base import _get_deep_layer_name
from captum.attr._utils.attribution import Attribution
from torch.nn import Module
def gen_test_name(
prefix: str, test_name: str, algorithm: Type[Attribution], noise_tunnel: bool
) -> str:
# Generates test name for dynamically generated tests
return (
prefix
+ "_"
+ test_name
+ "_"
+ algorithm.__name__
+ ("NoiseTunnel" if noise_tunnel else "")
)
def parse_test_config(
test_config: Dict,
) -> Tuple[List[Type[Attribution]], Module, Dict[str, Any], Module, bool, bool]:
algorithms = cast(List[Type[Attribution]], test_config["algorithms"])
model = test_config["model"]
args = cast(Dict[str, Any], test_config["attribute_args"])
layer = test_config["layer"] if "layer" in test_config else None
noise_tunnel = (
test_config["noise_tunnel"] if "noise_tunnel" in test_config else False
)
baseline_distr = (
test_config["baseline_distr"] if "baseline_distr" in test_config else False
)
return algorithms, model, args, layer, noise_tunnel, baseline_distr
def should_create_generated_test(algorithm: Type[Attribution]) -> bool:
if issubclass(algorithm, Lime):
try:
import sklearn # noqa: F401
            assert sklearn.__version__ >= "0.23.0", (
                "Must have sklearn version 0.23.0 or higher to use "
                "sample_weight in Lasso regression."
            )
return True
except (ImportError, AssertionError):
return False
return True
@typing.overload
def get_target_layer(model: Module, layer_name: str) -> Module:
...
@typing.overload
def get_target_layer(model: Module, layer_name: List[str]) -> List[Module]:
...
def get_target_layer(
model: Module, layer_name: Union[str, List[str]]
) -> Union[Module, List[Module]]:
if isinstance(layer_name, str):
return _get_deep_layer_name(model, layer_name)
else:
return [
_get_deep_layer_name(model, single_layer_name)
for single_layer_name in layer_name
]
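# A minimal usage sketch (an illustrative addition, not part of the original
# helpers): it shows how the functions above turn a single config entry into a
# generated test name. The sample config below is hypothetical.
if __name__ == "__main__":
    import torch
    from captum.attr import LayerLRP
    from tests.helpers.basic_models import BasicModel_MultiLayer

    sample_config = {
        "name": "basic_layer_lrp",
        "algorithms": [LayerLRP],
        "model": BasicModel_MultiLayer(),
        "attribute_args": {"inputs": torch.randn(4, 3), "target": 0},
    }
    algorithms, model, args, layer, noise_tunnel, baseline_distr = parse_test_config(
        sample_config
    )
    for algorithm in algorithms:
        if should_create_generated_test(algorithm):
            # Prints "test_basic_layer_lrp_LayerLRP"
            print(gen_test_name("test", sample_config["name"], algorithm, noise_tunnel))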
|
#!/usr/bin/env python3
import numpy as np
import torch
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum.attr._utils.approximation_methods import approximation_parameters
from captum.attr._utils.attribution import LayerAttribution
from captum.attr._utils.common import _reshape_and_sum
"""
Note: This implementation of conductance follows the procedure described in the original
paper exactly (https://arxiv.org/abs/1805.12233), computing gradients of output with
respect to hidden neurons and each hidden neuron with respect to the input and summing
appropriately. Computing the gradient of each neuron with respect to the input is
not necessary to just compute the conductance of a given layer, so the main
implementation of conductance does not use this approach in order to compute layer
conductance more efficiently (https://arxiv.org/pdf/1807.09946.pdf).
This implementation is used only for testing to verify that the output matches
that of the main implementation.
"""
class ConductanceReference(LayerAttribution):
def __init__(self, forward_func, layer) -> None:
r"""
        Args:
forward_func: The forward function of the model or any modification of it
layer: Layer for which output attributions are computed.
Output size of attribute matches that of layer output.
"""
super().__init__(forward_func, layer)
def _conductance_grads(self, forward_fn, input, target_ind=None):
with torch.autograd.set_grad_enabled(True):
# Set a forward hook on specified module and run forward pass to
# get output tensor size.
saved_tensor = None
def forward_hook(module, inp, out):
nonlocal saved_tensor
saved_tensor = out
hook = self.layer.register_forward_hook(forward_hook)
output = forward_fn(input)
# Compute layer output tensor dimensions and total number of units.
            # The hidden layer tensor is assumed to have dimension
            # (batch, num_hidden, ...), where the product of the dimensions
            # from index 1 onward corresponds to the total number of hidden
            # neurons in the layer.
layer_size = tuple(saved_tensor.size())[1:]
layer_units = int(np.prod(layer_size))
# Remove unnecessary forward hook.
hook.remove()
# Backward hook function to override gradients in order to obtain
# just the gradient of each hidden unit with respect to input.
saved_grads = None
def backward_hook(grads):
nonlocal saved_grads
saved_grads = grads
zero_mat = torch.zeros((1,) + layer_size)
scatter_indices = torch.arange(0, layer_units).view_as(zero_mat)
# Creates matrix with each layer containing a single unit with
# value 1 and remaining zeros, which will provide gradients
# with respect to each unit independently.
to_return = torch.zeros((layer_units,) + layer_size).scatter(
0, scatter_indices, 1
)
to_repeat = [1] * len(to_return.shape)
to_repeat[0] = grads.shape[0] // to_return.shape[0]
expanded = to_return.repeat(to_repeat)
return expanded
# Create a forward hook in order to attach backward hook to appropriate
# tensor. Save backward hook in order to remove hook appropriately.
back_hook = None
def forward_hook_register_back(module, inp, out):
nonlocal back_hook
back_hook = out.register_hook(backward_hook)
hook = self.layer.register_forward_hook(forward_hook_register_back)
# Expand input to include layer_units copies of each input.
# This allows obtaining gradient with respect to each hidden unit
# in one pass.
expanded_input = torch.repeat_interleave(input, layer_units, dim=0)
output = forward_fn(expanded_input)
hook.remove()
output = output[:, target_ind] if target_ind is not None else output
input_grads = torch.autograd.grad(torch.unbind(output), expanded_input)
# Remove backwards hook
back_hook.remove()
# Remove duplicates in gradient with respect to hidden layer,
# choose one for each layer_units indices.
output_mid_grads = torch.index_select(
saved_grads,
0,
torch.tensor(range(0, input_grads[0].shape[0], layer_units)),
)
return input_grads[0], output_mid_grads, layer_units
def attribute(
self,
inputs,
baselines=None,
target=None,
n_steps=500,
method="riemann_trapezoid",
):
r"""
        Computes conductance using gradients along the path, applying
        a Riemann method or Gauss-Legendre quadrature.
The details of the approach can be found here:
https://arxiv.org/abs/1805.12233
        Args:
inputs: A single high dimensional input tensor, in which
dimension 0 corresponds to number of examples.
baselines: A single high dimensional baseline tensor,
which has the same shape as the input
target: Predicted class index. This is necessary only for
classification use cases
n_steps: The number of steps used by the approximation method
method: Method for integral approximation, one of `riemann_right`,
`riemann_middle`, `riemann_trapezoid` or `gausslegendre`
        Returns:
attributions: Total conductance with respect to each neuron in
output of given layer
"""
if baselines is None:
baselines = 0
gradient_mask = apply_gradient_requirements((inputs,))
# retrieve step size and scaling factor for specified approximation method
step_sizes_func, alphas_func = approximation_parameters(method)
step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)
# compute scaled inputs from baseline to final input.
scaled_features = torch.cat(
[baselines + alpha * (inputs - baselines) for alpha in alphas], dim=0
)
# Conductance Gradients - Returns gradient of output with respect to
# hidden layer, gradient of hidden layer with respect to input,
# and number of hidden units.
input_gradients, mid_layer_gradients, hidden_units = self._conductance_grads(
self.forward_func, scaled_features, target
)
# Multiply gradient of hidden layer with respect to input by input - baseline
scaled_input_gradients = torch.repeat_interleave(
inputs - baselines, hidden_units, dim=0
)
scaled_input_gradients = input_gradients * scaled_input_gradients.repeat(
*([len(alphas)] + [1] * (len(scaled_input_gradients.shape) - 1))
)
# Sum gradients for each input neuron in order to have total
# for each hidden unit and reshape to match hidden layer shape
summed_input_grads = torch.sum(
scaled_input_gradients, tuple(range(1, len(scaled_input_gradients.shape)))
).view_as(mid_layer_gradients)
        # Rescale gradients of hidden layer by step size.
scaled_grads = mid_layer_gradients.contiguous().view(
n_steps, -1
) * torch.tensor(step_sizes).view(n_steps, 1).to(mid_layer_gradients.device)
undo_gradient_requirements((inputs,), gradient_mask)
        # Element-wise multiply gradient of output with respect to hidden layer
# and summed gradients with respect to input (chain rule) and sum across
# stepped inputs.
return _reshape_and_sum(
scaled_grads.view(mid_layer_gradients.shape) * summed_input_grads,
n_steps,
inputs.shape[0],
mid_layer_gradients.shape[1:],
)
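# A minimal usage sketch (an illustrative addition, not part of the original
# reference implementation), run against one of the shared test models:
if __name__ == "__main__":
    from tests.helpers.basic_models import BasicModel_MultiLayer

    model = BasicModel_MultiLayer()
    cond_ref = ConductanceReference(model, model.relu)
    inputs = torch.randn(2, 3)
    # Total conductance of each of the four relu-layer neurons for target
    # class 0; the result has shape (2, 4), matching the layer output.
    attributions = cond_ref.attribute(inputs, target=0, n_steps=100)
    print(attributions.shape)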
|
#!/usr/bin/env python3
from typing import cast, Iterable
import torch
from captum.concept._core.concept import Concept
from captum.concept._utils.data_iterator import dataset_to_dataloader
from tests.helpers.basic import BaseTest
from torch.utils.data import IterableDataset
class CustomIterableDataset(IterableDataset):
r"""
An auxiliary class for iterating through an image dataset.
"""
def __init__(self, get_tensor_from_filename_func, path) -> None:
r"""
Args:
path (str): Path to dataset files
"""
self.path = path
self.file_itr = ["x"] * 2
self.get_tensor_from_filename_func = get_tensor_from_filename_func
def get_tensor_from_filename(self, filename):
return self.get_tensor_from_filename_func(filename)
def __iter__(self):
mapped_itr = map(self.get_tensor_from_filename, self.file_itr)
return mapped_itr
class Test(BaseTest):
def test_create_concepts_from_images(self) -> None:
def get_tensor_from_filename(filename):
return torch.rand(3, 224, 224)
# Striped
concepts_path = "./dummy/concepts/striped/"
dataset = CustomIterableDataset(get_tensor_from_filename, concepts_path)
striped_iter = dataset_to_dataloader(dataset)
self.assertEqual(
len(cast(CustomIterableDataset, striped_iter.dataset).file_itr), 2
)
concept = Concept(id=0, name="striped", data_iter=striped_iter)
for data in cast(Iterable, concept.data_iter):
self.assertEqual(data.shape[1:], torch.Size([3, 224, 224]))
# Random
concepts_path = "./dummy/concepts/random/"
dataset = CustomIterableDataset(get_tensor_from_filename, concepts_path)
random_iter = dataset_to_dataloader(dataset)
self.assertEqual(
len(cast(CustomIterableDataset, random_iter.dataset).file_itr), 2
)
concept = Concept(id=1, name="random", data_iter=random_iter)
for data in cast(Iterable, concept.data_iter):
self.assertEqual(data.shape[1:], torch.Size([3, 224, 224]))
|
#!/usr/bin/env python3
import glob
import os
import tempfile
import unittest
from collections import defaultdict, OrderedDict
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
Iterator,
List,
Set,
Tuple,
Union,
)
import torch
from captum._utils.av import AV
from captum._utils.common import _get_module_from_name
from captum.concept._core.concept import Concept
from captum.concept._core.tcav import TCAV
from captum.concept._utils.classifier import Classifier
from captum.concept._utils.common import concepts_to_str
from captum.concept._utils.data_iterator import dataset_to_dataloader
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel_ConvNet
from torch import Tensor
from torch.utils.data import DataLoader, IterableDataset
class CustomClassifier(Classifier):
r"""
    Wraps a custom linear Classifier that is necessary for the
    implementation of Concept Activation Vectors (TCAV), as described
in the paper:
https://arxiv.org/pdf/1711.11279.pdf
This class simulates the output of a Linear Classifier such as
sklearn without actually using it.
"""
def __init__(self) -> None:
Classifier.__init__(self)
def train_and_eval(
self, dataloader: DataLoader, **kwargs: Any
) -> Union[Dict, None]:
inputs = []
labels = []
for input, label in dataloader:
inputs.append(input)
labels.append(label)
inputs = torch.cat(inputs)
labels = torch.cat(labels)
# update concept ids aka classes
self._classes = list(OrderedDict.fromkeys([label.item() for label in labels]))
        # Training is skipped for performance reasons and to stay independent of sklearn
_, x_test, _, y_test = train_test_split(inputs, labels)
        # A prediction tensor with n_inputs * (1 - test_split) entries
        # (one per test sample) should be returned here.
# Assemble a list with size inputs.shape[0], divided in 4 quarters
# [0, 0, 0, ... | 1, 1, 1, ... | 0, 0, 0, ... | 1, 1, 1, ... ]
pred = [0] * x_test.shape[0]
        # Store a quarter of x_test.shape[0] (sh_4) and use it
sh_4 = x_test.shape[0] / 4
for i in range(1, 4, 2):
from_ = round(i * sh_4)
to_ = round((i + 1) * sh_4)
pred[from_:to_] = [1] * (round((i + 1) * sh_4) - round(i * sh_4))
y_pred = torch.tensor(pred)
score = y_pred == y_test
accs = score.float().mean()
        # A hack to mock weights for two different layers
self.num_features = input.shape[1]
return {"accs": accs}
def weights(self) -> Tensor:
if self.num_features != 16:
return torch.randn(2, self.num_features)
return torch.tensor(
[
[
-0.2167,
-0.0809,
-0.1235,
-0.2450,
0.2954,
0.5409,
-0.2587,
-0.3428,
0.2486,
-0.0123,
0.2737,
0.4876,
-0.1133,
0.1616,
-0.2016,
-0.0413,
],
[
-0.2167,
-0.0809,
-0.1235,
-0.2450,
0.2954,
0.5409,
-0.2587,
-0.3428,
0.2486,
-0.0123,
0.2737,
0.4876,
-0.1133,
0.2616,
-0.2016,
-0.0413,
],
],
dtype=torch.float64,
)
def classes(self) -> List[int]:
return self._classes
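# For orientation (an added note): TCAV relies only on the three-method
# Classifier contract exercised above. train_and_eval() consumes a DataLoader
# of (activation, concept_id) batches and optionally returns metrics, while
# weights() and classes() expose the fitted CAV directions and the concept ids
# they separate.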
class CustomClassifier_WO_Returning_Metrics(CustomClassifier):
def __init__(self) -> None:
CustomClassifier.__init__(self)
def train_and_eval(
self, dataloader: DataLoader, **kwargs: Any
) -> Union[Dict, None]:
CustomClassifier.train_and_eval(self, dataloader)
return None
class CustomClassifier_W_Flipped_Class_Id(CustomClassifier):
def __init__(self) -> None:
CustomClassifier.__init__(self)
def weights(self) -> Tensor:
_weights = CustomClassifier.weights(self)
_weights[0], _weights[1] = _weights[1], _weights[0].clone()
return _weights
def classes(self) -> List[int]:
_classes = CustomClassifier.classes(self)
_classes[0], _classes[1] = _classes[1], _classes[0]
return _classes
class CustomIterableDataset(IterableDataset):
r"""
Auxiliary class for iterating through an image dataset.
"""
def __init__(
self, get_tensor_from_filename_func: Callable, path: str, num_samples=100
) -> None:
r"""
Args:
path (str): Path to dataset files
"""
self.path = path
self.file_itr = ["x"] * num_samples
self.get_tensor_from_filename_func = get_tensor_from_filename_func
def get_tensor_from_filename(self, filename: str) -> Tensor:
return self.get_tensor_from_filename_func(filename)
def __iter__(self) -> Iterator:
mapped_itr = map(self.get_tensor_from_filename, self.file_itr)
return mapped_itr
def train_test_split(
x_list: Tensor, y_list: Union[Tensor, List[int]], test_split: float = 0.33
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
z_list = list(zip(x_list, y_list))
# Split
test_size = int(test_split * len(z_list))
z_test, z_train = z_list[:test_size], z_list[test_size:]
x_test, y_test = zip(*z_test)
x_train, y_train = zip(*z_train)
x_train = torch.stack(x_train)
x_test = torch.stack(x_test)
y_train = torch.stack(y_train)
y_test = torch.stack(y_test)
y_train[: len(y_train) // 2] = 0
y_train[len(y_train) // 2 :] = 1
y_test[: len(y_test) // 2] = 0
y_test[len(y_test) // 2 :] = 1
return x_train, x_test, y_train, y_test
def get_tensor_from_filename(filename: str) -> Tensor:
file_tensor = (
torch.tensor(
[
[
[
0.4963,
0.7682,
0.0885,
0.1320,
0.3074,
0.6341,
0.4901,
0.8964,
0.4556,
0.6323,
],
[
0.3489,
0.4017,
0.0223,
0.1689,
0.2939,
0.5185,
0.6977,
0.8000,
0.1610,
0.2823,
],
[
0.6816,
0.9152,
0.3971,
0.8742,
0.4194,
0.5529,
0.9527,
0.0362,
0.1852,
0.3734,
],
[
0.3051,
0.9320,
0.1759,
0.2698,
0.1507,
0.0317,
0.2081,
0.9298,
0.7231,
0.7423,
],
[
0.5263,
0.2437,
0.5846,
0.0332,
0.1387,
0.2422,
0.8155,
0.7932,
0.2783,
0.4820,
],
[
0.8198,
0.9971,
0.6984,
0.5675,
0.8352,
0.2056,
0.5932,
0.1123,
0.1535,
0.2417,
],
[
0.7262,
0.7011,
0.2038,
0.6511,
0.7745,
0.4369,
0.5191,
0.6159,
0.8102,
0.9801,
],
[
0.1147,
0.3168,
0.6965,
0.9143,
0.9351,
0.9412,
0.5995,
0.0652,
0.5460,
0.1872,
],
[
0.0340,
0.9442,
0.8802,
0.0012,
0.5936,
0.4158,
0.4177,
0.2711,
0.6923,
0.2038,
],
[
0.6833,
0.7529,
0.8579,
0.6870,
0.0051,
0.1757,
0.7497,
0.6047,
0.1100,
0.2121,
],
]
]
)
* 100
)
return file_tensor
def get_inputs_tensor() -> Tensor:
input_tensor = torch.tensor(
[
[
[
[
-1.1258e00,
-1.1524e00,
-2.5058e-01,
-4.3388e-01,
8.4871e-01,
6.9201e-01,
-3.1601e-01,
-2.1152e00,
3.2227e-01,
-1.2633e00,
],
[
3.4998e-01,
3.0813e-01,
1.1984e-01,
1.2377e00,
1.1168e00,
-2.4728e-01,
-1.3527e00,
-1.6959e00,
5.6665e-01,
7.9351e-01,
],
[
5.9884e-01,
-1.5551e00,
-3.4136e-01,
1.8530e00,
7.5019e-01,
-5.8550e-01,
-1.7340e-01,
1.8348e-01,
1.3894e00,
1.5863e00,
],
[
9.4630e-01,
-8.4368e-01,
-6.1358e-01,
3.1593e-02,
-4.9268e-01,
2.4841e-01,
4.3970e-01,
1.1241e-01,
6.4079e-01,
4.4116e-01,
],
[
-1.0231e-01,
7.9244e-01,
-2.8967e-01,
5.2507e-02,
5.2286e-01,
2.3022e00,
-1.4689e00,
-1.5867e00,
-6.7309e-01,
8.7283e-01,
],
[
1.0554e00,
1.7784e-01,
-2.3034e-01,
-3.9175e-01,
5.4329e-01,
-3.9516e-01,
-4.4622e-01,
7.4402e-01,
1.5210e00,
3.4105e00,
],
[
-1.5312e00,
-1.2341e00,
1.8197e00,
-5.5153e-01,
-5.6925e-01,
9.1997e-01,
1.1108e00,
1.2899e00,
-1.4782e00,
2.5672e00,
],
[
-4.7312e-01,
3.3555e-01,
-1.6293e00,
-5.4974e-01,
-4.7983e-01,
-4.9968e-01,
-1.0670e00,
1.1149e00,
-1.4067e-01,
8.0575e-01,
],
[
-9.3348e-02,
6.8705e-01,
-8.3832e-01,
8.9182e-04,
8.4189e-01,
-4.0003e-01,
1.0395e00,
3.5815e-01,
-2.4600e-01,
2.3025e00,
],
[
-1.8817e00,
-4.9727e-02,
-1.0450e00,
-9.5650e-01,
3.3532e-02,
7.1009e-01,
1.6459e00,
-1.3602e00,
3.4457e-01,
5.1987e-01,
],
]
],
[
[
[
-2.6133e00,
-1.6965e00,
-2.2824e-01,
2.7995e-01,
2.4693e-01,
7.6887e-02,
3.3801e-01,
4.5440e-01,
4.5694e-01,
-8.6537e-01,
],
[
7.8131e-01,
-9.2679e-01,
-2.1883e-01,
-2.4351e00,
-7.2915e-02,
-3.3986e-02,
9.6252e-01,
3.4917e-01,
-9.2146e-01,
-5.6195e-02,
],
[
-6.2270e-01,
-4.6372e-01,
1.9218e00,
-4.0255e-01,
1.2390e-01,
1.1648e00,
9.2337e-01,
1.3873e00,
-8.8338e-01,
-4.1891e-01,
],
[
-8.0483e-01,
5.6561e-01,
6.1036e-01,
4.6688e-01,
1.9507e00,
-1.0631e00,
-7.7326e-02,
1.1640e-01,
-5.9399e-01,
-1.2439e00,
],
[
-1.0209e-01,
-1.0335e00,
-3.1264e-01,
2.4579e-01,
-2.5964e-01,
1.1834e-01,
2.4396e-01,
1.1646e00,
2.8858e-01,
3.8660e-01,
],
[
-2.0106e-01,
-1.1793e-01,
1.9220e-01,
-7.7216e-01,
-1.9003e00,
1.3068e-01,
-7.0429e-01,
3.1472e-01,
1.5739e-01,
3.8536e-01,
],
[
9.6715e-01,
-9.9108e-01,
3.0161e-01,
-1.0732e-01,
9.9846e-01,
-4.9871e-01,
7.6111e-01,
6.1830e-01,
3.1405e-01,
2.1333e-01,
],
[
-1.2005e-01,
3.6046e-01,
-3.1403e-01,
-1.0787e00,
2.4081e-01,
-1.3962e00,
-6.6144e-02,
-3.5836e-01,
-1.5616e00,
-3.5464e-01,
],
[
1.0811e00,
1.3148e-01,
1.5735e00,
7.8143e-01,
-5.1107e-01,
-1.7137e00,
-5.1006e-01,
-4.7489e-01,
-6.3340e-01,
-1.4677e00,
],
[
-8.7848e-01,
-2.0784e00,
-1.1005e00,
-7.2013e-01,
1.1931e-02,
3.3977e-01,
-2.6345e-01,
1.2805e00,
1.9395e-02,
-8.8080e-01,
],
]
],
],
requires_grad=True,
)
return input_tensor
def create_concept(concept_name: str, concept_id: int) -> Concept:
concepts_path = "./dummy/concepts/" + concept_name + "/"
dataset = CustomIterableDataset(get_tensor_from_filename, concepts_path)
concept_iter = dataset_to_dataloader(dataset)
concept = Concept(id=concept_id, name=concept_name, data_iter=concept_iter)
return concept
def create_concepts() -> Dict[str, Concept]:
# Function to create concept objects from a pre-set concept name list.
concept_names = ["striped", "ceo", "random", "dotted"]
concept_dict: Dict[str, Concept] = defaultdict()
for c, concept_name in enumerate(concept_names):
concept = create_concept(concept_name, c)
concept_dict[concept_name] = concept
return concept_dict
def find_concept_by_id(concepts: Set[Concept], id: int) -> Union[Concept, None]:
for concept in concepts:
if concept.id == id:
return concept
return None
def create_TCAV(
save_path: str,
classifier: Classifier,
layers: Union[str, List[str]],
attribute_to_layer_input: bool = False,
) -> TCAV:
model = BasicModel_ConvNet()
tcav = TCAV(
model,
layers,
classifier=classifier,
save_path=save_path,
attribute_to_layer_input=attribute_to_layer_input,
)
return tcav
def init_TCAV(
save_path: str,
classifier: Classifier,
layers: Union[str, List[str]],
attribute_to_layer_input: bool = False,
) -> Tuple[TCAV, Dict[str, Concept]]:
# Create Concepts
concepts_dict = create_concepts()
tcav = create_TCAV(
save_path, classifier, layers, attribute_to_layer_input=attribute_to_layer_input
)
return tcav, concepts_dict
def remove_pkls(path: str) -> None:
pkl_files = glob.glob(os.path.join(path, "*.pkl"))
for pkl_file in pkl_files:
os.remove(pkl_file)
class Test(BaseTest):
r"""
Class for testing the TCAV class through a sequence of operations:
- Create the Concepts (random tensor generation simulation)
- Create the TCAV class
- Generate Activations
- Compute the CAVs
- Interpret (the images - simulated with random tensors)
"""
def test_compute_cav_repeating_concept_ids(self) -> None:
with tempfile.TemporaryDirectory() as tmpdirname:
tcav = create_TCAV(tmpdirname, CustomClassifier(), "conv1")
experimental_sets = [
[create_concept("striped", 0), create_concept("random", 1)],
[create_concept("ceo", 2), create_concept("striped2", 0)],
]
with self.assertRaises(AssertionError):
tcav.compute_cavs(experimental_sets)
def test_compute_cav_repeating_concept_names(self) -> None:
with tempfile.TemporaryDirectory() as tmpdirname:
tcav = create_TCAV(tmpdirname, CustomClassifier(), "conv1")
experimental_sets = [
[create_concept("striped", 0), create_concept("random", 1)],
[create_concept("ceo", 2), create_concept("striped", 3)],
]
cavs = tcav.compute_cavs(experimental_sets)
self.assertTrue("0-1" in cavs.keys())
self.assertTrue("2-3" in cavs.keys())
self.assertEqual(cavs["0-1"]["conv1"].layer, "conv1")
self.assertEqual(cavs["2-3"]["conv1"].layer, "conv1")
self.assertEqual(cavs["0-1"]["conv1"].concepts[0].id, 0)
self.assertEqual(cavs["0-1"]["conv1"].concepts[0].name, "striped")
self.assertEqual(cavs["0-1"]["conv1"].concepts[1].id, 1)
self.assertEqual(cavs["0-1"]["conv1"].concepts[1].name, "random")
self.assertEqual(cavs["0-1"]["conv1"].stats["classes"], [0, 1])
self.assertAlmostEqual(
cavs["0-1"]["conv1"].stats["accs"].item(), 0.4848, delta=0.001
)
self.assertEqual(
list(cavs["0-1"]["conv1"].stats["weights"].shape), [2, 128]
)
self.assertEqual(cavs["2-3"]["conv1"].concepts[0].id, 2)
self.assertEqual(cavs["2-3"]["conv1"].concepts[0].name, "ceo")
self.assertEqual(cavs["2-3"]["conv1"].concepts[1].id, 3)
self.assertEqual(cavs["2-3"]["conv1"].concepts[1].name, "striped")
self.assertEqual(cavs["2-3"]["conv1"].stats["classes"], [2, 3])
self.assertAlmostEqual(
cavs["2-3"]["conv1"].stats["accs"].item(), 0.4848, delta=0.001
)
self.assertEqual(
list(cavs["2-3"]["conv1"].stats["weights"].shape), [2, 128]
)
def compute_cavs_interpret(
self,
experimental_sets: List[List[str]],
force_train: bool,
accs: float,
sign_count: float,
magnitude: float,
processes: int = 1,
remove_activation: bool = False,
layers: Union[str, List[str]] = "conv2",
attribute_to_layer_input: bool = False,
) -> None:
classifier = CustomClassifier()
self._compute_cavs_interpret(
experimental_sets,
force_train,
accs,
sign_count,
magnitude,
classifier,
processes=processes,
remove_activation=remove_activation,
layers=layers,
attribute_to_layer_input=attribute_to_layer_input,
)
def _compute_cavs_interpret(
self,
experimental_set_list: List[List[str]],
force_train: bool,
accs: Union[float, List[float]],
sign_count: Union[float, List[float]],
magnitude: Union[float, List[float]],
classifier: Classifier,
processes: int = 1,
remove_activation: bool = False,
layers: Union[str, List[str]] = "conv2",
attribute_to_layer_input: bool = False,
) -> None:
def wrap_in_list_if_not_already(input):
return (
input
if isinstance(input, list)
else [
input,
]
)
with tempfile.TemporaryDirectory() as tmpdirname:
tcav, concept_dict = init_TCAV(
tmpdirname,
classifier,
layers,
attribute_to_layer_input=attribute_to_layer_input,
)
experimental_sets = self._create_experimental_sets(
experimental_set_list, concept_dict
)
# Compute CAVs
tcav.compute_cavs(
experimental_sets,
force_train=force_train,
processes=processes,
)
concepts_key = concepts_to_str(experimental_sets[0])
_layers: List[str] = wrap_in_list_if_not_already(layers)
_accs: List[float] = wrap_in_list_if_not_already(accs)
_sign_counts: List[float] = wrap_in_list_if_not_already(sign_count)
_magnitudes: List[float] = wrap_in_list_if_not_already(magnitude)
for layer, acc, sign_count, magnitude in zip(
_layers, _accs, _sign_counts, _magnitudes
):
stats = cast(Dict[str, Tensor], tcav.cavs[concepts_key][layer].stats)
self.assertEqual(
stats["weights"].shape,
torch.Size([2, 16]),
)
if not isinstance(classifier, CustomClassifier_WO_Returning_Metrics):
self.assertAlmostEqual(
stats["accs"].item(),
acc,
delta=0.0001,
)
# Provoking a CAV absence by deleting the .pkl files and one
# activation
if remove_activation:
remove_pkls(tmpdirname)
for fl in glob.glob(tmpdirname + "/av/" + layer + "/random-*-*"):
os.remove(fl)
# Interpret
inputs = 100 * get_inputs_tensor()
scores = tcav.interpret(
inputs=inputs,
experimental_sets=experimental_sets,
target=0,
processes=processes,
)
self.assertAlmostEqual(
cast(float, scores[concepts_key][layer]["sign_count"][0].item()),
sign_count,
delta=0.0001,
)
self.assertAlmostEqual(
cast(float, scores[concepts_key][layer]["magnitude"][0].item()),
magnitude,
delta=0.0001,
)
def _create_experimental_sets(
self, experimental_set_list: List[List[str]], concept_dict: Dict[str, Concept]
) -> List[List[Concept]]:
experimental_sets = []
for concept_set in experimental_set_list:
concepts = []
for concept in concept_set:
self.assertTrue(concept in concept_dict)
concepts.append(concept_dict[concept])
experimental_sets.append(concepts)
return experimental_sets
# Init - Generate Activations
def test_TCAV_1(self) -> None:
# Create Concepts
concepts_dict = create_concepts()
for concept in concepts_dict.values():
self.assertTrue(concept.data_iter is not None)
data_iter = cast(DataLoader, concept.data_iter)
self.assertEqual(
len(cast(CustomIterableDataset, data_iter.dataset).file_itr), 100
)
self.assertTrue(concept.data_iter is not None)
total_batches = 0
for data in cast(Iterable, concept.data_iter):
total_batches += data.shape[0]
self.assertEqual(data.shape[1:], torch.Size([1, 10, 10]))
self.assertEqual(total_batches, 100)
def test_TCAV_generate_all_activations(self) -> None:
def forward_hook_wrapper(expected_act: Tensor):
def forward_hook(module, inp, out=None):
out = torch.reshape(out, (out.shape[0], -1))
self.assertEqual(out.detach().shape[1:], expected_act.shape[1:])
return forward_hook
with tempfile.TemporaryDirectory() as tmpdirname:
layers = ["conv1", "conv2", "fc1", "fc2"]
tcav, concept_dict = init_TCAV(
tmpdirname, CustomClassifier(), layers=layers
)
tcav.concepts = set(concept_dict.values())
# generating all activations for given layers and concepts
tcav.generate_all_activations()
# verify that all activations exist and have correct shapes
for layer in layers:
for _, concept in concept_dict.items():
self.assertTrue(
AV.exists(
tmpdirname, "default_model_id", concept.identifier, layer
)
)
concept_meta: Dict[int, int] = defaultdict(int)
for _, concept in concept_dict.items():
activations = AV.load(
tmpdirname, "default_model_id", concept.identifier, layer
)
def batch_collate(batch):
return torch.cat(batch)
self.assertTrue(concept.data_iter is not None)
assert not (activations is None)
for activation in cast(
Iterable, DataLoader(activations, collate_fn=batch_collate)
):
concept_meta[concept.id] += activation.shape[0]
layer_module = _get_module_from_name(tcav.model, layer)
for data in cast(Iterable, concept.data_iter):
hook = layer_module.register_forward_hook(
forward_hook_wrapper(activation)
)
tcav.model(data)
hook.remove()
# asserting the length of entire dataset for each concept
for concept_meta_i in concept_meta.values():
self.assertEqual(concept_meta_i, 100)
def test_TCAV_multi_layer(self) -> None:
concepts = [["striped", "random"], ["ceo", "random"]]
layers = ["conv1", "conv2"]
classifier = CustomClassifier()
with tempfile.TemporaryDirectory() as tmpdirname:
tcav, concept_dict = init_TCAV(tmpdirname, classifier, layers)
experimental_sets = self._create_experimental_sets(concepts, concept_dict)
# Interpret
inputs = 100 * get_inputs_tensor()
scores = tcav.interpret(
inputs=inputs,
experimental_sets=experimental_sets,
target=0,
processes=3,
)
self.assertEqual(len(scores.keys()), len(experimental_sets))
for _, tcavs in scores.items():
for _, tcav_i in tcavs.items():
self.assertEqual(tcav_i["sign_count"].shape[0], 2)
self.assertEqual(tcav_i["magnitude"].shape[0], 2)
# Force Train
def test_TCAV_1_1_a(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=5,
)
def test_TCAV_1_1_a_wo_acc_metric(self) -> None:
self._compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
True,
-1.0, # acc is not defined, this field will not be asserted
0.5000,
8.185208066890937e-09,
CustomClassifier_WO_Returning_Metrics(),
processes=2,
)
def test_TCAV_1_1_b(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"]], True, 0.4848, 0.5000, 8.185208066890937e-09
)
def test_TCAV_1_1_c(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"], ["striped", "ceo"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=6,
)
# Non-existing concept in the experimental set ("dotted")
def test_TCAV_1_1_d(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["dotted", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=4,
)
# Force Train
def test_TCAV_0_1(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=2,
)
def test_TCAV_0_1_attr_to_inputs(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=2,
layers="relu2",
attribute_to_layer_input=True,
)
# Do not Force Train
def test_TCAV_0_0(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=2,
)
# Non-existing concept in the experimental set ("dotted"), do Not Force Train
def test_TCAV_1_0_b(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["dotted", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=5,
)
# Do not Force Train, Missing Activation
def test_TCAV_1_0_1(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=5,
remove_activation=True,
)
    # Do not run in parallel:
# Force Train
def test_TCAV_x_1_1_a(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
def test_TCAV_x_1_1_b(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
def test_TCAV_x_1_1_c(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"], ["striped", "ceo"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
def test_TCAV_x_1_1_c_concept_order_changed(self) -> None:
self.compute_cavs_interpret(
[["random", "striped"], ["random", "ceo"], ["ceo", "striped"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
# Non-existing concept in the experimental set ("dotted")
def test_TCAV_x_1_1_d(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["dotted", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
# Do not Force Train
def test_TCAV_x_1_0_a(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
def test_TCAV_x_1_0_1_attr_to_inputs(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
remove_activation=True,
layers="relu2",
attribute_to_layer_input=True,
)
# Non-existing concept in the experimental set ("dotted"), do Not Force Train
def test_TCAV_x_1_0_b(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["dotted", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
# Do not Force Train, Missing Activation
def test_TCAV_x_1_0_1(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
remove_activation=True,
)
def test_TCAV_x_1_0_1_w_flipped_class_id(self) -> None:
self._compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
CustomClassifier_W_Flipped_Class_Id(),
processes=1,
)
# Testing TCAV with default classifier and experimental sets of varying lengths
    def test_exp_sets_with_different_lengths(self) -> None:
try:
import sklearn
import sklearn.linear_model
import sklearn.svm # noqa: F401
except ImportError:
raise unittest.SkipTest("sklearn is not available.")
# Create Concepts
concepts_dict = create_concepts()
# defining experimental sets of different length
experimental_set_list = [["striped", "random"], ["ceo", "striped", "random"]]
experimental_sets_diff_length = self._create_experimental_sets(
experimental_set_list, concepts_dict
)
exp_sets_striped_random = self._create_experimental_sets(
[["striped", "random"]], concepts_dict
)
exp_sets_ceo_striped_random = self._create_experimental_sets(
[["ceo", "striped", "random"]], concepts_dict
)
striped_random_str = concepts_to_str(exp_sets_striped_random[0])
ceo_striped_random_str = concepts_to_str(exp_sets_ceo_striped_random[0])
model = BasicModel_ConvNet()
model.eval()
layers = ["conv1", "conv2", "fc1", "fc2"]
inputs = torch.randn(5, 1, 10, 10)
with tempfile.TemporaryDirectory() as tmpdirname:
tcav_diff_length = TCAV(
model,
layers,
save_path=tmpdirname,
)
# computing tcav scores for `striped and random` set and
# `ceo, striped and random` set at once using one `interpret`
# call.
interpret_diff_lengths = tcav_diff_length.interpret(
inputs, experimental_sets=experimental_sets_diff_length, target=0
)
# computing tcav scores for striped and random
interpret_striped_random = tcav_diff_length.interpret(
inputs, experimental_sets=exp_sets_striped_random, target=0
)
# computing tcav scores for ceo, striped and random
interpret_ceo_striped_random = tcav_diff_length.interpret(
inputs, experimental_sets=exp_sets_ceo_striped_random, target=0
)
for combined, separate in zip(
interpret_diff_lengths[striped_random_str].items(),
interpret_striped_random[striped_random_str].items(),
):
self.assertEqual(combined[0], separate[0])
for c_tcav, s_tcav in zip(combined[1].items(), separate[1].items()):
self.assertEqual(c_tcav[0], s_tcav[0])
assertTensorAlmostEqual(self, c_tcav[1], s_tcav[1])
for combined, separate in zip(
interpret_diff_lengths[ceo_striped_random_str].items(),
interpret_ceo_striped_random[ceo_striped_random_str].items(),
):
self.assertEqual(combined[0], separate[0])
for c_tcav, s_tcav in zip(combined[1].items(), separate[1].items()):
self.assertEqual(c_tcav[0], s_tcav[0])
assertTensorAlmostEqual(self, c_tcav[1], s_tcav[1])
def test_model_ids_in_tcav(
self,
) -> None:
# creating concepts and mapping between concepts and their names
concepts_dict = create_concepts()
# defining experimental sets of different length
experimental_set_list = [["striped", "random"], ["dotted", "random"]]
experimental_sets = self._create_experimental_sets(
experimental_set_list, concepts_dict
)
model = BasicModel_ConvNet()
model.eval()
layer = "conv2"
inputs = 100 * get_inputs_tensor()
with tempfile.TemporaryDirectory() as tmpdirname:
tcav1 = TCAV(
model,
layer,
model_id="my_basic_model1",
classifier=CustomClassifier(),
save_path=tmpdirname,
)
interpret1 = tcav1.interpret(
inputs, experimental_sets=experimental_sets, target=0
)
tcav2 = TCAV(
model,
layer,
model_id="my_basic_model2",
classifier=CustomClassifier(),
save_path=tmpdirname,
)
interpret2 = tcav2.interpret(
inputs, experimental_sets=experimental_sets, target=0
)
# testing that different folders were created for two different
# ids of the model
self.assertTrue(
AV.exists(
tmpdirname,
"my_basic_model1",
concepts_dict["striped"].identifier,
layer,
)
)
self.assertTrue(
AV.exists(
tmpdirname,
"my_basic_model2",
concepts_dict["striped"].identifier,
layer,
)
)
for interpret1_elem, interpret2_elem in zip(interpret1, interpret2):
for interpret1_sub_elem, interpret2_sub_elem in zip(
interpret1[interpret1_elem], interpret2[interpret2_elem]
):
assertTensorAlmostEqual(
self,
interpret1[interpret1_elem][interpret1_sub_elem]["sign_count"],
interpret2[interpret2_elem][interpret2_sub_elem]["sign_count"],
0.0,
)
assertTensorAlmostEqual(
self,
interpret1[interpret1_elem][interpret1_sub_elem]["magnitude"],
interpret2[interpret2_elem][interpret2_sub_elem]["magnitude"],
0.0,
)
self.assertEqual(interpret1_sub_elem, interpret2_sub_elem)
self.assertEqual(interpret1_elem, interpret2_elem)
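# A condensed sketch (illustrative, not part of the original tests) of the
# workflow the Test class above exercises end to end:
if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmpdirname:
        tcav, concepts = init_TCAV(tmpdirname, CustomClassifier(), "conv2")
        experimental_sets = [[concepts["striped"], concepts["random"]]]
        # Train the CAVs, then score the concepts against target class 0.
        tcav.compute_cavs(experimental_sets, force_train=True)
        scores = tcav.interpret(
            inputs=100 * get_inputs_tensor(),
            experimental_sets=experimental_sets,
            target=0,
        )
        key = concepts_to_str(experimental_sets[0])
        print(scores[key]["conv2"]["sign_count"], scores[key]["conv2"]["magnitude"])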
|
#!/usr/bin/env python3
import torch
import torch.nn as nn
class SigmoidModel(nn.Module):
"""
Model architecture from:
https://medium.com/coinmonks/create-a-neural-network-in
-pytorch-and-make-your-life-simpler-ec5367895199
"""
def __init__(self, num_in, num_hidden, num_out) -> None:
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden)
self.lin2 = nn.Linear(num_hidden, num_out)
self.relu1 = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input):
lin1 = self.lin1(input)
lin2 = self.lin2(self.relu1(lin1))
return self.sigmoid(lin2)
class SoftmaxModel(nn.Module):
"""
Model architecture from:
https://adventuresinmachinelearning.com/pytorch-tutorial-deep-learning/
"""
def __init__(self, num_in, num_hidden, num_out, inplace=False) -> None:
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden)
self.lin2 = nn.Linear(num_hidden, num_hidden)
self.lin3 = nn.Linear(num_hidden, num_out)
self.relu1 = nn.ReLU(inplace=inplace)
self.relu2 = nn.ReLU(inplace=inplace)
self.softmax = nn.Softmax(dim=1)
def forward(self, input):
lin1 = self.relu1(self.lin1(input))
lin2 = self.relu2(self.lin2(lin1))
lin3 = self.lin3(lin2)
return self.softmax(lin3)
class SigmoidDeepLiftModel(nn.Module):
"""
Model architecture from:
https://medium.com/coinmonks/create-a-neural-network-in
-pytorch-and-make-your-life-simpler-ec5367895199
"""
def __init__(self, num_in, num_hidden, num_out) -> None:
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden, bias=False)
self.lin2 = nn.Linear(num_hidden, num_out, bias=False)
self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in))
self.lin2.weight = nn.Parameter(torch.ones(num_out, num_hidden))
self.relu1 = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input):
lin1 = self.lin1(input)
lin2 = self.lin2(self.relu1(lin1))
return self.sigmoid(lin2)
class SoftmaxDeepLiftModel(nn.Module):
"""
Model architecture from:
https://adventuresinmachinelearning.com/pytorch-tutorial-deep-learning/
"""
def __init__(self, num_in, num_hidden, num_out) -> None:
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden)
self.lin2 = nn.Linear(num_hidden, num_hidden)
self.lin3 = nn.Linear(num_hidden, num_out)
self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in))
self.lin2.weight = nn.Parameter(torch.ones(num_hidden, num_hidden))
self.lin3.weight = nn.Parameter(torch.ones(num_out, num_hidden))
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, input):
lin1 = self.relu1(self.lin1(input))
lin2 = self.relu2(self.lin2(lin1))
lin3 = self.lin3(lin2)
return self.softmax(lin3)
|
#!/usr/bin/env python3
from typing import no_type_check, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
"""
@no_type_check annotation is applied to type-hinted models to avoid errors
related to mismatch with parent (nn.Module) signature. # type_ignore is not
possible here, since it causes errors in JIT scripting code which parses
the relevant type hints.
"""
class BasicLinearReLULinear(nn.Module):
def __init__(self, in_features, out_features=5, bias=False) -> None:
super().__init__()
self.fc1 = nn.Linear(in_features, out_features, bias=bias)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(out_features, 1, bias=bias)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
return x
class MixedKwargsAndArgsModule(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y=None):
if y is not None:
return x + y
return x
class BasicModel(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, input):
input = 1 - F.relu(1 - input)
return input
class BasicModel2(nn.Module):
"""
Example model one from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1) - 1 - ReLU(x2))
"""
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2):
relu_out1 = F.relu(input1)
relu_out2 = F.relu(input2)
return F.relu(relu_out1 - 1 - relu_out2)
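# Worked example: f(3, 1) = ReLU(ReLU(3) - 1 - ReLU(1)) = ReLU(1) = 1, while
# any x1 <= 1 (with x2 >= 0) drives the outer ReLU to 0.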
class BasicModel3(nn.Module):
"""
Example model two from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2))
"""
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2):
relu_out1 = F.relu(input1 - 1)
relu_out2 = F.relu(input2)
return F.relu(relu_out1 - relu_out2)
class BasicModel4_MultiArgs(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2) / x3)
"""
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2, additional_input1, additional_input2=0):
relu_out1 = F.relu(input1 - 1)
relu_out2 = F.relu(input2)
relu_out2 = relu_out2.div(additional_input1)
return F.relu(relu_out1 - relu_out2)[:, additional_input2]
class BasicModel5_MultiArgs(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) * x3[0] - ReLU(x2) * x3[1])
"""
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2, additional_input1, additional_input2=0):
relu_out1 = F.relu(input1 - 1) * additional_input1[0]
relu_out2 = F.relu(input2)
relu_out2 = relu_out2 * additional_input1[1]
return F.relu(relu_out1 - relu_out2)[:, additional_input2]
class BasicModel6_MultiTensor(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2):
input = input1 + input2
return 1 - F.relu(1 - input)[:, 1]
class BasicLinearModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(7, 1)
def forward(self, x1, x2):
return self.linear(torch.cat((x1, x2), dim=-1))
class BasicLinearModel2(nn.Module):
def __init__(self, in_features, out_features) -> None:
super().__init__()
self.linear = nn.Linear(in_features, out_features, bias=False)
def forward(self, input):
return self.linear(input)
class BasicLinearModel_Multilayer(nn.Module):
def __init__(self, in_features, hidden_nodes, out_features) -> None:
super().__init__()
self.linear1 = nn.Linear(in_features, hidden_nodes, bias=False)
self.linear2 = nn.Linear(hidden_nodes, out_features, bias=False)
def forward(self, input):
x = self.linear1(input)
return self.linear2(x)
class ReLUDeepLiftModel(nn.Module):
r"""
https://www.youtube.com/watch?v=f_iAM0NPwnM
"""
def __init__(self) -> None:
super().__init__()
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
def forward(self, x1, x2, x3=2):
return 2 * self.relu1(x1) + x3 * self.relu2(x2 - 1.5)
class LinearMaxPoolLinearModel(nn.Module):
def __init__(self) -> None:
super().__init__()
# kernel size -> 4
self.lin1 = nn.Linear(4, 4, bias=False)
self.lin1.weight = nn.Parameter(torch.eye(4, 4))
self.pool1 = nn.MaxPool1d(4)
self.lin2 = nn.Linear(1, 1, bias=False)
self.lin2.weight = nn.Parameter(torch.ones(1, 1))
def forward(self, x):
x = x.unsqueeze(1)
return self.lin2(self.pool1(self.lin1(x))[:, 0, :])
class BasicModelWithReusedModules(nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin1 = nn.Linear(3, 2)
self.relu = nn.ReLU()
self.lin2 = nn.Linear(2, 2)
def forward(self, inputs):
return self.relu(self.lin2(self.relu(self.lin1(inputs))))
class BasicModelWithReusedLinear(nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin1 = nn.Linear(3, 3)
self.relu = nn.ReLU()
def forward(self, inputs):
return self.relu(self.lin1(self.relu(self.lin1(inputs))))
class BasicModelWithSparseInputs(nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin1 = nn.Linear(3, 1)
self.lin1.weight = nn.Parameter(torch.tensor([[3.0, 1.0, 2.0]]))
self.lin1.bias = nn.Parameter(torch.zeros(1))
def forward(self, inputs, sparse_list):
return (
self.lin1(inputs) + (sparse_list[0] if torch.numel(sparse_list) > 0 else 0)
).sum()
class BasicModel_MaxPool_ReLU(nn.Module):
def __init__(self, inplace=False) -> None:
super().__init__()
self.maxpool = nn.MaxPool1d(3)
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
return self.relu(self.maxpool(x)).sum(dim=1)
class TanhDeepLiftModel(nn.Module):
r"""
Same as the ReLUDeepLiftModel, but with activations
that can have negative outputs
"""
def __init__(self) -> None:
super().__init__()
self.tanh1 = nn.Tanh()
self.tanh2 = nn.Tanh()
def forward(self, x1, x2):
return 2 * self.tanh1(x1) + 2 * self.tanh2(x2 - 1.5)
class ReLULinearModel(nn.Module):
r"""
Simple architecture similar to:
https://github.com/marcoancona/DeepExplain/blob/master/deepexplain/tests/test_tensorflow.py#L65
"""
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.l1 = nn.Linear(3, 1, bias=False)
self.l2 = nn.Linear(3, 1, bias=False)
self.l1.weight = nn.Parameter(torch.tensor([[3.0, 1.0, 0.0]])) # type: ignore
self.l2.weight = nn.Parameter(torch.tensor([[2.0, 3.0, 0.0]])) # type: ignore
self.relu = nn.ReLU(inplace=inplace)
self.l3 = nn.Linear(2, 1, bias=False)
self.l3.weight = nn.Parameter(torch.tensor([[1.0, 1.0]])) # type: ignore
@no_type_check
def forward(self, x1: Tensor, x2: Tensor, x3: int = 1) -> Tensor:
return self.l3(self.relu(torch.cat([self.l1(x1), x3 * self.l2(x2)], dim=1)))
class SimpleLRPModel(nn.Module):
def __init__(self, inplace) -> None:
super().__init__()
self.linear = nn.Linear(3, 3, bias=False)
self.linear.weight.data.fill_(2.0)
self.relu = torch.nn.ReLU(inplace=inplace)
self.linear2 = nn.Linear(3, 1, bias=False)
self.linear2.weight.data.fill_(3.0)
self.dropout = torch.nn.Dropout(p=0.01)
def forward(self, x):
return self.dropout(self.linear2(self.relu(self.linear(x))))
class Conv1dSeqModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.seq = nn.Sequential(nn.Conv1d(4, 2, 1), nn.ReLU(), nn.Linear(1000, 1))
def forward(self, inputs):
return self.seq(inputs)
class TextModule(nn.Module):
r"""Basic model that has inner embedding layer. This layer can be pluged
into a larger network such as `BasicEmbeddingModel` and help us to test
nested embedding layers
"""
def __init__(self, num_embeddings, embedding_dim, second_embedding=False) -> None:
super().__init__()
self.inner_embedding = nn.Embedding(num_embeddings, embedding_dim)
self.second_embedding = second_embedding
if self.second_embedding:
self.inner_embedding2 = nn.Embedding(num_embeddings, embedding_dim)
def forward(self, input=None, another_input=None):
assert input is not None, "The inputs to embedding module must be specified"
embedding = self.inner_embedding(input)
if self.second_embedding:
another_embedding = self.inner_embedding2(
input if another_input is None else another_input
)
return embedding if another_input is None else embedding + another_embedding
class BasicEmbeddingModel(nn.Module):
r"""
Implements basic model with nn.Embedding layer. This simple model
will help us to test nested InterpretableEmbedding layers
The model has the following structure:
BasicEmbeddingModel(
(embedding1): Embedding(30, 100)
(embedding2): TextModule(
(inner_embedding): Embedding(30, 100)
)
(linear1): Linear(in_features=100, out_features=256, bias=True)
(relu): ReLU()
(linear2): Linear(in_features=256, out_features=1, bias=True)
)
"""
def __init__(
self,
num_embeddings=30,
embedding_dim=100,
hidden_dim=256,
output_dim=1,
nested_second_embedding=False,
) -> None:
super().__init__()
self.embedding1 = nn.Embedding(num_embeddings, embedding_dim)
self.embedding2 = TextModule(
num_embeddings, embedding_dim, nested_second_embedding
)
self.linear1 = nn.Linear(embedding_dim, hidden_dim, bias=False)
self.linear1.weight = nn.Parameter(torch.ones(hidden_dim, embedding_dim))
self.relu = nn.ReLU()
self.linear2 = nn.Linear(hidden_dim, output_dim)
self.linear2.weight = nn.Parameter(torch.ones(output_dim, hidden_dim))
def forward(self, input1, input2, input3=None):
embedding1 = self.embedding1(input1)
embedding2 = self.embedding2(input2, input3)
embeddings = embedding1 + embedding2
return self.linear2(self.relu(self.linear1(embeddings))).sum(1)
class MultiRelu(nn.Module):
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.relu1 = nn.ReLU(inplace=inplace)
self.relu2 = nn.ReLU(inplace=inplace)
@no_type_check
def forward(self, arg1: Tensor, arg2: Tensor) -> Tuple[Tensor, Tensor]:
return (self.relu1(arg1), self.relu2(arg2))
class BasicModel_MultiLayer(nn.Module):
def __init__(self, inplace=False, multi_input_module=False) -> None:
super().__init__()
# Linear 0 is simply identity transform
self.multi_input_module = multi_input_module
self.linear0 = nn.Linear(3, 3)
self.linear0.weight = nn.Parameter(torch.eye(3))
self.linear0.bias = nn.Parameter(torch.zeros(3))
self.linear1 = nn.Linear(3, 4)
self.linear1.weight = nn.Parameter(torch.ones(4, 3))
self.linear1.bias = nn.Parameter(torch.tensor([-10.0, 1.0, 1.0, 1.0]))
self.linear1_alt = nn.Linear(3, 4)
self.linear1_alt.weight = nn.Parameter(torch.ones(4, 3))
self.linear1_alt.bias = nn.Parameter(torch.tensor([-10.0, 1.0, 1.0, 1.0]))
self.multi_relu = MultiRelu(inplace=inplace)
self.relu = nn.ReLU(inplace=inplace)
self.linear2 = nn.Linear(4, 2)
self.linear2.weight = nn.Parameter(torch.ones(2, 4))
self.linear2.bias = nn.Parameter(torch.tensor([-1.0, 1.0]))
@no_type_check
def forward(
self,
x: Tensor,
add_input: Optional[Tensor] = None,
multidim_output: bool = False,
):
input = x if add_input is None else x + add_input
lin0_out = self.linear0(input)
lin1_out = self.linear1(lin0_out)
if self.multi_input_module:
relu_out1, relu_out2 = self.multi_relu(lin1_out, self.linear1_alt(input))
relu_out = relu_out1 + relu_out2
else:
relu_out = self.relu(lin1_out)
lin2_out = self.linear2(relu_out)
if multidim_output:
stack_mid = torch.stack((lin2_out, 2 * lin2_out), dim=2)
return torch.stack((stack_mid, 4 * stack_mid), dim=3)
else:
return lin2_out
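# Shape trace (an added note): an input of shape (batch, 3) passes through
# linear0 (3 -> 3, initialized to identity), linear1 (3 -> 4), ReLU (or
# MultiRelu when multi_input_module=True), and linear2 (4 -> 2), so the
# default output has shape (batch, 2); with multidim_output=True the stacked
# output has shape (batch, 2, 2, 2).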
class BasicModelBoolInput(nn.Module):
def __init__(self) -> None:
super().__init__()
self.mod = BasicModel_MultiLayer()
def forward(
self,
x: Tensor,
add_input: Optional[Tensor] = None,
mult: float = 10.0,
):
assert x.dtype is torch.bool, "Input must be boolean"
return self.mod(x.float() * mult, add_input)
class BasicModel_MultiLayer_MultiInput(nn.Module):
def __init__(self) -> None:
super().__init__()
self.model = BasicModel_MultiLayer()
@no_type_check
def forward(self, x1: Tensor, x2: Tensor, x3: Tensor, scale: int):
return self.model(scale * (x1 + x2 + x3))
class BasicModel_MultiLayer_TrueMultiInput(nn.Module):
def __init__(self) -> None:
super().__init__()
self.m1 = BasicModel_MultiLayer()
self.m234 = BasicModel_MultiLayer_MultiInput()
@no_type_check
def forward(
self, x1: Tensor, x2: Tensor, x3: Tensor, x4: Optional[Tensor] = None
) -> Tensor:
a = self.m1(x1)
if x4 is None:
b = self.m234(x2, x3, x1, scale=1)
else:
b = self.m234(x2, x3, x4, scale=1)
return a + b
class BasicModel_ConvNet_One_Conv(nn.Module):
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU(inplace=inplace)
self.fc1 = nn.Linear(8, 4)
self.conv1.weight = nn.Parameter(torch.ones(2, 1, 3, 3)) # type: ignore
self.conv1.bias = nn.Parameter(torch.tensor([-50.0, -75.0])) # type: ignore
self.fc1.weight = nn.Parameter( # type: ignore
torch.cat([torch.ones(4, 5), -1 * torch.ones(4, 3)], dim=1)
)
self.fc1.bias = nn.Parameter(torch.zeros(4)) # type: ignore
self.relu2 = nn.ReLU(inplace=inplace)
@no_type_check
def forward(self, x: Tensor, x2: Optional[Tensor] = None):
if x2 is not None:
x = x + x2
x = self.relu1(self.conv1(x))
x = x.view(-1, 8)
return self.relu2(self.fc1(x))
class BasicModel_ConvNetWithPaddingDilation(nn.Module):
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, padding=3, stride=2, dilation=2)
self.relu1 = nn.ReLU(inplace=inplace)
self.fc1 = nn.Linear(16, 4)
@no_type_check
def forward(self, x: Tensor):
bsz = x.shape[0]
x = self.relu1(self.conv1(x))
x = x.reshape(bsz, 2, -1)
return self.fc1(x).reshape(bsz, -1)
class BasicModel_ConvNet(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(2, 4, 3, 1)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
@no_type_check
def forward(self, x: Tensor) -> Tensor:
x = self.relu1(self.conv1(x))
x = self.pool1(x)
x = self.relu2(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, 4)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
class BasicModel_ConvNet_MaxPool1d(nn.Module):
"""Same as above, but with the MaxPool2d replaced
with a MaxPool1d. This is useful because the MaxPool modules
    behave differently from other modules from the perspective
    of DeepLift attributions.
"""
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv1d(1, 2, 3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool1d(2)
self.conv2 = nn.Conv1d(2, 4, 3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool1d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
@no_type_check
def forward(self, x: Tensor) -> Tensor:
x = self.relu1(self.conv1(x))
x = self.pool1(x)
x = self.relu2(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, 4)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
class BasicModel_ConvNet_MaxPool3d(nn.Module):
"""Same as above, but with the MaxPool1d replaced
with a MaxPool3d. This is useful because the MaxPool modules
behave differently to other modules from the perspective
of the DeepLift Attributions
"""
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv3d(1, 2, 3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool3d(2)
self.conv2 = nn.Conv3d(2, 4, 3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool3d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
def forward(self, x):
x = self.relu1(self.conv1(x))
x = self.pool1(x)
x = self.relu2(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, 4)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
|
#!/usr/bin/env python3
import copy
import random
import unittest
from typing import Callable
import numpy as np
import torch
from captum.log import patch_methods
def deep_copy_args(func: Callable):
def copy_args(*args, **kwargs):
return func(
*(copy.deepcopy(x) for x in args),
**{k: copy.deepcopy(v) for k, v in kwargs.items()},
)
return copy_args
def assertTensorAlmostEqual(test, actual, expected, delta=0.0001, mode="sum"):
assert isinstance(actual, torch.Tensor), (
"Actual parameter given for " "comparison must be a tensor."
)
if not isinstance(expected, torch.Tensor):
expected = torch.tensor(expected, dtype=actual.dtype)
assert (
actual.shape == expected.shape
), f"Expected tensor with shape: {expected.shape}. Actual shape {actual.shape}."
actual = actual.cpu()
expected = expected.cpu()
if mode == "sum":
test.assertAlmostEqual(
torch.sum(torch.abs(actual - expected)).item(), 0.0, delta=delta
)
elif mode == "max":
# if both tensors are empty, they are equal but there is no max
if actual.numel() == expected.numel() == 0:
return
if actual.size() == torch.Size([]):
test.assertAlmostEqual(
torch.max(torch.abs(actual - expected)).item(), 0.0, delta=delta
)
else:
for index, (input, ref) in enumerate(zip(actual, expected)):
almost_equal = abs(input - ref) <= delta
if hasattr(almost_equal, "__iter__"):
almost_equal = almost_equal.all()
assert (
almost_equal
), "Values at index {}: {} and {} differ by more than {}".format(
index, input, ref, delta
)
else:
raise ValueError("Mode for assertion comparison must be one of `max` or `sum`.")
def assertTensorTuplesAlmostEqual(test, actual, expected, delta=0.0001, mode="sum"):
if isinstance(expected, tuple):
assert len(actual) == len(
expected
), f"the length of actual {len(actual)} != expected {len(expected)}"
for i in range(len(expected)):
assertTensorAlmostEqual(test, actual[i], expected[i], delta, mode)
else:
assertTensorAlmostEqual(test, actual, expected, delta, mode)
def assertAttributionComparision(test, attributions1, attributions2):
for attribution1, attribution2 in zip(attributions1, attributions2):
for attr_row1, attr_row2 in zip(attribution1, attribution2):
assertTensorAlmostEqual(test, attr_row1, attr_row2, 0.05, "max")
def assert_delta(test, delta):
delta_condition = (delta.abs() < 0.00001).all()
test.assertTrue(
delta_condition,
"The sum of attribution values {} for relu layer is not "
"nearly equal to the difference between the endpoint for "
"some samples".format(delta),
)
def set_all_random_seeds(seed: int = 1234) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
class BaseTest(unittest.TestCase):
"""
This class provides a basic framework for all Captum tests by providing
a set up fixture, which sets a fixed random seed. Since many torch
initializations are random, this ensures that tests run deterministically.
"""
def setUp(self) -> None:
set_all_random_seeds(1234)
patch_methods(self)
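# Usage sketch (illustrative, not part of the helpers): "sum" mode bounds the
# total absolute difference, while "max" mode bounds the element-wise
# difference. A hypothetical test built on BaseTest could look like:
#
# class MyTest(BaseTest):
#     def test_close(self) -> None:
#         a = torch.tensor([1.0, 2.0, 3.0])
#         b = torch.tensor([1.0, 2.0, 3.00005])
#         assertTensorAlmostEqual(self, a, b, delta=0.001, mode="sum")
#         assertTensorAlmostEqual(self, a, b, delta=0.001, mode="max")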
|
#! /usr/bin/env python3
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
import os
import re
import sys
from typing import List
base_path = os.path.abspath(os.path.join(__file__, "..", "..", ".."))
# read module from src instead of installation
sys.path.insert(0, base_path)
print("base path for Captum module:", base_path)
# -- Project information -----------------------------------------------------
project = "Captum"
copyright = "2019, Facebook, Inc."
author = "The PyTorch Team"
# import captum from base_path to get the version
# but the version is no longer used,
# since the version is trimmed in the Sphinx pages embedded into Docusaurus
import captum # noqa: E402
version = captum.__version__
# -- General configuration ---------------------------------------------------
# Sphinx extension modules
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
"sphinxcontrib.katex",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Default options for autodoc directives. Applied to all autodoc directives
autodoc_default_options = {}
# Include init docstrings into body of autoclass directives
autoclass_content = "both"
# Preserve signature defaults
# Prevents entire tensors from being printed, & gives callable functions
# proper names
autodoc_preserve_defaults = True
# Configuration for intersphinx: refer to the Python standard library and PyTorch
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"pytorch": ("https://pytorch.org/docs/stable", None),
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
html_static_path = [] # for now we have no static files to track
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, the reST sources are included in the HTML build as _sources/name.
# The default is True.
# Uncomment the following line after sphinx 4.5.0 release
# https://github.com/sphinx-doc/sphinx/issues/9456
# html_copy_source = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "captumdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "captum.tex", "Captum Documentation", "Facebook, Inc.", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "captum", "captum Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"captum",
"Captum Documentation",
author,
"Captum",
"Model interpretability and understanding for PyTorch.",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Docstring Improvements --------------------------------------------------
# Regex code for typing replacements.
# The "(?<![\.])" part checks to see if the string
# starts with a period, and "\b" denotes word boundaries.
# Only words that don't start with a period are replaced.
_rt = [r"(?<![\.])(\b", r"\b)"]
def autodoc_process_docstring(
app, what: str, name: str, obj, options, lines: List[str]
) -> None:
"""
Modify docstrings before creating html files.
Sphinx converts the 'Args:' and 'Returns:' sections of docstrings into
reStructuredText (rST) syntax, which can then be found via ':type' & ':rtype'.
See here for more information:
https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
"""
for i in range(len(lines)):
# Skip unless the line is a parameter doc or a return doc
if not lines[i].startswith(":type"):
continue
if ":py:data:" in lines[i]:
continue
# Ensure Any, Callable, & Iterator types are hyperlinked with intersphinx.
# The tilde '~' character hides the 'typing.' portion of the string.
lines[i] = re.sub(_rt[0] + r"Any" + _rt[1], "~typing.Any", lines[i])
lines[i] = re.sub(_rt[0] + r"Callable" + _rt[1], "~typing.Callable", lines[i])
lines[i] = re.sub(_rt[0] + r"Iterator" + _rt[1], "~typing.Iterator", lines[i])
lines[i] = re.sub(_rt[0] + r"Iterable" + _rt[1], "~typing.Iterable", lines[i])
# Ensure the Tensor type is hyperlinked by intersphinx
lines[i] = re.sub(_rt[0] + r"Tensor" + _rt[1], "~torch.Tensor", lines[i])
def setup(app) -> None:
app.connect("autodoc-process-docstring", autodoc_process_docstring)
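# For illustration, a docstring line such as
#   ":type inputs: Tensor or tuple[Tensor, ...]"
# is rewritten by autodoc_process_docstring into
#   ":type inputs: ~torch.Tensor or tuple[~torch.Tensor, ...]"
# so that intersphinx can hyperlink the type names.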
|
#!/usr/bin/env python3
import argparse
import json
import os
import nbformat
from bs4 import BeautifulSoup
from nbconvert import HTMLExporter, ScriptExporter
TEMPLATE = """const CWD = process.cwd();
const React = require('react');
const Tutorial = require(`${{CWD}}/core/Tutorial.js`);
class TutorialPage extends React.Component {{
render() {{
const {{config: siteConfig}} = this.props;
const {{baseUrl}} = siteConfig;
return <Tutorial baseUrl={{baseUrl}} tutorialID="{}"/>;
}}
}}
module.exports = TutorialPage;
"""
JS_SCRIPTS = """
<script
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
</script>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
</script>
""" # noqa: E501
def gen_tutorials(repo_dir: str) -> None:
"""Generate HTML tutorials for captum Docusaurus site from Jupyter notebooks.
Also create ipynb and py versions of tutorial in Docusaurus site for
download.
"""
with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile:
tutorial_config = json.loads(infile.read())
tutorial_ids = []
for category_items in tutorial_config.values():
for item in category_items:
if "id" in item:
tutorial_ids.append(item["id"])
else:
for sub_item in item["children"]:
tutorial_ids.append(sub_item["id"])
for tid in tutorial_ids:
print("Generating {} tutorial".format(tid))
# convert notebook to HTML
ipynb_in_path = os.path.join(repo_dir, "tutorials", "{}.ipynb".format(tid))
with open(ipynb_in_path, "r") as infile:
nb_str = infile.read()
nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
# display_name is absent from notebook metadata
nb["metadata"]["kernelspec"]["display_name"] = "python3"
exporter = HTMLExporter()
html, meta = exporter.from_notebook_node(nb)
# pull out html div for notebook
soup = BeautifulSoup(html, "html.parser")
nb_meat = soup.find("div", {"id": "notebook-container"})
del nb_meat.attrs["id"]
nb_meat.attrs["class"] = ["notebook"]
html_out = JS_SCRIPTS + str(nb_meat)
# generate html file
html_out_path = os.path.join(
repo_dir, "website", "_tutorials", "{}.html".format(tid)
)
with open(html_out_path, "w") as html_outfile:
html_outfile.write(html_out)
# generate JS file
script = TEMPLATE.format(tid)
js_out_path = os.path.join(
repo_dir, "website", "pages", "tutorials", "{}.js".format(tid)
)
with open(js_out_path, "w") as js_outfile:
js_outfile.write(script)
# output tutorial in both ipynb & py form
ipynb_out_path = os.path.join(
repo_dir, "website", "static", "files", "{}.ipynb".format(tid)
)
with open(ipynb_out_path, "w") as ipynb_outfile:
ipynb_outfile.write(nb_str)
exporter = ScriptExporter()
script, meta = exporter.from_notebook_node(nb)
py_out_path = os.path.join(
repo_dir, "website", "static", "files", "{}.py".format(tid)
)
with open(py_out_path, "w") as py_outfile:
py_outfile.write(script)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate JS, HTML, ipynb, and py files for tutorials."
)
parser.add_argument(
"-w", "--repo_dir", metavar="path", required=True, help="captum repo directory."
)
args = parser.parse_args()
gen_tutorials(args.repo_dir)
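# Example invocation (the script name and path are illustrative):
#   python gen_tutorials.py -w /path/to/captum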
|
#!/usr/bin/env python3
import argparse
import json
from bs4 import BeautifulSoup
BASE_URL = "/"
def updateVersionHTML(base_path, base_url=BASE_URL):
with open(base_path + "/captum-master/website/_versions.json", "rb") as infile:
versions = json.loads(infile.read())
with open(base_path + "/new-site/versions.html", "rb") as infile:
html = infile.read()
versions.append("latest")
def prepend_url(a_tag, base_url, version):
href = a_tag.attrs["href"]
if href.startswith("https://") or href.startswith("http://"):
return href
else:
return "{base_url}versions/{version}{original_url}".format(
base_url=base_url, version=version, original_url=href
)
for v in versions:
soup = BeautifulSoup(html, "html.parser")
# title
title_link = soup.find("header").find("a")
title_link.attrs["href"] = prepend_url(title_link, base_url, v)
# nav
nav_links = soup.find("nav").findAll("a")
for link in nav_links:
link.attrs["href"] = prepend_url(link, base_url, v)
# version link
t = soup.find("h2", {"class": "headerTitleWithLogo"}).find_next("a")
t.string = v
t.attrs["href"] = prepend_url(t, base_url, v)
# output files
with open(
base_path + "/new-site/versions/{}/versions.html".format(v), "w"
) as outfile:
outfile.write(str(soup))
with open(
base_path + "/new-site/versions/{}/en/versions.html".format(v), "w"
) as outfile:
outfile.write(str(soup))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
"Fix links in versions.html files for the Docusaurus site. "
"This is used to ensure that the versions.html files for older "
"versions in the versions subdirectory are up-to-date and "
"will have a way to navigate back to newer versions."
)
)
parser.add_argument(
"-p",
"--base_path",
metavar="path",
required=True,
help="Input directory for rolling out new version of site.",
)
args = parser.parse_args()
updateVersionHTML(args.base_path)
|
#!/usr/bin/env python3
import argparse
import os
from bs4 import BeautifulSoup
# No need to import CSS from the built path, because Docusaurus
# automatically merges all CSS files within the static folder
# https://v1.docusaurus.io/docs/en/api-pages#styles
base_scripts = """
<script type="text/javascript" id="documentation_options" data-url_root="./"
src="/_sphinx/documentation_options.js"></script>
<script type="text/javascript" src="/_sphinx/jquery.js"></script>
<script type="text/javascript" src="/_sphinx/underscore.js"></script>
<script type="text/javascript" src="/_sphinx/doctools.js"></script>
<script type="text/javascript" src="/_sphinx/language_data.js"></script>
<script type="text/javascript" src="/_sphinx/searchtools.js"></script>
""" # noqa: E501
search_js_scripts = """
<script type="text/javascript">
jQuery(function() { Search.loadIndex("/_sphinx/searchindex.js"); });
</script>
<script type="text/javascript" id="searchindexloader"></script>
"""
katex_scripts = """
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/contrib/auto-render.min.js"></script>
<script src="/_sphinx/katex_autorenderer.js"></script>
<link rel="stylesheet" type="text/css" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css" />
""" # noqa: E501
def parse_sphinx(input_dir, output_dir):
for cur, _, files in os.walk(input_dir):
for fname in files:
if fname.endswith(".html"):
with open(os.path.join(cur, fname), "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
doc = soup.find("div", {"class": "document"})
wrapped_doc = doc.wrap(
soup.new_tag("div", **{"class": "sphinx wrapper"})
)
# add scripts that sphinx pages need
if fname == "search.html":
out = (
base_scripts
+ search_js_scripts
+ katex_scripts
+ str(wrapped_doc)
)
else:
out = base_scripts + katex_scripts + str(wrapped_doc)
output_path = os.path.join(output_dir, os.path.relpath(cur, input_dir))
os.makedirs(output_path, exist_ok=True)
with open(os.path.join(output_path, fname), "w") as fout:
fout.write(out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Strip HTML body from Sphinx docs.")
parser.add_argument(
"-i",
"--input_dir",
metavar="path",
required=True,
help="Input directory for Sphinx HTML.",
)
parser.add_argument(
"-o",
"--output_dir",
metavar="path",
required=True,
help="Output directory in website.",
)
args = parser.parse_args()
parse_sphinx(args.input_dir, args.output_dir)
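# Example invocation (paths are illustrative):
#   python parse_sphinx.py -i sphinx/build/html -o website/pages/api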
|
#!/usr/bin/env python3
import captum.attr as attr # noqa
import captum.concept as concept # noqa
import captum.influence as influence # noqa
import captum.log as log # noqa
import captum.metrics as metrics # noqa
import captum.robust as robust # noqa
__version__ = "0.6.0"
|
#!/usr/bin/env python3
from captum.metrics._core.infidelity import ( # noqa
infidelity,
infidelity_perturb_func_decorator,
)
from captum.metrics._core.sensitivity import sensitivity_max # noqa
|
#!/usr/bin/env python3
import warnings
from typing import Callable, Optional, Tuple
import torch
from torch import Tensor
def _divide_and_aggregate_metrics(
inputs: Tuple[Tensor, ...],
n_perturb_samples: int,
metric_func: Callable,
agg_func: Callable = torch.add,
max_examples_per_batch: Optional[int] = None,
) -> Tensor:
r"""
This function is used to slice a large number of samples `n_perturb_samples` per
input example into smaller pieces, computing the metric for each small piece and
aggregating the results across all `n_perturb_samples` per example. The function
returns the overall aggregated metric per example. The size of each slice is
determined by the `max_examples_per_batch` input parameter.
Args:
inputs (tuple): The original inputs, formatted as a tuple, that are passed
to the metric function and for which the attributions
are computed.
n_perturb_samples (int): The number of perturbation samples used per
input example.
metric_func (Callable): This function takes the number of samples per
input batch and returns an overall metric for each example.
agg_func (Callable, optional): This function is used to aggregate the
metrics across the sub-batches that are generated by
`metric_func`.
max_examples_per_batch (int, optional): The maximum number of allowed examples
per batch.
Returns:
metric (Tensor): A metric score estimated by `metric_func` per
input example.
"""
bsz = inputs[0].size(0)
if max_examples_per_batch is not None and (
max_examples_per_batch // bsz < 1
or max_examples_per_batch // bsz > n_perturb_samples
):
warnings.warn(
(
"`max_examples_per_batch` must be at least equal to the"
" input batch size and at most to "
"`input batch size` * `n_perturb_samples`."
"`max_examples_per_batch` is: {} and the input batch size is: {}."
"This is necessary because we require that each sub-batch that is used "
"to compute the metrics, contains at least an instance of "
"the original example and doesn't exceed the number of "
"expanded n_perturb_samples."
).format(max_examples_per_batch, bsz)
)
max_inps_per_batch = (
n_perturb_samples
if max_examples_per_batch is None
else min(max(max_examples_per_batch // bsz, 1), n_perturb_samples)
)
current_n_steps = max_inps_per_batch
metrics_sum = metric_func(max_inps_per_batch)
while current_n_steps < n_perturb_samples:
current_n_steps += max_inps_per_batch
metric = metric_func(
max_inps_per_batch
if current_n_steps <= n_perturb_samples
else max_inps_per_batch - (current_n_steps - n_perturb_samples)
)
current_n_steps = min(current_n_steps, n_perturb_samples)
metrics_sum = agg_func(metrics_sum, metric)
return metrics_sum
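# Illustrative sketch of the slicing behavior: with a batch size of 2,
# n_perturb_samples=5 and max_examples_per_batch=4, `metric_func` is invoked
# with sub-batch sizes 2, 2 and 1 (4 // 2 = 2 perturbation samples per slice),
# and the per-slice results are combined with `agg_func` (torch.add by default):
# >>> inputs = (torch.ones(2, 3),)
# >>> counts = _divide_and_aggregate_metrics(
# ...     inputs,
# ...     n_perturb_samples=5,
# ...     metric_func=lambda n: torch.full((2,), float(n)),
# ...     max_examples_per_batch=4,
# ... )
# >>> counts  # tensor([5., 5.])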
|
#!/usr/bin/env python3
|
#!/usr/bin/env python3
from typing import Any, Callable, cast, Optional, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_baseline,
_format_tensor_into_tuples,
_run_forward,
ExpansionTypes,
safe_div,
)
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.log import log_usage
from captum.metrics._utils.batching import _divide_and_aggregate_metrics
from torch import Tensor
def infidelity_perturb_func_decorator(multipy_by_inputs: bool = True) -> Callable:
r"""An auxiliary, decorator function that helps with computing
perturbations given perturbed inputs. It can be useful for cases
when `pertub_func` returns only perturbed inputs and we
internally compute the perturbations as
(input - perturbed_input) / (input - baseline) if
multipy_by_inputs is set to True and
(input - perturbed_input) otherwise.
If users decorate their `pertub_func` with
`@infidelity_perturb_func_decorator` function then their `pertub_func`
needs to only return perturbed inputs.
Args:
multipy_by_inputs (bool): Indicates whether model inputs'
multiplier is factored in the computation of
attribution scores.
"""
def sub_infidelity_perturb_func_decorator(pertub_func: Callable) -> Callable:
r"""
Args:
pertub_func(Callable): Input perturbation function that takes inputs
and optionally baselines and returns perturbed inputs
Returns:
default_perturb_func(Callable): Internal default perturbation
function that computes the perturbations internally and returns
perturbations and perturbed inputs.
Examples::
>>> @infidelity_perturb_func_decorator(True)
>>> def perturb_fn(inputs):
>>> noise = torch.tensor(np.random.normal(0, 0.003,
>>> inputs.shape)).float()
>>> return inputs - noise
>>> # Computes infidelity score using `perturb_fn`
>>> infidelity = infidelity(model, perturb_fn, input, ...)
"""
def default_perturb_func(
inputs: TensorOrTupleOfTensorsGeneric, baselines: BaselineType = None
):
r""" """
inputs_perturbed = (
pertub_func(inputs, baselines)
if baselines is not None
else pertub_func(inputs)
)
inputs_perturbed = _format_tensor_into_tuples(inputs_perturbed)
inputs = _format_tensor_into_tuples(inputs)
baselines = _format_baseline(baselines, inputs)
if baselines is None:
perturbations = tuple(
safe_div(
input - input_perturbed,
input,
default_denom=1.0,
)
if multipy_by_inputs
else input - input_perturbed
for input, input_perturbed in zip(inputs, inputs_perturbed)
)
else:
perturbations = tuple(
safe_div(
input - input_perturbed,
input - baseline,
default_denom=1.0,
)
if multipy_by_inputs
else input - input_perturbed
for input, input_perturbed, baseline in zip(
inputs, inputs_perturbed, baselines
)
)
return perturbations, inputs_perturbed
return default_perturb_func
return sub_infidelity_perturb_func_decorator
@log_usage()
def infidelity(
forward_func: Callable,
perturb_func: Callable,
inputs: TensorOrTupleOfTensorsGeneric,
attributions: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
additional_forward_args: Any = None,
target: TargetType = None,
n_perturb_samples: int = 10,
max_examples_per_batch: Optional[int] = None,
normalize: bool = False,
) -> Tensor:
r"""
Explanation infidelity represents the expected mean-squared error
between the explanation multiplied by a meaningful input perturbation
and the differences between the predictor function at its input
and perturbed input.
More details about the measure can be found in the following paper:
https://arxiv.org/abs/1901.09392
It is derived from the completeness property of well-known attribution
algorithms and is a computationally more efficient and generalized
notion of Sensitivity-n. The latter measures correlations between the sum
of the attributions and the differences of the predictor function at
its input and a fixed baseline. More details about Sensitivity-n can
be found here:
https://arxiv.org/abs/1711.06104
Users can perturb the inputs in any desired way by providing a
perturbation function that takes the inputs (and optionally baselines)
and returns either perturbed inputs, or both perturbed inputs and the
corresponding perturbations.
This specific implementation is primarily tested for attribution-based
explanation methods, but the idea can be extended to
non-attribution-based interpretability methods as well.
Args:
forward_func (Callable):
The forward function of the model or any modification of it.
perturb_func (Callable):
The perturbation function of model inputs. This function takes
model inputs and optionally baselines as input arguments and returns
either a tuple of perturbations and perturbed inputs or just
perturbed inputs. For example:
>>> def my_perturb_func(inputs):
>>> <MY-LOGIC-HERE>
>>> return perturbations, perturbed_inputs
If we want to only return perturbed inputs and compute
perturbations internally then we can wrap perturb_func with
`infidelity_perturb_func_decorator` decorator such as:
>>> from captum.metrics import infidelity_perturb_func_decorator
>>> @infidelity_perturb_func_decorator(<multipy_by_inputs flag>)
>>> def my_perturb_func(inputs):
>>> <MY-LOGIC-HERE>
>>> return perturbed_inputs
If `multipy_by_inputs` is False, we compute the perturbations as the
`input - perturbed_input` difference; if the `multipy_by_inputs`
flag is True, we compute them by dividing
(input - perturbed_input) by (input - baselines).
The user needs to return only the perturbed inputs in `perturb_func`,
as described above.
`infidelity_perturb_func_decorator` needs to be used with the
`multipy_by_inputs` flag set to False when the infidelity
score is being computed for attribution maps that are local, i.e.
that do not factor the inputs into the final attribution score.
Such attribution algorithms include Saliency, GradCam, Guided Backprop,
as well as Integrated Gradients and DeepLift attribution scores that are
already computed with the `multipy_by_inputs=False` flag.
If more than one input is passed to the infidelity function, the inputs
will be passed to `perturb_func` as a tuple in the same order as they
are passed to the infidelity function.
If inputs
- is a single tensor, the function needs to return a tuple
of perturbations and perturbed input such as:
perturb, perturbed_input and only perturbed_input in case
`infidelity_perturb_func_decorator` is used.
- is a tuple of tensors, corresponding perturbations and perturbed
inputs must be computed and returned as tuples in the
following format:
(perturb1, perturb2, ... perturbN), (perturbed_input1,
perturbed_input2, ... perturbed_inputN)
Similar to the previous case, here as well we need to return only the
perturbed inputs when `infidelity_perturb_func_decorator`
decorates our `perturb_func`.
It is important to note that for performance reasons `perturb_func`
isn't called for each example individually but on a batch of
input examples that are repeated `max_examples_per_batch / batch_size`
times within the batch.
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference values which sometimes represent ablated
values and are used to compare with the actual inputs to compute
importance scores in attribution algorithms. They can be represented
as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
Default: None
attributions (Tensor or tuple[Tensor, ...]):
Attribution scores computed based on an attribution algorithm.
These attribution scores can be computed using the implementations
provided in the `captum.attr` package. Some of those attribution
approaches are so-called global methods, which means that
they factor in the model inputs' multiplier, as described in:
https://arxiv.org/abs/1711.06104
Many global attribution algorithms can be used in a local mode,
meaning that the inputs' multiplier isn't factored into the
attribution scores.
This can be done during the definition of the attribution algorithm
by passing the `multipy_by_inputs=False` flag.
For example in case of Integrated Gradients (IG) we can obtain
local attribution scores if we define the constructor of IG as:
ig = IntegratedGradients(multipy_by_inputs=False)
Some attribution algorithms are inherently local.
Examples of inherently local attribution methods include:
Saliency, Guided GradCam, Guided Backprop and Deconvolution.
For local attributions we can use real-valued perturbations
whereas for global attributions that perturbation is binary.
https://arxiv.org/abs/1901.09392
If we want to compute the infidelity of global attributions we
can use a binary perturbation matrix that will allow us to select
a subset of features from `inputs` or `inputs - baselines` space.
This will allow us to approximate sensitivity-n for a global
attribution algorithm.
`infidelity_perturb_func_decorator` function decorator is a helper
function that computes perturbations under the hood if perturbed
inputs are provided.
For more details about how to use `infidelity_perturb_func_decorator`,
please, read the documentation about `perturb_func`
Attributions have the same shape and dimensionality as the inputs.
If inputs is a single tensor then the attributions are a single
tensor as well. If inputs is provided as a tuple of tensors
then attributions will be a tuple of tensors as well.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
forward_func in order, following the arguments in inputs.
Note that the perturbations are not computed with respect
to these arguments. This means that these arguments aren't
being passed to `perturb_func` as an input argument.
Default: None
target (int, tuple, Tensor, or list, optional): Indices for selecting
predictions from output (for classification cases,
this is usually the target class).
If the network returns a scalar value per example, no target
index is necessary.
For general 2D outputs, targets can be either:
- A single integer or a tensor containing a single
integer, which is applied to all input examples
- A list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
n_perturb_samples (int, optional): The number of times input tensors
are perturbed. Each input example in the inputs tensor is expanded
`n_perturb_samples`
times before calling `perturb_func` function.
Default: 10
max_examples_per_batch (int, optional): The number of maximum input
examples that are processed together. In case the number of
examples (`input batch size * n_perturb_samples`) exceeds
`max_examples_per_batch`, they will be sliced
into batches of `max_examples_per_batch` examples and processed
in a sequential order. If `max_examples_per_batch` is None, all
examples are processed together. `max_examples_per_batch` should
at least be equal `input batch size` and at most
`input batch size * n_perturb_samples`.
Default: None
normalize (bool, optional): Normalize the dot product of the input
perturbation and the attribution so the infidelity value is invariant
to constant scaling of the attribution values. The normalization factor
beta is defined as the ratio of two mean values:
.. math::
\beta = \frac{
\mathbb{E}_{I \sim \mu_I} [ I^T \Phi(f, x) (f(x) - f(x - I)) ]
}{
\mathbb{E}_{I \sim \mu_I} [ (I^T \Phi(f, x))^2 ]
}
Please refer to the original paper for the meaning of the symbols. The
same normalization can be found in the paper's official implementation:
https://github.com/chihkuanyeh/saliency_evaluation
Default: False
Returns:
infidelities (Tensor): A tensor of scalar infidelity scores per
input example. The first dimension is equal to the
number of examples in the input batch and the second
dimension is one.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> saliency = Saliency(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes saliency maps for class 3.
>>> attribution = saliency.attribute(input, target=3)
>>> # define a perturbation function for the input
>>> def perturb_fn(inputs):
>>> noise = torch.tensor(np.random.normal(0, 0.003, inputs.shape)).float()
>>> return noise, inputs - noise
>>> # Computes infidelity score for saliency maps
>>> infid = infidelity(net, perturb_fn, input, attribution)
"""
def _generate_perturbations(
current_n_perturb_samples: int,
) -> Tuple[TensorOrTupleOfTensorsGeneric, TensorOrTupleOfTensorsGeneric]:
r"""
The perturbations are generated for each example
`current_n_perturb_samples` times.
For performance reasons we are not calling `perturb_func` on each example but
on a batch that contains `current_n_perturb_samples`
repeated instances per example.
"""
def call_perturb_func():
r""" """
baselines_pert = None
inputs_pert: Union[Tensor, Tuple[Tensor, ...]]
if len(inputs_expanded) == 1:
inputs_pert = inputs_expanded[0]
if baselines_expanded is not None:
baselines_pert = cast(Tuple, baselines_expanded)[0]
else:
inputs_pert = inputs_expanded
baselines_pert = baselines_expanded
return (
perturb_func(inputs_pert, baselines_pert)
if baselines_pert is not None
else perturb_func(inputs_pert)
)
inputs_expanded = tuple(
torch.repeat_interleave(input, current_n_perturb_samples, dim=0)
for input in inputs
)
baselines_expanded = baselines
if baselines is not None:
baselines_expanded = tuple(
baseline.repeat_interleave(current_n_perturb_samples, dim=0)
if isinstance(baseline, torch.Tensor)
and baseline.shape[0] == input.shape[0]
and baseline.shape[0] > 1
else baseline
for input, baseline in zip(inputs, cast(Tuple, baselines))
)
return call_perturb_func()
def _validate_inputs_and_perturbations(
inputs: Tuple[Tensor, ...],
inputs_perturbed: Tuple[Tensor, ...],
perturbations: Tuple[Tensor, ...],
) -> None:
# asserts the sizes of the perturbations and inputs
assert len(perturbations) == len(inputs), (
"""The number of perturbed
inputs and corresponding perturbations must have the same number of
elements. Found number of inputs is: {} and perturbations:
{}"""
).format(len(perturbations), len(inputs))
# asserts the shapes of the perturbations and perturbed inputs
for perturb, input_perturbed in zip(perturbations, inputs_perturbed):
assert perturb[0].shape == input_perturbed[0].shape, (
"""Perturbed input
and corresponding perturbation must have the same shape and
dimensionality. Found perturbation shape is: {} and the input shape
is: {}"""
).format(perturb[0].shape, input_perturbed[0].shape)
def _next_infidelity_tensors(
current_n_perturb_samples: int,
) -> Union[Tuple[Tensor], Tuple[Tensor, Tensor, Tensor]]:
perturbations, inputs_perturbed = _generate_perturbations(
current_n_perturb_samples
)
perturbations = _format_tensor_into_tuples(perturbations)
inputs_perturbed = _format_tensor_into_tuples(inputs_perturbed)
_validate_inputs_and_perturbations(
cast(Tuple[Tensor, ...], inputs),
cast(Tuple[Tensor, ...], inputs_perturbed),
cast(Tuple[Tensor, ...], perturbations),
)
targets_expanded = _expand_target(
target,
current_n_perturb_samples,
expansion_type=ExpansionTypes.repeat_interleave,
)
additional_forward_args_expanded = _expand_additional_forward_args(
additional_forward_args,
current_n_perturb_samples,
expansion_type=ExpansionTypes.repeat_interleave,
)
inputs_perturbed_fwd = _run_forward(
forward_func,
inputs_perturbed,
targets_expanded,
additional_forward_args_expanded,
)
inputs_fwd = _run_forward(forward_func, inputs, target, additional_forward_args)
inputs_fwd = torch.repeat_interleave(
inputs_fwd, current_n_perturb_samples, dim=0
)
perturbed_fwd_diffs = inputs_fwd - inputs_perturbed_fwd
attributions_expanded = tuple(
torch.repeat_interleave(attribution, current_n_perturb_samples, dim=0)
for attribution in attributions
)
attributions_times_perturb = tuple(
(attribution_expanded * perturbation).view(attribution_expanded.size(0), -1)
for attribution_expanded, perturbation in zip(
attributions_expanded, perturbations
)
)
attr_times_perturb_sums = sum(
torch.sum(attribution_times_perturb, dim=1)
for attribution_times_perturb in attributions_times_perturb
)
attr_times_perturb_sums = cast(Tensor, attr_times_perturb_sums)
# reshape as Tensor(bsz, current_n_perturb_samples)
attr_times_perturb_sums = attr_times_perturb_sums.view(bsz, -1)
perturbed_fwd_diffs = perturbed_fwd_diffs.view(bsz, -1)
if normalize:
# in order to normalize, we have to aggregate the following tensors
# to calculate MSE in its polynomial expansion:
# (a-b)^2 = a^2 - 2ab + b^2
return (
attr_times_perturb_sums.pow(2).sum(-1),
(attr_times_perturb_sums * perturbed_fwd_diffs).sum(-1),
perturbed_fwd_diffs.pow(2).sum(-1),
)
else:
# returns (a-b)^2 if no need to normalize
return ((attr_times_perturb_sums - perturbed_fwd_diffs).pow(2).sum(-1),)
def _sum_infidelity_tensors(agg_tensors, tensors):
return tuple(agg_t + t for agg_t, t in zip(agg_tensors, tensors))
# perform argument formattings
inputs = _format_tensor_into_tuples(inputs) # type: ignore
if baselines is not None:
baselines = _format_baseline(baselines, cast(Tuple[Tensor, ...], inputs))
additional_forward_args = _format_additional_forward_args(additional_forward_args)
attributions = _format_tensor_into_tuples(attributions) # type: ignore
# Make sure that inputs and corresponding attributions have matching sizes.
assert len(inputs) == len(attributions), (
"""The number of tensors in the inputs and
attributions must match. Found number of tensors in the inputs is: {} and in the
attributions: {}"""
).format(len(inputs), len(attributions))
for inp, attr in zip(inputs, attributions):
assert inp.shape == attr.shape, (
"""Inputs and attributions must have
matching shapes. One of the input tensor's shape is {} and the
attribution tensor's shape is: {}"""
).format(inp.shape, attr.shape)
bsz = inputs[0].size(0)
with torch.no_grad():
# if not normalize, directly return the aggregated MSE ((a-b)^2,)
# else return aggregated MSE's polynomial expansion tensors (a^2, ab, b^2)
agg_tensors = _divide_and_aggregate_metrics(
cast(Tuple[Tensor, ...], inputs),
n_perturb_samples,
_next_infidelity_tensors,
agg_func=_sum_infidelity_tensors,
max_examples_per_batch=max_examples_per_batch,
)
if normalize:
beta_num = agg_tensors[1]
beta_denorm = agg_tensors[0]
beta = safe_div(beta_num, beta_denorm)
infidelity_values = (
beta**2 * agg_tensors[0] - 2 * beta * agg_tensors[1] + agg_tensors[2]
)
else:
infidelity_values = agg_tensors[0]
infidelity_values /= n_perturb_samples
return infidelity_values
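# Self-contained sketch (the toy model and values are illustrative): computing
# infidelity for Saliency attributions of a small linear network.
# >>> import torch.nn as nn
# >>> from captum.attr import Saliency
# >>> net = nn.Sequential(nn.Linear(4, 1))
# >>> inp = torch.randn(8, 4, requires_grad=True)
# >>> attr = Saliency(net).attribute(inp)
# >>> def perturb_fn(inputs):
# ...     noise = torch.randn_like(inputs) * 0.01
# ...     return noise, inputs - noise
# >>> scores = infidelity(net, perturb_fn, inp, attr)  # shape: (8,)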
|
#!/usr/bin/env python3
from copy import deepcopy
from inspect import signature
from typing import Any, Callable, cast, Optional, Tuple, Union
import torch
from captum._utils.common import (
_expand_and_update_additional_forward_args,
_expand_and_update_baselines,
_expand_and_update_target,
_format_baseline,
_format_tensor_into_tuples,
)
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.log import log_usage
from captum.metrics._utils.batching import _divide_and_aggregate_metrics
from torch import Tensor
def default_perturb_func(
inputs: TensorOrTupleOfTensorsGeneric, perturb_radius: float = 0.02
) -> Tuple[Tensor, ...]:
r"""A default function for generating perturbations of `inputs`
within perturbation radius of `perturb_radius`.
This function samples uniformly random from the L_Infinity ball
with `perturb_radius` radius.
The users can override this function if they prefer to use a
different perturbation function.
Args:
inputs (Tensor or tuple[Tensor, ...]): The input tensors that we'd
like to perturb by adding a random noise sampled uniformly
random from an L_infinity ball with a radius `perturb_radius`.
radius (float): A radius used for sampling from
an L_infinity ball.
Returns:
perturbed_input (tuple[Tensor, ...]): A list of perturbed inputs that
are created by adding noise sampled uniformly random
from L_infiniy ball with a radius `perturb_radius` to the
original inputs.
"""
inputs = _format_tensor_into_tuples(inputs)
perturbed_input = tuple(
input
+ torch.FloatTensor(input.size()) # type: ignore
.uniform_(-perturb_radius, perturb_radius)
.to(input.device)
for input in inputs
)
return perturbed_input
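# Illustrative check (a sketch): each perturbed tensor stays within the
# L_infinity ball of the given radius around the original input.
# >>> (pert,) = default_perturb_func(torch.zeros(4, 3), perturb_radius=0.02)
# >>> bool(pert.abs().le(0.02).all())  # True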
@log_usage()
def sensitivity_max(
explanation_func: Callable,
inputs: TensorOrTupleOfTensorsGeneric,
perturb_func: Callable = default_perturb_func,
perturb_radius: float = 0.02,
n_perturb_samples: int = 10,
norm_ord: str = "fro",
max_examples_per_batch: Optional[int] = None,
**kwargs: Any,
) -> Tensor:
r"""
Explanation sensitivity measures the extent of explanation change when
the input is slightly perturbed. It has been shown that the models that
have high explanation sensitivity are prone to adversarial attacks:
`Interpretation of Neural Networks is Fragile`
https://www.aaai.org/ojs/index.php/AAAI/article/view/4252
The `sensitivity_max` metric measures the maximum sensitivity of an
explanation using a Monte Carlo sampling-based approximation. By default,
it does so by sampling multiple data points from a sub-space of an
L-Infinity ball with radius `perturb_radius`, using the
`default_perturb_func` perturbation function. In the general case, users
can use any L_p ball or any other custom sampling technique that they
prefer by providing a custom `perturb_func`.
Note that max sensitivity is similar to the Lipschitz continuity metric;
however, it is more robust and easier to estimate.
Since the explanation, for instance an attribution function,
may not always be continuous, it can lead to unbounded
Lipschitz continuity. Therefore the latter isn't always appropriate.
More about the Lipschitz Continuity Metric can also be found here
`On the Robustness of Interpretability Methods`
https://arxiv.org/abs/1806.08049
and
`Towards Robust Interpretability with Self-Explaining Neural Networks`
https://papers.nips.cc/paper/8003-towards-robust-interpretability-with-self-explaining-neural-networks.pdf
More details about sensitivity max can be found here:
`On the (In)fidelity and Sensitivity of Explanations`
https://arxiv.org/abs/1901.09392
Args:
explanation_func (Callable):
This function can be the `attribute` method of an
attribution algorithm or any other explanation method
that returns the explanations.
inputs (Tensor or tuple[Tensor, ...]): Input for which
explanations are computed. If `explanation_func` takes a
single tensor as input, a single input tensor should
be provided.
If `explanation_func` takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
perturb_func (Callable):
The perturbation function of model inputs. This function takes
model inputs and optionally `perturb_radius` if
the function takes more than one argument and returns
perturbed inputs.
If more than one input is passed to the sensitivity function, the inputs
will be passed to `perturb_func` as a tuple in the same order as they
are passed to the sensitivity function.
It is important to note that for performance reasons `perturb_func`
isn't called for each example individually but on a batch of
input examples that are repeated `max_examples_per_batch / batch_size`
times within the batch.
Default: default_perturb_func
perturb_radius (float, optional): The epsilon radius used for sampling.
In the `default_perturb_func` it is used as the radius of
the L-Infinity ball. In a general case it can serve as a radius of
any L_p norm.
This argument is passed to `perturb_func` if it takes more than
one argument.
Default: 0.02
n_perturb_samples (int, optional): The number of times input tensors
are perturbed. Each input example in the inputs tensor is
expanded `n_perturb_samples` times before calling
`perturb_func` function.
Default: 10
norm_ord (int, float, or str, optional): The type of norm that is used to
compute the norm of the sensitivity matrix which is defined as the
difference between the explanation function at its input and perturbed
input. Acceptable values are either a string of 'fro' or 'nuc', or a
number in the range of [-inf, inf] (including float("-inf") &
float("inf")).
Default: 'fro'
max_examples_per_batch (int, optional): The number of maximum input
examples that are processed together. In case the number of
examples (`input batch size * n_perturb_samples`) exceeds
`max_examples_per_batch`, they will be sliced
into batches of `max_examples_per_batch` examples and processed
in a sequential order. If `max_examples_per_batch` is None, all
examples are processed together. `max_examples_per_batch` should
at least be equal `input batch size` and at most
`input batch size * n_perturb_samples`.
Default: None
**kwargs (Any, optional): Contains a list of arguments that are passed
to `explanation_func` explanation function which in some cases
could be the `attribute` function of an attribution algorithm.
Any additional arguments that need be passed to the explanation
function should be included here.
For instance, such arguments include:
`additional_forward_args`, `baselines` and `target`.
Returns:
sensitivities (Tensor): A tensor of scalar sensitivity scores per
input example. The first dimension is equal to the
number of examples in the input batch and the second
dimension is one. Returned sensitivities are normalized by
the magnitudes of the input explanations.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> saliency = Saliency(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes sensitivity score for saliency maps of class 3
>>> sens = sensitivity_max(saliency.attribute, input, target = 3)
"""
def _generate_perturbations(
current_n_perturb_samples: int,
) -> TensorOrTupleOfTensorsGeneric:
r"""
The perturbations are generated for each example
`current_n_perturb_samples` times.
For performance reasons we are not calling `perturb_func` on each example but
on a batch that contains `current_n_perturb_samples` repeated instances
per example.
"""
inputs_expanded: Union[Tensor, Tuple[Tensor, ...]] = tuple(
torch.repeat_interleave(input, current_n_perturb_samples, dim=0)
for input in inputs
)
if len(inputs_expanded) == 1:
inputs_expanded = inputs_expanded[0]
return (
perturb_func(inputs_expanded, perturb_radius)
if len(signature(perturb_func).parameters) > 1
else perturb_func(inputs_expanded)
)
def max_values(input_tnsr: Tensor) -> Tensor:
return torch.max(input_tnsr, dim=1).values # type: ignore
kwarg_expanded_for = None
kwargs_copy: Any = None
def _next_sensitivity_max(current_n_perturb_samples: int) -> Tensor:
inputs_perturbed = _generate_perturbations(current_n_perturb_samples)
# copy kwargs and update some of the arguments that need to be expanded
nonlocal kwarg_expanded_for
nonlocal kwargs_copy
if (
kwarg_expanded_for is None
or kwarg_expanded_for != current_n_perturb_samples
):
kwarg_expanded_for = current_n_perturb_samples
kwargs_copy = deepcopy(kwargs)
_expand_and_update_additional_forward_args(
current_n_perturb_samples, kwargs_copy
)
_expand_and_update_target(current_n_perturb_samples, kwargs_copy)
if "baselines" in kwargs:
baselines = kwargs["baselines"]
baselines = _format_baseline(
baselines, cast(Tuple[Tensor, ...], inputs)
)
if (
isinstance(baselines[0], Tensor)
and baselines[0].shape == inputs[0].shape
):
_expand_and_update_baselines(
cast(Tuple[Tensor, ...], inputs),
current_n_perturb_samples,
kwargs_copy,
)
expl_perturbed_inputs = explanation_func(inputs_perturbed, **kwargs_copy)
# tuplize `expl_perturbed_inputs` in case it is not
expl_perturbed_inputs = _format_tensor_into_tuples(expl_perturbed_inputs)
expl_inputs_expanded = tuple(
expl_input.repeat_interleave(current_n_perturb_samples, dim=0)
for expl_input in expl_inputs
)
sensitivities = torch.cat(
[
(expl_input - expl_perturbed).view(expl_perturbed.size(0), -1)
for expl_perturbed, expl_input in zip(
expl_perturbed_inputs, expl_inputs_expanded
)
],
dim=1,
)
# compute the norm of original input explanations
expl_inputs_norm_expanded = torch.norm(
torch.cat(
[expl_input.view(expl_input.size(0), -1) for expl_input in expl_inputs],
dim=1,
),
p=norm_ord,
dim=1,
keepdim=True,
).repeat_interleave(current_n_perturb_samples, dim=0)
expl_inputs_norm_expanded = torch.where(
expl_inputs_norm_expanded == 0.0,
torch.tensor(
1.0,
device=expl_inputs_norm_expanded.device,
dtype=expl_inputs_norm_expanded.dtype,
),
expl_inputs_norm_expanded,
)
# compute the norm for each input noisy example
sensitivities_norm = (
torch.norm(sensitivities, p=norm_ord, dim=1, keepdim=True)
/ expl_inputs_norm_expanded
)
return max_values(sensitivities_norm.view(bsz, -1))
inputs = _format_tensor_into_tuples(inputs) # type: ignore
bsz = inputs[0].size(0)
with torch.no_grad():
expl_inputs = explanation_func(inputs, **kwargs)
metrics_max = _divide_and_aggregate_metrics(
cast(Tuple[Tensor, ...], inputs),
n_perturb_samples,
_next_sensitivity_max,
max_examples_per_batch=max_examples_per_batch,
agg_func=torch.max,
)
return metrics_max
|
#!/usr/bin/env python3
import threading
import typing
import warnings
from collections import defaultdict
from typing import Any, Callable, cast, Dict, List, Optional, Sequence, Tuple, Union
import torch
from captum._utils.common import (
_reduce_list,
_run_forward,
_sort_key_list,
_verify_select_neuron,
)
from captum._utils.sample_gradient import SampleGradientWrapper
from captum._utils.typing import (
Literal,
ModuleOrModuleList,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from torch import device, Tensor
from torch.nn import Module
def apply_gradient_requirements(
inputs: Tuple[Tensor, ...], warn: bool = True
) -> List[bool]:
"""
Iterates through a tuple of input tensors and sets requires_grad to True on
each tensor. To ensure that the input
is returned to its initial state, a list of flags representing whether or not
a tensor originally required grad is returned.
"""
assert isinstance(
inputs, tuple
), "Inputs should be wrapped in a tuple prior to preparing for gradients"
grad_required = []
for index, input in enumerate(inputs):
assert isinstance(input, torch.Tensor), "Given input is not a torch.Tensor"
grad_required.append(input.requires_grad)
inputs_dtype = input.dtype
# Note: torch 1.2 doesn't support is_complex for dtype; that's why we check
# for the existence of the is_complex method.
if not inputs_dtype.is_floating_point and not (
hasattr(inputs_dtype, "is_complex") and inputs_dtype.is_complex
):
if warn:
warnings.warn(
"""Input Tensor %d has a dtype of %s.
Gradients cannot be activated
for these data types."""
% (index, str(inputs_dtype))
)
elif not input.requires_grad:
if warn:
warnings.warn(
"Input Tensor %d did not already require gradients, "
"required_grads has been set automatically." % index
)
input.requires_grad_()
return grad_required
def undo_gradient_requirements(
inputs: Tuple[Tensor, ...], grad_required: List[bool]
) -> None:
"""
Iterates through the list of tensors and sets requires_grad to False if
the corresponding index in grad_required is False.
This method is used to undo the effects of apply_gradient_requirements,
making grads not required for any input tensor that did not initially
require gradients.
"""
assert isinstance(
inputs, tuple
), "Inputs should be wrapped in a tuple prior to preparing for gradients."
assert len(inputs) == len(
grad_required
), "Input tuple length should match gradient mask."
for index, input in enumerate(inputs):
assert isinstance(input, torch.Tensor), "Given input is not a torch.Tensor"
if not grad_required[index]:
input.requires_grad_(False)
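# Typical usage pattern (sketch): enable input gradients, do gradient work,
# then restore the original requires_grad flags.
# >>> inps = (torch.rand(2, 3),)
# >>> flags = apply_gradient_requirements(inps)
# >>> # ... forward/backward computations that need input gradients ...
# >>> undo_gradient_requirements(inps, flags)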
def compute_gradients(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
r"""
Computes gradients of the output with respect to inputs for an
arbitrary forward function.
Args:
forward_fn: forward function. This can be for example model's
forward function.
inputs: Input at which gradients are evaluated;
will be passed to forward_fn.
target_ind: Index of the target class for which gradients
must be computed (classification only).
additional_forward_args: Additional input arguments that forward
function requires. It takes an empty tuple (no additional
arguments) if no additional arguments are required
"""
with torch.autograd.set_grad_enabled(True):
# runs forward pass
outputs = _run_forward(forward_fn, inputs, target_ind, additional_forward_args)
assert outputs[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
# torch.unbind(forward_out) is a list of scalar tensor tuples and
# contains batch_size * #steps elements
grads = torch.autograd.grad(torch.unbind(outputs), inputs)
return grads
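# Sketch (illustrative model): gradients of the selected target output
# with respect to a single input tensor.
# >>> model = torch.nn.Linear(3, 5)
# >>> x = torch.rand(2, 3, requires_grad=True)
# >>> (grad,) = compute_gradients(model, x, target_ind=1)
# >>> grad.shape  # torch.Size([2, 3])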
def _neuron_gradients(
inputs: Union[Tensor, Tuple[Tensor, ...]],
saved_layer: Dict[device, Tuple[Tensor, ...]],
key_list: List[device],
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
) -> Tuple[Tensor, ...]:
with torch.autograd.set_grad_enabled(True):
gradient_tensors = []
for key in key_list:
current_out_tensor = _verify_select_neuron(
saved_layer[key], gradient_neuron_selector
)
gradient_tensors.append(
torch.autograd.grad(
torch.unbind(current_out_tensor)
if current_out_tensor.numel() > 1
else current_out_tensor,
inputs,
)
)
_total_gradients = _reduce_list(gradient_tensors, sum)
return _total_gradients
@typing.overload
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: List[Module],
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> List[Tuple[Tensor, ...]]:
...
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]:
return _forward_layer_eval_with_neuron_grads(
forward_fn,
inputs,
layer,
additional_forward_args=additional_forward_args,
gradient_neuron_selector=None,
grad_enabled=grad_enabled,
device_ids=device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
@typing.overload
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Any,
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
forward_hook_with_return: Literal[False] = False,
require_layer_grads: bool = False,
) -> Dict[Module, Dict[device, Tuple[Tensor, ...]]]:
...
@typing.overload
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Any,
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
*,
forward_hook_with_return: Literal[True],
require_layer_grads: bool = False,
) -> Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor]:
...
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Any,
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
forward_hook_with_return: bool = False,
require_layer_grads: bool = False,
) -> Union[
Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor],
Dict[Module, Dict[device, Tuple[Tensor, ...]]],
]:
r"""
A helper function that allows to set a hook on model's `layer`, run the forward
pass and returns intermediate layer results, stored in a dictionary,
and optionally also the output of the forward function. The keys in the
dictionary are the device ids and the values are corresponding intermediate layer
results, either the inputs or the outputs of the layer depending on whether we set
`attribute_to_layer_input` to True or False.
This is especially useful when we execute forward pass in a distributed setting,
using `DataParallel`s for example.
"""
saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]] = defaultdict(dict)
lock = threading.Lock()
all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer
# Set a forward hook on specified module and run forward pass to
# get layer output tensor(s).
# For DataParallel models, each partition adds entry to dictionary
# with key as device and value as corresponding Tensor.
def hook_wrapper(original_module):
def forward_hook(module, inp, out=None):
eval_tsrs = inp if attribute_to_layer_input else out
is_eval_tuple = isinstance(eval_tsrs, tuple)
if not is_eval_tuple:
eval_tsrs = (eval_tsrs,)
if require_layer_grads:
apply_gradient_requirements(eval_tsrs, warn=False)
with lock:
nonlocal saved_layer
# Note that cloning behaviour of `eval_tsr` is different
# when `forward_hook_with_return` is set to True. This is because
# otherwise `backward()` on the last output layer won't execute.
if forward_hook_with_return:
saved_layer[original_module][eval_tsrs[0].device] = eval_tsrs
eval_tsrs_to_return = tuple(
eval_tsr.clone() for eval_tsr in eval_tsrs
)
if not is_eval_tuple:
eval_tsrs_to_return = eval_tsrs_to_return[0]
return eval_tsrs_to_return
else:
saved_layer[original_module][eval_tsrs[0].device] = tuple(
eval_tsr.clone() for eval_tsr in eval_tsrs
)
return forward_hook
all_hooks = []
try:
for single_layer in all_layers:
if attribute_to_layer_input:
all_hooks.append(
single_layer.register_forward_pre_hook(hook_wrapper(single_layer))
)
else:
all_hooks.append(
single_layer.register_forward_hook(hook_wrapper(single_layer))
)
output = _run_forward(
forward_fn,
inputs,
target=target_ind,
additional_forward_args=additional_forward_args,
)
finally:
for hook in all_hooks:
hook.remove()
if len(saved_layer) == 0:
raise AssertionError("Forward hook did not obtain any outputs for given layer")
if forward_hook_with_return:
return saved_layer, output
return saved_layer
def _gather_distributed_tensors(
saved_layer: Dict[device, Tuple[Tensor, ...]],
device_ids: Union[None, List[int]] = None,
key_list: Union[None, List[device]] = None,
) -> Tuple[Tensor, ...]:
r"""
A helper function to concatenate intermediate layer results stored on
different devices in `saved_layer`. `saved_layer` is a dictionary that
contains `device_id` as a key and intermediate layer results (either
the input or the output of the layer) stored on the device corresponding to
the key.
`key_list` is a list of devices in appropriate ordering for concatenation
and if not provided, keys are sorted based on device ids.
If only one key exists (standard model), key list simply has one element.
"""
if key_list is None:
key_list = _sort_key_list(list(saved_layer.keys()), device_ids)
return _reduce_list([saved_layer[device_id] for device_id in key_list])
def _extract_device_ids(
forward_fn: Callable,
saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]],
device_ids: Union[None, List[int]],
) -> Union[None, List[int]]:
r"""
A helper function to extract device_ids from `forward_function` in case it is
provided as part of a `DataParallel` model or if is accessible from
`forward_fn`.
In case input device_ids is not None, this function returns that value.
"""
# Multiple devices / keys implies a DataParallel model, so we look for
# device IDs if given or available from forward function
# (DataParallel model object).
if (
max(len(saved_layer[single_layer]) for single_layer in saved_layer) > 1
and device_ids is None
):
if (
hasattr(forward_fn, "device_ids")
and cast(Any, forward_fn).device_ids is not None
):
device_ids = cast(Any, forward_fn).device_ids
else:
raise AssertionError(
"Layer tensors are saved on multiple devices, however unable to access"
" device ID list from the `forward_fn`. Device ID list must be"
" accessible from `forward_fn`. For example, they can be retrieved"
" if `forward_fn` is a model of type `DataParallel`. It is used"
" for identifying device batch ordering."
)
return device_ids
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
*,
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: List[Module],
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> List[Tuple[Tensor, ...]]:
...
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
additional_forward_args: Any = None,
gradient_neuron_selector: Union[
None, int, Tuple[Union[int, slice], ...], Callable
] = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Union[
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[Tensor, ...],
List[Tuple[Tensor, ...]],
]:
"""
This method computes forward evaluation for a particular layer using a
forward hook. If a gradient_neuron_selector is provided, then gradients with
respect to that neuron in the layer output are also returned.
These functionalities are combined due to the behavior of DataParallel models
with hooks, in which hooks are executed once per device. We need to internally
combine the separated tensors from devices by concatenating based on device_ids.
Any necessary gradients must be taken with respect to each independent batched
tensor, so the gradients are computed and combined appropriately.
More information regarding the behavior of forward hooks with DataParallel models
can be found in the PyTorch data parallel documentation. We maintain the separate
evals in a dictionary protected by a lock, analogous to the gather implementation
for the core PyTorch DataParallel implementation.
"""
grad_enabled = True if gradient_neuron_selector is not None else grad_enabled
with torch.autograd.set_grad_enabled(grad_enabled):
saved_layer = _forward_layer_distributed_eval(
forward_fn,
inputs,
layer,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
)
device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)
# Identifies correct device ordering based on device ids.
# key_list is a list of devices in appropriate ordering for concatenation.
# If only one key exists (standard model), key list simply has one element.
key_list = _sort_key_list(list(next(iter(saved_layer.values())).keys()), device_ids)
if gradient_neuron_selector is not None:
assert isinstance(
layer, Module
), "Cannot compute neuron gradients for multiple layers simultaneously!"
inp_grads = _neuron_gradients(
inputs, saved_layer[layer], key_list, gradient_neuron_selector
)
return (
_gather_distributed_tensors(saved_layer[layer], key_list=key_list),
inp_grads,
)
else:
if isinstance(layer, Module):
return _gather_distributed_tensors(saved_layer[layer], key_list=key_list)
else:
return [
_gather_distributed_tensors(saved_layer[curr_layer], key_list=key_list)
for curr_layer in layer
]
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
*,
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: List[Module],
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]]:
...
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: ModuleOrModuleList,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: Union[
None, int, Tuple[Union[int, slice], ...], Callable
] = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Union[
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]],
]:
r"""
Computes gradients of the output with respect to a given layer as well
as the output evaluation of the layer for an arbitrary forward function
and given input.
    For data parallel models, hooks are executed once per device, so we
need to internally combine the separated tensors from devices by
concatenating based on device_ids. Any necessary gradients must be taken
with respect to each independent batched tensor, so the gradients are
computed and combined appropriately.
More information regarding the behavior of forward hooks with DataParallel
models can be found in the PyTorch data parallel documentation. We maintain
the separate inputs in a dictionary protected by a lock, analogous to the
gather implementation for the core PyTorch DataParallel implementation.
NOTE: To properly handle inplace operations, a clone of the layer output
is stored. This structure inhibits execution of a backward hook on the last
module for the layer output when computing the gradient with respect to
the input, since we store an intermediate clone, as
opposed to the true module output. If backward module hooks are necessary
for the final module when computing input gradients, utilize
_forward_layer_eval_with_neuron_grads instead.
Args:
forward_fn: forward function. This can be for example model's
forward function.
layer: Layer for which gradients / output will be evaluated.
inputs: Input at which gradients are evaluated,
will be passed to forward_fn.
target_ind: Index of the target class for which gradients
must be computed (classification only).
output_fn: An optional function that is applied to the layer inputs or
outputs depending whether the `attribute_to_layer_input` is
set to `True` or `False`
        additional_forward_args: Additional input arguments that forward
                    function requires. It takes an empty tuple (no additional
                    arguments) if no additional arguments are required.
Returns:
tuple[**gradients**, **evals**]:
- **gradients**:
Gradients of output with respect to target layer output.
- **evals**:
Target layer output for given input.
"""
with torch.autograd.set_grad_enabled(True):
# saved_layer is a dictionary mapping device to a tuple of
# layer evaluations on that device.
saved_layer, output = _forward_layer_distributed_eval(
forward_fn,
inputs,
layer,
target_ind=target_ind,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
forward_hook_with_return=True,
require_layer_grads=True,
)
assert output[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)
# Identifies correct device ordering based on device ids.
# key_list is a list of devices in appropriate ordering for concatenation.
# If only one key exists (standard model), key list simply has one element.
key_list = _sort_key_list(
list(next(iter(saved_layer.values())).keys()), device_ids
)
all_outputs: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]
if isinstance(layer, Module):
all_outputs = _reduce_list(
[
saved_layer[layer][device_id]
if output_fn is None
else output_fn(saved_layer[layer][device_id])
for device_id in key_list
]
)
else:
all_outputs = [
_reduce_list(
[
saved_layer[single_layer][device_id]
if output_fn is None
else output_fn(saved_layer[single_layer][device_id])
for device_id in key_list
]
)
for single_layer in layer
]
all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer
grad_inputs = tuple(
layer_tensor
for single_layer in all_layers
for device_id in key_list
for layer_tensor in saved_layer[single_layer][device_id]
)
saved_grads = torch.autograd.grad(torch.unbind(output), grad_inputs)
offset = 0
all_grads: List[Tuple[Tensor, ...]] = []
for single_layer in all_layers:
num_tensors = len(next(iter(saved_layer[single_layer].values())))
curr_saved_grads = [
saved_grads[i : i + num_tensors]
for i in range(
offset, offset + len(key_list) * num_tensors, num_tensors
)
]
offset += len(key_list) * num_tensors
if output_fn is not None:
curr_saved_grads = [
output_fn(curr_saved_grad) for curr_saved_grad in curr_saved_grads
]
all_grads.append(_reduce_list(curr_saved_grads))
layer_grads: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]
layer_grads = all_grads
if isinstance(layer, Module):
layer_grads = all_grads[0]
if gradient_neuron_selector is not None:
assert isinstance(
layer, Module
), "Cannot compute neuron gradients for multiple layers simultaneously!"
inp_grads = _neuron_gradients(
inputs, saved_layer[layer], key_list, gradient_neuron_selector
)
return (
cast(Tuple[Tensor, ...], layer_grads),
cast(Tuple[Tensor, ...], all_outputs),
inp_grads,
)
return layer_grads, all_outputs # type: ignore
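# Usage sketch (illustrative, not part of the library): gradients and
# evaluations for a single layer of a small sequential model. Names below are
# hypothetical.
def _example_layer_gradients_and_eval() -> None:  # pragma: no cover
    model = torch.nn.Sequential(
        torch.nn.Linear(3, 4), torch.nn.ReLU(), torch.nn.Linear(4, 2)
    )
    inp = torch.randn(5, 3)
    grads, evals = compute_layer_gradients_and_eval(
        model, model[0], inp, target_ind=torch.zeros(5, dtype=torch.long)
    )
    # grads[0] and evals[0] both have shape (5, 4): the gradient of the
    # selected output w.r.t. the first linear layer's output, and that
    # layer output itself.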
def construct_neuron_grad_fn(
layer: Module,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
device_ids: Union[None, List[int]] = None,
attribute_to_neuron_input: bool = False,
) -> Callable:
def grad_fn(
forward_fn: Callable,
inputs: TensorOrTupleOfTensorsGeneric,
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
_, grads = _forward_layer_eval_with_neuron_grads(
forward_fn,
inputs,
layer,
additional_forward_args,
gradient_neuron_selector=neuron_selector,
device_ids=device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
return grads
return grad_fn
def _extract_parameters_from_layers(layer_modules):
layer_parameters = []
if layer_modules is not None:
layer_parameters = [
parameter
for layer_module in layer_modules
for parameter in layer_module.parameters()
]
assert (
len(layer_parameters) > 0
), "No parameters are available for modules for provided input `layers`"
return layer_parameters
def _compute_jacobian_wrt_params(
model: Module,
inputs: Tuple[Any, ...],
labels: Optional[Tensor] = None,
loss_fn: Optional[Union[Module, Callable]] = None,
    layer_modules: Optional[List[Module]] = None,
) -> Tuple[Tensor, ...]:
r"""
Computes the Jacobian of a batch of test examples given a model, and optional
loss function and target labels. This method is equivalent to calculating the
gradient for every individual example in the minibatch.
Args:
model (torch.nn.Module): The trainable model providing the forward pass
inputs (tuple[Any, ...]): The minibatch for which the forward pass is computed.
It is unpacked before passing to `model`, so it must be a tuple. The
individual elements of `inputs` can be anything.
labels (Tensor, optional): Labels for input if computing a loss function.
loss_fn (torch.nn.Module or Callable, optional): The loss function. If a library
defined loss function is provided, it would be expected to be a
torch.nn.Module. If a custom loss is provided, it can be either type,
but must behave as a library loss function would if `reduction='none'`.
layer_modules (List[torch.nn.Module], optional): A list of PyTorch modules
w.r.t. which jacobian gradients are computed.
Returns:
grads (tuple[Tensor, ...]): Returns the Jacobian for the minibatch as a
tuple of gradients corresponding to the tuple of trainable parameters
            returned by `model.parameters()`. Each element grads[i] refers to the
gradients for the parameters in the i-th trainable layer of the model.
Each grads[i] object is a tensor with the gradients for the `inputs`
batch. For example, grads[i][j] would reference the gradients for the
parameters of the i-th layer, for the j-th member of the minibatch.
"""
with torch.autograd.set_grad_enabled(True):
out = model(*inputs)
assert out.dim() != 0, "Please ensure model output has at least one dimension."
if labels is not None and loss_fn is not None:
loss = loss_fn(out, labels)
if hasattr(loss_fn, "reduction"):
msg0 = "Please ensure loss_fn.reduction is set to `none`"
assert loss_fn.reduction == "none", msg0 # type: ignore
else:
msg1 = (
"Loss function is applying a reduction. Please ensure "
f"Output shape: {out.shape} and Loss shape: {loss.shape} "
"are matching."
)
assert loss.dim() != 0, msg1
assert out.shape[0] == loss.shape[0], msg1
out = loss
if layer_modules is not None:
layer_parameters = _extract_parameters_from_layers(layer_modules)
grads_list = [
torch.autograd.grad(
outputs=out[i],
inputs=cast(
Union[Tensor, Sequence[Tensor]],
model.parameters() if layer_modules is None else layer_parameters,
),
grad_outputs=torch.ones_like(out[i]),
retain_graph=True,
)
for i in range(out.shape[0])
]
grads = tuple([torch.stack(x) for x in zip(*grads_list)])
return tuple(grads)
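# Usage sketch (illustrative, not part of the library): per-sample parameter
# gradients of each example's score for a linear model, with no loss function.
def _example_jacobian_wrt_params() -> None:  # pragma: no cover
    model = torch.nn.Linear(3, 1)
    grads = _compute_jacobian_wrt_params(model, (torch.randn(4, 3),))
    # grads[0].shape == (4, 1, 3) and grads[1].shape == (4, 1): one weight
    # gradient and one bias gradient per example in the batch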
def _compute_jacobian_wrt_params_with_sample_wise_trick(
model: Module,
inputs: Tuple[Any, ...],
labels: Optional[Tensor] = None,
loss_fn: Optional[Union[Module, Callable]] = None,
reduction_type: Optional[str] = "sum",
    layer_modules: Optional[List[Module]] = None,
) -> Tuple[Any, ...]:
r"""
Computes the Jacobian of a batch of test examples given a model, and optional
loss function and target labels. This method uses sample-wise gradients per
batch trick to fully vectorize the Jacobian calculation. Currently, only
linear and conv2d layers are supported.
User must `add_hooks(model)` before calling this function.
Args:
model (torch.nn.Module): The trainable model providing the forward pass
inputs (tuple[Any, ...]): The minibatch for which the forward pass is computed.
It is unpacked before passing to `model`, so it must be a tuple. The
individual elements of `inputs` can be anything.
labels (Tensor, optional): Labels for input if computing a loss function.
loss_fn (torch.nn.Module or Callable, optional): The loss function. If a library
defined loss function is provided, it would be expected to be a
torch.nn.Module. If a custom loss is provided, it can be either type,
but must behave as a library loss function would if `reduction='sum'` or
`reduction='mean'`.
reduction_type (str, optional): The type of reduction applied. If a loss_fn is
passed, this should match `loss_fn.reduction`. Else if gradients are
being computed on direct model outputs (scores), then 'sum' should be
used.
Defaults to 'sum'.
        layer_modules (List[torch.nn.Module], optional): A list of PyTorch
                    modules w.r.t. which jacobian gradients are computed.
Returns:
grads (tuple[Tensor, ...]): Returns the Jacobian for the minibatch as a
tuple of gradients corresponding to the tuple of trainable parameters
            returned by `model.parameters()`. Each element grads[i] refers to the
gradients for the parameters in the i-th trainable layer of the model.
Each grads[i] object is a tensor with the gradients for the `inputs`
batch. For example, grads[i][j] would reference the gradients for the
parameters of the i-th layer, for the j-th member of the minibatch.
"""
with torch.autograd.set_grad_enabled(True):
inputs = tuple(inp.clone() for inp in inputs)
apply_gradient_requirements(inputs)
sample_grad_wrapper = SampleGradientWrapper(model, layer_modules)
try:
sample_grad_wrapper.add_hooks()
out = model(*inputs)
assert (
out.dim() != 0
), "Please ensure model output has at least one dimension."
if labels is not None and loss_fn is not None:
loss = loss_fn(out, labels)
# TODO: allow loss_fn to be Callable
if (isinstance(loss_fn, Module) or callable(loss_fn)) and hasattr(
loss_fn, "reduction"
):
reduction = loss_fn.reduction # type: ignore
msg0 = (
"Please ensure that loss_fn.reduction is set to `sum` or `mean`"
)
assert reduction != "none", msg0
msg1 = (
f"loss_fn.reduction ({reduction}) does not match"
f"reduction type ({reduction_type}). Please ensure they are"
" matching."
)
assert reduction == reduction_type, msg1
msg2 = (
"Please ensure custom loss function is applying either a "
"sum or mean reduction."
)
assert out.shape != loss.shape, msg2
if reduction_type != "sum" and reduction_type != "mean":
raise ValueError(
f"{reduction_type} is not a valid value for reduction_type. "
"Must be either 'sum' or 'mean'."
)
out = loss
sample_grad_wrapper.compute_param_sample_gradients(
out, loss_mode=reduction_type
)
if layer_modules is not None:
layer_parameters = _extract_parameters_from_layers(layer_modules)
grads = tuple(
param.sample_grad # type: ignore
for param in (
model.parameters() if layer_modules is None else layer_parameters
)
if hasattr(param, "sample_grad")
)
finally:
sample_grad_wrapper.remove_hooks()
return grads
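# Usage sketch (illustrative, not part of the library): the sample-wise trick
# should agree with the per-example autograd loop in
# _compute_jacobian_wrt_params for supported layers.
def _example_jacobian_sample_wise_trick() -> None:  # pragma: no cover
    model = torch.nn.Linear(3, 1)
    inputs = (torch.randn(4, 3),)
    grads_loop = _compute_jacobian_wrt_params(model, inputs)
    grads_fast = _compute_jacobian_wrt_params_with_sample_wise_trick(
        model, inputs, reduction_type="sum"
    )
    # corresponding entries of grads_loop and grads_fast match up to
    # numerical precision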
|
from collections import defaultdict
from enum import Enum
from typing import cast, DefaultDict, Iterable, List, Optional, Tuple, Union
import torch
from captum._utils.common import _format_tensor_into_tuples, _register_backward_hook
from torch import Tensor
from torch.nn import Module
def _reset_sample_grads(module: Module) -> None:
module.weight.sample_grad = 0 # type: ignore
if module.bias is not None:
module.bias.sample_grad = 0 # type: ignore
def linear_param_grads(
module: Module, activation: Tensor, gradient_out: Tensor, reset: bool = False
) -> None:
r"""
Computes parameter gradients per sample for nn.Linear module, given module
input activations and output gradients.
Gradients are accumulated in the sample_grad attribute of each parameter
(weight and bias). If reset = True, any current sample_grad values are reset,
otherwise computed gradients are accumulated and added to the existing
stored gradients.
Inputs with more than 2 dimensions are only supported with torch 1.8 or later
"""
if reset:
_reset_sample_grads(module)
module.weight.sample_grad += torch.einsum( # type: ignore
"n...i,n...j->nij", gradient_out, activation
)
if module.bias is not None:
module.bias.sample_grad += torch.einsum( # type: ignore
"n...i->ni", gradient_out
)
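# Shape sketch for the einsums above (illustrative): for a plain 2D input with
# n samples, activation is (n, in_features) and gradient_out is
# (n, out_features), so "n...i,n...j->nij" yields per-sample weight gradients
# of shape (n, out_features, in_features) and "n...i->ni" yields per-sample
# bias gradients of shape (n, out_features).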
def conv2d_param_grads(
module: Module, activation: Tensor, gradient_out: Tensor, reset: bool = False
) -> None:
r"""
Computes parameter gradients per sample for nn.Conv2d module, given module
input activations and output gradients.
nn.Conv2d modules with padding set to a string option ('same' or 'valid') are
currently unsupported.
Gradients are accumulated in the sample_grad attribute of each parameter
(weight and bias). If reset = True, any current sample_grad values are reset,
otherwise computed gradients are accumulated and added to the existing
stored gradients.
"""
if reset:
_reset_sample_grads(module)
batch_size = cast(int, activation.shape[0])
unfolded_act = torch.nn.functional.unfold(
activation,
cast(Union[int, Tuple[int, ...]], module.kernel_size),
dilation=cast(Union[int, Tuple[int, ...]], module.dilation),
padding=cast(Union[int, Tuple[int, ...]], module.padding),
stride=cast(Union[int, Tuple[int, ...]], module.stride),
)
reshaped_grad = gradient_out.reshape(batch_size, -1, unfolded_act.shape[-1])
grad1 = torch.einsum("ijk,ilk->ijl", reshaped_grad, unfolded_act)
shape = [batch_size] + list(cast(Iterable[int], module.weight.shape))
module.weight.sample_grad += grad1.reshape(shape) # type: ignore
if module.bias is not None:
module.bias.sample_grad += torch.sum(reshaped_grad, dim=2) # type: ignore
SUPPORTED_MODULES = {
torch.nn.Conv2d: conv2d_param_grads,
torch.nn.Linear: linear_param_grads,
}
class LossMode(Enum):
SUM = 0
MEAN = 1
class SampleGradientWrapper:
r"""
Wrapper which allows computing sample-wise gradients in a single backward pass.
This is accomplished by adding hooks to capture activations and output
gradients for supported modules, and using these activations and gradients
to compute the parameter gradients per-sample.
Currently, only nn.Linear and nn.Conv2d modules are supported.
Similar reference implementations of sample-based gradients include:
- https://github.com/cybertronai/autograd-hacks
- https://github.com/pytorch/opacus/tree/main/opacus/grad_sample
"""
def __init__(self, model, layer_modules=None) -> None:
self.model = model
self.hooks_added = False
self.activation_dict: DefaultDict[Module, List[Tensor]] = defaultdict(list)
self.gradient_dict: DefaultDict[Module, List[Tensor]] = defaultdict(list)
self.forward_hooks: List[torch.utils.hooks.RemovableHandle] = []
self.backward_hooks: List[torch.utils.hooks.RemovableHandle] = []
self.layer_modules: Optional[List[Module]] = layer_modules
def add_hooks(self) -> None:
self.hooks_added = True
self.model.apply(self._register_module_hooks)
def _register_module_hooks(self, module: torch.nn.Module) -> None:
if (self.layer_modules is None or module in self.layer_modules) and isinstance(
module, tuple(SUPPORTED_MODULES.keys())
):
self.forward_hooks.append(
module.register_forward_hook(self._forward_hook_fn)
)
self.backward_hooks.extend(
_register_backward_hook(module, self._backward_hook_fn, None)
)
def _forward_hook_fn(
self,
module: Module,
module_input: Union[Tensor, Tuple[Tensor, ...]],
module_output: Union[Tensor, Tuple[Tensor, ...]],
) -> None:
inp_tuple = _format_tensor_into_tuples(module_input)
self.activation_dict[module].append(inp_tuple[0].clone().detach())
def _backward_hook_fn(
self,
module: Module,
grad_input: Union[Tensor, Tuple[Tensor, ...]],
grad_output: Union[Tensor, Tuple[Tensor, ...]],
) -> None:
grad_output_tuple = _format_tensor_into_tuples(grad_output)
self.gradient_dict[module].append(grad_output_tuple[0].clone().detach())
def remove_hooks(self) -> None:
self.hooks_added = False
for hook in self.forward_hooks:
hook.remove()
for hook in self.backward_hooks:
hook.remove()
self.forward_hooks = []
self.backward_hooks = []
def _reset(self) -> None:
self.activation_dict = defaultdict(list)
self.gradient_dict = defaultdict(list)
def compute_param_sample_gradients(self, loss_blob, loss_mode="mean") -> None:
assert (
loss_mode.upper() in LossMode.__members__
), f"Provided loss mode {loss_mode} is not valid"
mode = LossMode[loss_mode.upper()]
self.model.zero_grad()
loss_blob.backward(gradient=torch.ones_like(loss_blob))
for module in self.gradient_dict:
sample_grad_fn = SUPPORTED_MODULES[type(module)]
activations = self.activation_dict[module]
gradients = self.gradient_dict[module]
assert len(activations) == len(gradients), (
"Number of saved activations do not match number of saved gradients."
" This may occur if multiple forward passes are run without calling"
" reset or computing param gradients."
)
            # Reversing grads: when a module is used multiple times, the saved
            # activations align with the reverse order of the saved gradients,
            # since the order is reversed during backprop.
for i, (act, grad) in enumerate(
zip(activations, list(reversed(gradients)))
):
mult = 1 if mode is LossMode.SUM else act.shape[0]
sample_grad_fn(module, act, grad * mult, reset=(i == 0))
self._reset()
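# Usage sketch (illustrative, not part of the library): computing per-sample
# parameter gradients with the wrapper directly. The function name is
# hypothetical.
def _example_sample_gradient_wrapper() -> None:  # pragma: no cover
    model = torch.nn.Linear(3, 1)
    wrapper = SampleGradientWrapper(model)
    wrapper.add_hooks()
    try:
        loss = model(torch.randn(8, 3)).sum()
        wrapper.compute_param_sample_gradients(loss, loss_mode="sum")
        # model.weight.sample_grad has shape (8, 1, 3): one gradient per sample
    finally:
        wrapper.remove_hooks()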
|
#!/usr/bin/env python3
import typing
from enum import Enum
from functools import reduce
from inspect import signature
from typing import Any, Callable, cast, Dict, List, overload, Tuple, Union
import numpy as np
import torch
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
TupleOrTensorOrBoolGeneric,
)
from torch import device, Tensor
from torch.nn import Module
def _parse_version(v: str) -> Tuple[int, ...]:
"""
Parse version strings into tuples for comparison.
Versions should be in the form of "<major>.<minor>.<patch>", "<major>.<minor>",
or "<major>". The "dev", "post" and other letter portions of the given version will
be ignored.
Args:
v (str): A version string.
Returns:
version_tuple (tuple[int]): A tuple of integer values to use for version
comparison.
"""
v = [n for n in v.split(".") if n.isdigit()]
assert v != []
return tuple(map(int, v))
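# Illustrative expansions (not part of the library):
#   _parse_version("1.13.1") == (1, 13, 1)
#   _parse_version("2.0.0.dev20230301") == (2, 0, 0)  # non-numeric parts dropped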
class ExpansionTypes(Enum):
repeat = 1
repeat_interleave = 2
def safe_div(
numerator: Tensor,
denom: Union[Tensor, int, float],
default_denom: Union[Tensor, int, float] = 1.0,
) -> Tensor:
r"""
A simple utility function to perform `numerator / denom`
if the statement is undefined => result will be `numerator / default_denorm`
"""
if isinstance(denom, (int, float)):
return numerator / (denom if denom != 0 else default_denom)
# convert default_denom to tensor if it is float
if not torch.is_tensor(default_denom):
default_denom = torch.tensor(
default_denom, dtype=denom.dtype, device=denom.device
)
return numerator / torch.where(denom != 0, denom, default_denom)
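# Illustrative expansion (not part of the library): zero denominators fall
# back to the default denominator of 1.0.
#   safe_div(torch.tensor([1.0, 2.0]), torch.tensor([0.0, 4.0]))
#   -> tensor([1.0000, 0.5000])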
@typing.overload
def _is_tuple(inputs: Tensor) -> Literal[False]:
...
@typing.overload
def _is_tuple(inputs: Tuple[Tensor, ...]) -> Literal[True]:
...
def _is_tuple(inputs: Union[Tensor, Tuple[Tensor, ...]]) -> bool:
return isinstance(inputs, tuple)
def _validate_target(num_samples: int, target: TargetType) -> None:
if isinstance(target, list) or (
isinstance(target, torch.Tensor) and torch.numel(target) > 1
):
        assert num_samples == len(target), (
            "The number of samples provided in the"
            " input {} does not match the number of targets {}".format(
                num_samples, len(target)
            )
        )
def _validate_input(
inputs: Tuple[Tensor, ...],
baselines: Tuple[Union[Tensor, int, float], ...],
draw_baseline_from_distrib: bool = False,
) -> None:
    assert len(inputs) == len(baselines), (
        "Input and baseline tuples must have the same length; "
        "baselines has {} elements whereas inputs has {}.".format(
            len(baselines), len(inputs)
        )
    )
for input, baseline in zip(inputs, baselines):
if draw_baseline_from_distrib:
assert (
isinstance(baseline, (int, float))
or input.shape[1:] == baseline.shape[1:]
), (
"The samples in input and baseline batches must have"
" the same shape or the baseline corresponding to the"
" input tensor must be a scalar."
" Found baseline: {} and input: {} ".format(baseline, input)
)
else:
assert (
isinstance(baseline, (int, float))
or input.shape == baseline.shape
or baseline.shape[0] == 1
), (
"Baseline can be provided as a tensor for just one input and"
" broadcasted to the batch or input and baseline must have the"
" same shape or the baseline corresponding to each input tensor"
" must be a scalar. Found baseline: {} and input: {}".format(
baseline, input
)
)
def _zeros(inputs: Tuple[Tensor, ...]) -> Tuple[int, ...]:
r"""
Takes a tuple of tensors as input and returns a tuple that has the same
length as `inputs` with each element as the integer 0.
"""
return tuple(0 if input.dtype is not torch.bool else False for input in inputs)
def _format_baseline(
baselines: BaselineType, inputs: Tuple[Tensor, ...]
) -> Tuple[Union[Tensor, int, float], ...]:
if baselines is None:
return _zeros(inputs)
if not isinstance(baselines, tuple):
baselines = (baselines,)
for baseline in baselines:
assert isinstance(
baseline, (torch.Tensor, int, float)
), "baseline input argument must be either a torch.Tensor or a number \
however {} detected".format(
type(baseline)
)
return baselines
def _format_feature_mask(
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]],
inputs: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
"""
Format a feature mask into a tuple of tensors.
The `inputs` should be correctly formatted first
If `feature_mask` is None, assign each non-batch dimension with a consecutive
integer from 0.
If `feature_mask` is a tensor, wrap it in a tuple.
"""
if feature_mask is None:
formatted_mask = []
current_num_features = 0
for inp in inputs:
            # the following also handles an empty tensor (numel == 0);
            # an empty mask tensor will be added for it to the feature mask
num_features = torch.numel(inp[0:1])
formatted_mask.append(
current_num_features
+ torch.reshape(
torch.arange(num_features, device=inp.device),
inp[0:1].shape,
)
)
current_num_features += num_features
formatted_mask = tuple(formatted_mask)
else:
formatted_mask = _format_tensor_into_tuples(feature_mask)
return formatted_mask
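# Illustrative expansion (not part of the library): with no mask given, each
# feature position receives a consecutive index across all inputs.
#   _format_feature_mask(None, (torch.ones(2, 3), torch.ones(2, 2)))
#   -> (tensor([[0, 1, 2]]), tensor([[3, 4]]))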
@overload
def _format_tensor_into_tuples(inputs: None) -> None:
...
@overload
def _format_tensor_into_tuples(
inputs: Union[Tensor, Tuple[Tensor, ...]]
) -> Tuple[Tensor, ...]:
...
def _format_tensor_into_tuples(
inputs: Union[None, Tensor, Tuple[Tensor, ...]]
) -> Union[None, Tuple[Tensor, ...]]:
if inputs is None:
return None
if not isinstance(inputs, tuple):
assert isinstance(inputs, torch.Tensor), (
"`inputs` must be a torch.Tensor or a tuple[torch.Tensor] "
f"but found: {type(inputs)}"
)
inputs = (inputs,)
return inputs
def _format_inputs(inputs: Any, unpack_inputs: bool = True) -> Any:
return (
inputs
if (isinstance(inputs, tuple) or isinstance(inputs, list)) and unpack_inputs
else (inputs,)
)
def _format_float_or_tensor_into_tuples(
inputs: Union[float, Tensor, Tuple[Union[float, Tensor], ...]]
) -> Tuple[Union[float, Tensor], ...]:
if not isinstance(inputs, tuple):
assert isinstance(
inputs, (torch.Tensor, float)
), "`inputs` must have type float or torch.Tensor but {} found: ".format(
type(inputs)
)
inputs = (inputs,)
return inputs
@overload
def _format_additional_forward_args(additional_forward_args: None) -> None:
...
@overload
def _format_additional_forward_args(
additional_forward_args: Union[Tensor, Tuple]
) -> Tuple:
...
@overload
def _format_additional_forward_args(additional_forward_args: Any) -> Union[None, Tuple]:
...
def _format_additional_forward_args(additional_forward_args: Any) -> Union[None, Tuple]:
if additional_forward_args is not None and not isinstance(
additional_forward_args, tuple
):
additional_forward_args = (additional_forward_args,)
return additional_forward_args
def _expand_additional_forward_args(
additional_forward_args: Any,
n_steps: int,
expansion_type: ExpansionTypes = ExpansionTypes.repeat,
) -> Union[None, Tuple]:
def _expand_tensor_forward_arg(
additional_forward_arg: Tensor,
n_steps: int,
expansion_type: ExpansionTypes = ExpansionTypes.repeat,
) -> Tensor:
if len(additional_forward_arg.size()) == 0:
return additional_forward_arg
if expansion_type == ExpansionTypes.repeat:
return torch.cat([additional_forward_arg] * n_steps, dim=0)
elif expansion_type == ExpansionTypes.repeat_interleave:
return additional_forward_arg.repeat_interleave(n_steps, dim=0)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
if additional_forward_args is None:
return None
return tuple(
_expand_tensor_forward_arg(additional_forward_arg, n_steps, expansion_type)
if isinstance(additional_forward_arg, torch.Tensor)
else additional_forward_arg
for additional_forward_arg in additional_forward_args
)
def _expand_target(
target: TargetType,
n_steps: int,
expansion_type: ExpansionTypes = ExpansionTypes.repeat,
) -> TargetType:
if isinstance(target, list):
if expansion_type == ExpansionTypes.repeat:
return target * n_steps
elif expansion_type == ExpansionTypes.repeat_interleave:
expanded_target = []
for i in target:
expanded_target.extend([i] * n_steps)
return cast(Union[List[Tuple[int, ...]], List[int]], expanded_target)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
elif isinstance(target, torch.Tensor) and torch.numel(target) > 1:
if expansion_type == ExpansionTypes.repeat:
return torch.cat([target] * n_steps, dim=0)
elif expansion_type == ExpansionTypes.repeat_interleave:
return target.repeat_interleave(n_steps, dim=0)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
return target
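# Illustrative expansions (not part of the library) of the two expansion types
# for a tensor target:
#   _expand_target(torch.tensor([0, 1]), 2, ExpansionTypes.repeat)
#   -> tensor([0, 1, 0, 1])
#   _expand_target(torch.tensor([0, 1]), 2, ExpansionTypes.repeat_interleave)
#   -> tensor([0, 0, 1, 1])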
def _expand_feature_mask(
feature_mask: Union[Tensor, Tuple[Tensor, ...]], n_samples: int
):
is_feature_mask_tuple = _is_tuple(feature_mask)
feature_mask = _format_tensor_into_tuples(feature_mask)
feature_mask_new = tuple(
feature_mask_elem.repeat_interleave(n_samples, dim=0)
if feature_mask_elem.size(0) > 1
else feature_mask_elem
for feature_mask_elem in feature_mask
)
return _format_output(is_feature_mask_tuple, feature_mask_new)
def _expand_and_update_baselines(
inputs: Tuple[Tensor, ...],
n_samples: int,
kwargs: dict,
draw_baseline_from_distrib: bool = False,
):
def get_random_baseline_indices(bsz, baseline):
num_ref_samples = baseline.shape[0]
return np.random.choice(num_ref_samples, n_samples * bsz).tolist()
# expand baselines to match the sizes of input
if "baselines" not in kwargs:
return
baselines = kwargs["baselines"]
baselines = _format_baseline(baselines, inputs)
_validate_input(
inputs, baselines, draw_baseline_from_distrib=draw_baseline_from_distrib
)
if draw_baseline_from_distrib:
bsz = inputs[0].shape[0]
baselines = tuple(
baseline[get_random_baseline_indices(bsz, baseline)]
if isinstance(baseline, torch.Tensor)
else baseline
for baseline in baselines
)
else:
baselines = tuple(
baseline.repeat_interleave(n_samples, dim=0)
if isinstance(baseline, torch.Tensor)
and baseline.shape[0] == input.shape[0]
and baseline.shape[0] > 1
else baseline
for input, baseline in zip(inputs, baselines)
)
# update kwargs with expanded baseline
kwargs["baselines"] = baselines
def _expand_and_update_additional_forward_args(n_samples: int, kwargs: dict):
if "additional_forward_args" not in kwargs:
return
additional_forward_args = kwargs["additional_forward_args"]
additional_forward_args = _format_additional_forward_args(additional_forward_args)
if additional_forward_args is None:
return
additional_forward_args = _expand_additional_forward_args(
additional_forward_args,
n_samples,
expansion_type=ExpansionTypes.repeat_interleave,
)
    # update kwargs with expanded additional forward args
kwargs["additional_forward_args"] = additional_forward_args
def _expand_and_update_target(n_samples: int, kwargs: dict):
if "target" not in kwargs:
return
target = kwargs["target"]
target = _expand_target(
target, n_samples, expansion_type=ExpansionTypes.repeat_interleave
)
    # update kwargs with expanded target
kwargs["target"] = target
def _expand_and_update_feature_mask(n_samples: int, kwargs: dict):
if "feature_mask" not in kwargs:
return
feature_mask = kwargs["feature_mask"]
if feature_mask is None:
return
feature_mask = _expand_feature_mask(feature_mask, n_samples)
kwargs["feature_mask"] = feature_mask
@typing.overload
def _format_output(
is_inputs_tuple: Literal[True], output: Tuple[Tensor, ...]
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _format_output(
is_inputs_tuple: Literal[False], output: Tuple[Tensor, ...]
) -> Tensor:
...
@typing.overload
def _format_output(
is_inputs_tuple: bool, output: Tuple[Tensor, ...]
) -> Union[Tensor, Tuple[Tensor, ...]]:
...
def _format_output(
is_inputs_tuple: bool, output: Tuple[Tensor, ...]
) -> Union[Tensor, Tuple[Tensor, ...]]:
r"""
In case input is a tensor and the output is returned in form of a
tuple we take the first element of the output's tuple to match the
same shape signatues of the inputs
"""
assert isinstance(output, tuple), "Output must be in shape of a tuple"
    assert is_inputs_tuple or len(output) == 1, (
        "The input is a single tensor, however the output isn't."
        " The number of output tensors is: {}".format(len(output))
    )
return output if is_inputs_tuple else output[0]
@typing.overload
def _format_outputs(
is_multiple_inputs: Literal[False], outputs: List[Tuple[Tensor, ...]]
) -> Union[Tensor, Tuple[Tensor, ...]]:
...
@typing.overload
def _format_outputs(
is_multiple_inputs: Literal[True], outputs: List[Tuple[Tensor, ...]]
) -> List[Union[Tensor, Tuple[Tensor, ...]]]:
...
@typing.overload
def _format_outputs(
is_multiple_inputs: bool, outputs: List[Tuple[Tensor, ...]]
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
...
def _format_outputs(
is_multiple_inputs: bool, outputs: List[Tuple[Tensor, ...]]
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
assert isinstance(outputs, list), "Outputs must be a list"
    assert is_multiple_inputs or len(outputs) == 1, (
        "outputs should contain multiple inputs or have a single output,"
        f" however the number of outputs is: {len(outputs)}"
    )
return (
[_format_output(len(output) > 1, output) for output in outputs]
if is_multiple_inputs
else _format_output(len(outputs[0]) > 1, outputs[0])
)
def _run_forward(
forward_func: Callable,
inputs: Any,
target: TargetType = None,
additional_forward_args: Any = None,
) -> Tensor:
forward_func_args = signature(forward_func).parameters
if len(forward_func_args) == 0:
output = forward_func()
return output if target is None else _select_targets(output, target)
# make everything a tuple so that it is easy to unpack without
# using if-statements
inputs = _format_inputs(inputs)
additional_forward_args = _format_additional_forward_args(additional_forward_args)
output = forward_func(
*(*inputs, *additional_forward_args)
if additional_forward_args is not None
else inputs
)
return _select_targets(output, target)
def _select_targets(output: Tensor, target: TargetType) -> Tensor:
if target is None:
return output
num_examples = output.shape[0]
dims = len(output.shape)
device = output.device
if isinstance(target, (int, tuple)):
return _verify_select_column(output, target)
elif isinstance(target, torch.Tensor):
if torch.numel(target) == 1 and isinstance(target.item(), int):
return _verify_select_column(output, cast(int, target.item()))
elif len(target.shape) == 1 and torch.numel(target) == num_examples:
assert dims == 2, "Output must be 2D to select tensor of targets."
return torch.gather(output, 1, target.reshape(len(output), 1))
else:
raise AssertionError(
"Tensor target dimension %r is not valid. %r"
% (target.shape, output.shape)
)
elif isinstance(target, list):
assert len(target) == num_examples, "Target list length does not match output!"
if isinstance(target[0], int):
assert dims == 2, "Output must be 2D to select tensor of targets."
return torch.gather(
output, 1, torch.tensor(target, device=device).reshape(len(output), 1)
)
elif isinstance(target[0], tuple):
return torch.stack(
[
output[(i,) + cast(Tuple, targ_elem)]
for i, targ_elem in enumerate(target)
]
)
else:
raise AssertionError(
f"Target element type {type(target[0])} in list is not valid."
)
else:
raise AssertionError(f"Target type {type(target)} is not valid.")
def _contains_slice(target: Union[int, Tuple[Union[int, slice], ...]]) -> bool:
if isinstance(target, tuple):
for index in target:
if isinstance(index, slice):
return True
return False
return isinstance(target, slice)
def _verify_select_column(
output: Tensor, target: Union[int, Tuple[Union[int, slice], ...]]
) -> Tensor:
target = (target,) if isinstance(target, int) else target
assert (
len(target) <= len(output.shape) - 1
), "Cannot choose target column with output shape %r." % (output.shape,)
return output[(slice(None), *target)]
def _verify_select_neuron(
layer_output: Tuple[Tensor, ...],
selector: Union[int, Tuple[Union[int, slice], ...], Callable],
) -> Tensor:
if callable(selector):
return selector(layer_output if len(layer_output) > 1 else layer_output[0])
assert len(layer_output) == 1, (
"Cannot select neuron index from layer with multiple tensors,"
"consider providing a neuron selector function instead."
)
selected_neurons = _verify_select_column(layer_output[0], selector)
if _contains_slice(selector):
return selected_neurons.reshape(selected_neurons.shape[0], -1).sum(1)
return selected_neurons
def _extract_device(
module: Module,
hook_inputs: Union[None, Tensor, Tuple[Tensor, ...]],
hook_outputs: Union[None, Tensor, Tuple[Tensor, ...]],
) -> device:
params = list(module.parameters())
if (
(hook_inputs is None or len(hook_inputs) == 0)
and (hook_outputs is None or len(hook_outputs) == 0)
and len(params) == 0
):
        raise RuntimeError(
            "Unable to extract device information for the module {}. Both the"
            " inputs and outputs of the forward hook and `module.parameters()`"
            " are empty. The inputs to the forward hook can be empty when the"
            " arguments to the module's forward function are all named and are"
            " passed as keyword arguments.".format(module)
        )
if hook_inputs is not None and len(hook_inputs) > 0:
return hook_inputs[0].device
if hook_outputs is not None and len(hook_outputs) > 0:
return hook_outputs[0].device
return params[0].device
def _reduce_list(
val_list: List[TupleOrTensorOrBoolGeneric],
red_func: Callable[[List], Any] = torch.cat,
) -> TupleOrTensorOrBoolGeneric:
"""
Applies reduction function to given list. If each element in the list is
a Tensor, applies reduction function to all elements of the list, and returns
the output Tensor / value. If each element is a boolean, apply any method (or).
If each element is a tuple, applies reduction
function to corresponding elements of each tuple in the list, and returns
tuple of reduction function outputs with length matching the length of tuple
val_list[0]. It is assumed that all tuples in the list have the same length
and red_func can be applied to all elements in each corresponding position.
"""
assert len(val_list) > 0, "Cannot reduce empty list!"
if isinstance(val_list[0], torch.Tensor):
first_device = val_list[0].device
return red_func([elem.to(first_device) for elem in val_list])
elif isinstance(val_list[0], bool):
return any(val_list)
elif isinstance(val_list[0], tuple):
final_out = []
for i in range(len(val_list[0])):
final_out.append(
_reduce_list([val_elem[i] for val_elem in val_list], red_func)
)
else:
raise AssertionError(
"Elements to be reduced can only be"
"either Tensors or tuples containing Tensors."
)
return tuple(final_out)
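# Illustrative expansion (not part of the library): reducing a list of tuples
# concatenates position-wise.
#   a = (torch.zeros(2, 3), torch.zeros(2, 4))
#   b = (torch.ones(3, 3), torch.ones(3, 4))
#   _reduce_list([a, b])  # -> tuple of tensors with shapes (5, 3) and (5, 4)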
def _sort_key_list(
keys: List[device], device_ids: Union[None, List[int]] = None
) -> List[device]:
"""
Sorts list of torch devices (keys) by given index list, device_ids. If keys
contains only one device, then the list is returned unchanged. If keys
contains a device for which the id is not contained in device_ids, then
an error is returned. This method is used to identify the order of DataParallel
batched devices, given the device ID ordering.
"""
if len(keys) == 1:
return keys
id_dict: Dict[int, device] = {}
assert device_ids is not None, "Device IDs must be provided with multiple devices."
for key in keys:
if key.index in id_dict:
raise AssertionError("Duplicate CUDA Device ID identified in device list.")
id_dict[key.index] = key
out_list = [
id_dict[device_id]
for device_id in filter(lambda device_id: device_id in id_dict, device_ids)
]
assert len(out_list) == len(keys), "Given Device ID List does not match"
"devices with computed tensors."
return out_list
def _flatten_tensor_or_tuple(inp: TensorOrTupleOfTensorsGeneric) -> Tensor:
if isinstance(inp, Tensor):
return inp.flatten()
return torch.cat([single_inp.flatten() for single_inp in inp])
def _get_module_from_name(model: Module, layer_name: str) -> Any:
r"""
Returns the module (layer) object, given its (string) name
in the model.
Args:
name (str): Module or nested modules name string in self.model
Returns:
The module (layer) in self.model.
"""
return reduce(getattr, layer_name.split("."), model)
def _register_backward_hook(
module: Module, hook: Callable, attr_obj: Any
) -> List[torch.utils.hooks.RemovableHandle]:
grad_out: Dict[device, Tensor] = {}
def forward_hook(
module: Module,
inp: Union[Tensor, Tuple[Tensor, ...]],
out: Union[Tensor, Tuple[Tensor, ...]],
) -> None:
nonlocal grad_out
grad_out = {}
def output_tensor_hook(output_grad: Tensor) -> None:
grad_out[output_grad.device] = output_grad
if isinstance(out, tuple):
assert (
len(out) == 1
), "Backward hooks not supported for module with >1 output"
out[0].register_hook(output_tensor_hook)
else:
out.register_hook(output_tensor_hook)
def pre_hook(module, inp):
def input_tensor_hook(input_grad: Tensor):
if len(grad_out) == 0:
return
hook_out = hook(module, input_grad, grad_out[input_grad.device])
if hook_out is not None:
return hook_out[0] if isinstance(hook_out, tuple) else hook_out
if isinstance(inp, tuple):
assert (
len(inp) == 1
), "Backward hooks not supported for module with >1 input"
inp[0].register_hook(input_tensor_hook)
return inp[0].clone()
else:
inp.register_hook(input_tensor_hook)
return inp.clone()
return [
module.register_forward_pre_hook(pre_hook),
module.register_forward_hook(forward_hook),
]
def _get_max_feature_index(feature_mask: Tuple[Tensor, ...]) -> int:
    """
    Returns the max feature mask index.
    The feature mask should be formatted into a tuple of tensors first.
    Note: This util is commonly used to identify the number of features
    (max_index + 1), as we expect the user to be responsible for ensuring
    consecutive feature mask indices starting from 0.
    """
    return int(max(torch.max(mask).item() for mask in feature_mask if mask.numel()))
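# Illustrative expansion (not part of the library):
#   _get_max_feature_index((torch.tensor([[0, 1, 2]]), torch.tensor([[3, 4]])))
#   -> 4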
|
#!/usr/bin/env python3
import glob
import os
import re
import warnings
from typing import Any, List, Optional, Tuple, Union
import captum._utils.common as common
import torch
from captum.attr import LayerActivation
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
class AV:
r"""
This class provides functionality to store and load activation vectors
generated for pre-defined neural network layers.
It also provides functionality to check if activation vectors already
exist in the manifold and other auxiliary functions.
    This class also defines a torch `Dataset`, representing Activation Vectors,
    which enables lazy access to activation vectors and layers stored in the
    manifold.
"""
r"""
The name of the subfolder in the manifold where the activation vectors
are stored.
"""
class AVDataset(Dataset):
r"""
This dataset enables access to activation vectors for a given `model` stored
under a pre-defined path.
The iterator of this dataset returns a batch of data tensors.
Additionally, subsets of the model activations can be loaded based on layer
or identifier or num_id (representing batch number in source dataset).
"""
def __init__(
self,
path: str,
model_id: str,
identifier: Optional[str] = None,
layer: Optional[str] = None,
num_id: Optional[str] = None,
) -> None:
r"""
Loads into memory the list of all activation file paths associated
with the input `model_id`.
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
model_id (str): The name/version of the model for which layer
activations are being computed and stored.
identifier (str or None): An optional identifier for the layer
activations. Can be used to distinguish between activations for
different training batches.
layer (str or None): The layer for which the activation vectors
are computed.
                num_id (str or None): An optional string representing the batch
                    number for which the activation vectors are computed
"""
self.av_filesearch = AV._construct_file_search(
path, model_id, identifier, layer, num_id
)
files = glob.glob(self.av_filesearch)
self.files = AV.sort_files(files)
def __getitem__(self, idx: int) -> Union[Tensor, Tuple[Tensor, ...]]:
assert idx < len(self.files), "Layer index is out of bounds!"
fl = self.files[idx]
av = torch.load(fl)
return av
def __len__(self) -> int:
return len(self.files)
AV_DIR_NAME: str = "av"
def __init__(self) -> None:
pass
@staticmethod
def _assemble_model_dir(path: str, model_id: str) -> str:
r"""
Returns a directory path for the given source path `path` and `model_id.`
This path is suffixed with the '/' delimiter.
"""
return "/".join([path, AV.AV_DIR_NAME, model_id, ""])
@staticmethod
def _assemble_file_path(source_dir: str, identifier: str, layer: str) -> str:
r"""
Returns a full filepath given a source directory, layer, and required
identifier. The source dir is not required to end with a "/" delimiter.
"""
if not source_dir.endswith("/"):
source_dir += "/"
filepath = os.path.join(source_dir, identifier)
filepath = os.path.join(filepath, layer)
return filepath
@staticmethod
def _construct_file_search(
source_dir: str,
model_id: str,
identifier: Optional[str] = None,
layer: Optional[str] = None,
num_id: Optional[str] = None,
) -> str:
r"""
Returns a search string that can be used by glob to search `source_dir/model_id`
for the desired layer/identifier pair. Leaving `layer` as None will search ids
over all layers, and leaving `identifier` as none will search layers over all
ids. Leaving both as none will return a path to glob for every activation.
Assumes identifier is always specified when saving activations, so that
activations live at source_dir/model_id/identifier/layer
(and never source_dir/model_id/layer)
"""
av_filesearch = AV._assemble_model_dir(source_dir, model_id)
av_filesearch = os.path.join(
av_filesearch, "*" if identifier is None else identifier
)
av_filesearch = os.path.join(av_filesearch, "*" if layer is None else layer)
av_filesearch = os.path.join(
av_filesearch, "*.pt" if num_id is None else "%s.pt" % num_id
)
return av_filesearch
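    # Illustrative expansion (not part of the library):
    #   AV._construct_file_search("/tmp", "resnet", "train", "layer4")
    #   -> "/tmp/av/resnet/train/layer4/*.pt"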
@staticmethod
def exists(
path: str,
model_id: str,
identifier: Optional[str] = None,
layer: Optional[str] = None,
num_id: Optional[str] = None,
) -> bool:
r"""
Verifies whether the model + layer activations exist
under the path.
Args:
path (str): The path where the activation vectors
for the `model_id` are stored.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
identifier (str or None): An optional identifier for the layer activations.
Can be used to distinguish between activations for different
training batches. For example, the id could be a suffix composed of
a train/test label and numerical value, such as "-train-xxxxx".
The numerical id is often a monotonic sequence taken from datetime.
layer (str or None): The layer for which the activation vectors are
computed.
            num_id (str, optional): An optional string representing the batch number
                for which the activation vectors are computed.
        Returns:
            exists (bool): Indicates whether the activation vectors for the `layer`
                and `identifier` (if provided) and `num_id` (if provided) were stored
                in the manifold. If no `identifier` is provided, will return `True`
                if any layer activation exists, whether it has an identifier or
                not, and vice-versa.
"""
av_dir = AV._assemble_model_dir(path, model_id)
av_filesearch = AV._construct_file_search(
path, model_id, identifier, layer, num_id
)
return os.path.exists(av_dir) and len(glob.glob(av_filesearch)) > 0
@staticmethod
def save(
path: str,
model_id: str,
identifier: str,
layers: Union[str, List[str]],
act_tensors: Union[Tensor, List[Tensor]],
num_id: str,
) -> None:
r"""
        Saves the activation vectors `act_tensors` for the given
        `layers` under the manifold `path`.
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
            identifier (str or None): An optional identifier for the layer
                activations. Can be used to distinguish between activations for
                different training batches. For example, the identifier could be
                a suffix composed of a train/test label and a numerical value,
                such as "-src-abc", where "abc" is a unique identifying number
                (for example, the batch index, as automatically created in
                AV.generate_dataset_activations).
                The identifier is assumed to be the same for all layers if a
                list of `layers` is provided.
layers (str or list[str]): The layer(s) for which the activation vectors
are computed.
            act_tensors (tensor or list of tensor): A batch of activation vectors.
                The number of tensors must match the number of `layers`.
            num_id (str): A string representing the batch number for which the
                activation vectors are computed.
"""
if isinstance(layers, str):
layers = [layers]
if isinstance(act_tensors, Tensor):
act_tensors = [act_tensors]
if len(layers) != len(act_tensors):
raise ValueError("The dimension of `layers` and `act_tensors` must match!")
av_dir = AV._assemble_model_dir(path, model_id)
for i, layer in enumerate(layers):
av_save_fl_path = os.path.join(
AV._assemble_file_path(av_dir, identifier, layer), "%s.pt" % num_id
)
layer_dir = os.path.dirname(av_save_fl_path)
if not os.path.exists(layer_dir):
os.makedirs(layer_dir)
torch.save(act_tensors[i], av_save_fl_path)
@staticmethod
def load(
path: str,
model_id: str,
identifier: Optional[str] = None,
layer: Optional[str] = None,
num_id: Optional[str] = None,
) -> AVDataset:
r"""
        Lazily loads the activation vectors for the given `model_id` and
        `layer` saved under the `path`.
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
identifier (str or None): An optional identifier for the layer
activations. Can be used to distinguish between activations for
different training batches.
layer (str or None): The layer for which the activation vectors
are computed.
            num_id (str, optional): An optional string representing the batch number
                for which the activation vectors are computed.
        Returns:
            dataset (AV.AVDataset): An AV.AVDataset that allows iterating
                over the activation vectors for the given layer, identifier (if
                provided), and num_id (if provided). Returning an AV.AVDataset as
                opposed to a DataLoader constructed from it offers more
                flexibility. Raises RuntimeError if activation vectors are not
                found.
"""
av_save_dir = AV._assemble_model_dir(path, model_id)
if os.path.exists(av_save_dir):
avdataset = AV.AVDataset(path, model_id, identifier, layer, num_id)
return avdataset
else:
raise RuntimeError(
f"Activation vectors for model {model_id} was not found at path {path}"
)
@staticmethod
def _manage_loading_layers(
path: str,
model_id: str,
layers: Union[str, List[str]],
load_from_disk: bool = True,
identifier: Optional[str] = None,
num_id: Optional[str] = None,
) -> List[str]:
r"""
Returns unsaved layers, and deletes saved layers if load_from_disk is False.
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
layers (str or list[str]): The layer(s) for which the activation vectors
are computed.
load_from_disk (bool, optional): Whether or not to load from disk.
Default: True
identifier (str or None): An optional identifier for the layer
activations. Can be used to distinguish between activations for
different training batches.
num_id (str, optional): An optional string representing the batch number
for which the activation vectors are computed.
Returns:
List of layer names for which activations should be generated
"""
layers = [layers] if isinstance(layers, str) else layers
unsaved_layers = []
if load_from_disk:
for layer in layers:
if not AV.exists(path, model_id, identifier, layer, num_id):
unsaved_layers.append(layer)
else:
unsaved_layers = layers
warnings.warn(
"Overwriting activations: load_from_disk is set to False. Removing all "
f"activations matching specified parameters {{path: {path}, "
f"model_id: {model_id}, layers: {layers}, identifier: {identifier}}} "
"before generating new activations."
)
for layer in layers:
files = glob.glob(
AV._construct_file_search(path, model_id, identifier, layer)
)
for filename in files:
os.remove(filename)
return unsaved_layers
@staticmethod
def _compute_and_save_activations(
path: str,
model: Module,
model_id: str,
layers: Union[str, List[str]],
inputs: Union[Tensor, Tuple[Tensor, ...]],
identifier: str,
num_id: str,
additional_forward_args: Any = None,
load_from_disk: bool = True,
) -> None:
r"""
Computes layer activations for the given inputs and specified `layers`
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
model (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
layers (str or list[str]): The layer(s) for which the activation vectors
are computed.
            inputs (Tensor or tuple[Tensor, ...]): Batch of examples for
                which layer activations are computed. They are passed to the
                input `model`. The first dimension in `inputs` tensor or tuple of
                tensors corresponds to the batch size.
identifier (str or None): An optional identifier for the layer
activations. Can be used to distinguish between activations for
different training batches.
            num_id (str): A required string representing the batch number for which
                the activation vectors are computed.
additional_forward_args (Any, optional): Additional arguments that will be
passed to `model` after inputs.
Default: None
load_from_disk (bool): Forces function to regenerate activations if False.
Default: True
"""
unsaved_layers = AV._manage_loading_layers(
path,
model_id,
layers,
load_from_disk,
identifier,
num_id,
)
layer_modules = [
common._get_module_from_name(model, layer) for layer in unsaved_layers
]
if len(unsaved_layers) > 0:
layer_act = LayerActivation(model, layer_modules)
new_activations = layer_act.attribute.__wrapped__( # type: ignore
layer_act, inputs, additional_forward_args
)
AV.save(path, model_id, identifier, unsaved_layers, new_activations, num_id)
@staticmethod
def _unpack_data(data: Union[Any, Tuple[Any, Any]]) -> Any:
r"""
Helper to extract input from labels when getting items from a Dataset. Assumes
that data is either a single value, or a tuple containing two elements.
The input could itself be a Tuple containing multiple values. If your
dataset returns a Tuple with more than 2 elements, please reformat it such that
all inputs are formatted into a tuple stored at the first position.
"""
if isinstance(data, tuple) or isinstance(data, list):
data = data[0]
return data
r"""TODO:
1. Can propagate saving labels along with activations.
2. Use of additional_forward_args when sourcing from dataset?
"""
@staticmethod
def generate_dataset_activations(
path: str,
model: Module,
model_id: str,
layers: Union[str, List[str]],
dataloader: DataLoader,
identifier: str = "default",
load_from_disk: bool = True,
return_activations: bool = False,
) -> Optional[Union[AVDataset, List[AVDataset]]]:
r"""
Computes layer activations for a source dataset and specified `layers`. Assumes
that the dataset returns a single value, or a tuple containing two elements
(see AV._unpack_data).
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
            model (torch.nn.Module): An instance of a pytorch model. This model should
                define all of its layers as attributes of the model.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
layers (str or list[str]): The layer(s) for which the activation vectors
are computed.
            dataloader (torch.utils.data.DataLoader): DataLoader that yields batches
                from the source dataset for which layer activations are computed.
                They are passed to the input `model`.
identifier (str or None): An identifier for the layer
activations. Can be used to distinguish between activations for
different training batches.
Default: "default"
load_from_disk (bool): Forces function to regenerate activations if False.
Default: True
return_activations (bool, optional): Whether to return the activations.
Default: False
        Returns: If `return_activations == True`, returns a single `AVDataset` if
            `layers` is a str, otherwise a list of `AVDataset`s of the same
            length as `layers`, where each element corresponds to a layer. In
            either case, an `AVDataset` represents the activations for a single
            layer over the entire `dataloader`. If `return_activations == False`,
            nothing is returned.
"""
unsaved_layers = AV._manage_loading_layers(
path,
model_id,
layers,
load_from_disk,
identifier,
)
if len(unsaved_layers) > 0:
for i, data in enumerate(dataloader):
AV._compute_and_save_activations(
path,
model,
model_id,
layers,
AV._unpack_data(data),
identifier,
str(i),
)
if not return_activations:
return None
if isinstance(layers, str):
return AV.load(path, model_id, identifier, layers)
else:
return [AV.load(path, model_id, identifier, layer) for layer in layers]
@staticmethod
def sort_files(files: List[str]) -> List[str]:
r"""
        Utility for sorting files based on natural sorting instead of the default
        lexicographical sort.
"""
def split_alphanum(s):
r"""
Splits string into a list of strings and numbers
"z23a" -> ["z", 23, "a"]
"""
return [int(x) if x.isdigit() else x for x in re.split("([0-9]+)", s)]
return sorted(files, key=split_alphanum)
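# A minimal end-to-end sketch of the AV workflow above. This is a hedged,
# assumed usage example, not part of the module: "model_v1", the layer
# attribute name "fc1", and the identifier "batch_0" are made-up values.
def _example_av_roundtrip(model, batch):
    import tempfile

    path = tempfile.mkdtemp()
    # Compute and persist activations of `model.fc1` for one input batch.
    AV._compute_and_save_activations(
        path, model, "model_v1", ["fc1"], batch, "batch_0", "0"
    )
    # Lazily reload them; indexing the dataset loads one saved batch file.
    dataset = AV.load(path, "model_v1", identifier="batch_0", layer="fc1")
    return dataset[0]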
|
#!/usr/bin/env python3
import sys
import warnings
from time import time
from typing import cast, Iterable, Optional, Sized, TextIO
from captum._utils.typing import Literal
try:
from tqdm.auto import tqdm
except ImportError:
tqdm = None
class DisableErrorIOWrapper(object):
def __init__(self, wrapped: TextIO) -> None:
"""
        A wrapper around a TextIO object that ignores write errors, in the
        same way tqdm does:
        https://github.com/tqdm/tqdm/blob/bcce20f771a16cb8e4ac5cc5b2307374a2c0e535/tqdm/utils.py#L131
"""
self._wrapped = wrapped
def __getattr__(self, name):
return getattr(self._wrapped, name)
@staticmethod
def _wrapped_run(func, *args, **kwargs):
try:
return func(*args, **kwargs)
except OSError as e:
if e.errno != 5:
raise
except ValueError as e:
if "closed" not in str(e):
raise
def write(self, *args, **kwargs):
return self._wrapped_run(self._wrapped.write, *args, **kwargs)
def flush(self, *args, **kwargs):
return self._wrapped_run(self._wrapped.flush, *args, **kwargs)
class NullProgress:
"""Passthrough class that implements the progress API.
This class implements the tqdm and SimpleProgressBar api but
does nothing. This class can be used as a stand-in for an
optional progressbar, most commonly in the case of nested
progress bars.
"""
    def __init__(self, iterable: Optional[Iterable] = None, *args, **kwargs):
del args, kwargs
self.iterable = iterable
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback) -> Literal[False]:
return False
def __iter__(self):
if not self.iterable:
return
for it in self.iterable:
yield it
def update(self, amount: int = 1):
pass
def close(self):
pass
class SimpleProgress:
def __init__(
self,
        iterable: Optional[Iterable] = None,
        desc: Optional[str] = None,
        total: Optional[int] = None,
        file: Optional[TextIO] = None,
mininterval: float = 0.5,
) -> None:
"""
        Simple progress output used when tqdm is unavailable.
        Like tqdm, output goes to the stderr channel by default.
        If you want to nest progress bars with SimpleProgress,
        the parent progress bar should be used as a context manager
        (i.e. a `with` statement) and the nested progress bar should be
        created inside this context.
"""
self.cur = 0
self.iterable = iterable
self.total = total
if total is None and hasattr(iterable, "__len__"):
self.total = len(cast(Sized, iterable))
self.desc = desc
        self.file = cast(
            TextIO, DisableErrorIOWrapper(file if file else sys.stderr)
        )
self.mininterval = mininterval
self.last_print_t = 0.0
self.closed = False
self._is_parent = False
def __enter__(self):
self._is_parent = True
self._refresh()
return self
def __exit__(self, exc_type, exc_value, exc_traceback) -> Literal[False]:
self.close()
return False
def __iter__(self):
if self.closed or not self.iterable:
return
self._refresh()
for it in self.iterable:
yield it
self.update()
self.close()
def _refresh(self):
progress_str = self.desc + ": " if self.desc else ""
if self.total:
# e.g., progress: 60% 3/5
progress_str += f"{100 * self.cur // self.total}% {self.cur}/{self.total}"
else:
# e.g., progress: .....
progress_str += "." * self.cur
end = "\n" if self._is_parent else ""
print("\r" + progress_str, end=end, file=self.file)
def update(self, amount: int = 1):
if self.closed:
return
self.cur += amount
cur_t = time()
if cur_t - self.last_print_t >= self.mininterval:
self._refresh()
self.last_print_t = cur_t
def close(self):
if not self.closed and not self._is_parent:
self._refresh()
print(file=self.file) # end with new line
self.closed = True
def progress(
    iterable: Optional[Iterable] = None,
    desc: Optional[str] = None,
    total: Optional[int] = None,
    use_tqdm: bool = True,
    file: Optional[TextIO] = None,
mininterval: float = 0.5,
**kwargs,
):
    # Try to use tqdm if possible. Fall back to a simple progress printout.
if tqdm and use_tqdm:
return tqdm(
iterable,
desc=desc,
total=total,
file=file,
mininterval=mininterval,
**kwargs,
)
else:
if not tqdm and use_tqdm:
warnings.warn(
"Tried to show progress with tqdm "
"but tqdm is not installed. "
"Fall back to simply print out the progress."
)
return SimpleProgress(
iterable, desc=desc, total=total, file=file, mininterval=mininterval
)
|
#!/usr/bin/env python3
from typing import List, Tuple, TYPE_CHECKING, TypeVar, Union
from torch import Tensor
from torch.nn import Module
if TYPE_CHECKING:
import sys
if sys.version_info >= (3, 8):
from typing import Literal # noqa: F401
else:
from typing_extensions import Literal # noqa: F401
else:
Literal = {True: bool, False: bool, (True, False): bool}
TensorOrTupleOfTensorsGeneric = TypeVar(
"TensorOrTupleOfTensorsGeneric", Tensor, Tuple[Tensor, ...]
)
TupleOrTensorOrBoolGeneric = TypeVar("TupleOrTensorOrBoolGeneric", Tuple, Tensor, bool)
ModuleOrModuleList = TypeVar("ModuleOrModuleList", Module, List[Module])
TargetType = Union[None, int, Tuple[int, ...], Tensor, List[Tuple[int, ...]], List[int]]
BaselineType = Union[None, Tensor, int, float, Tuple[Union[Tensor, int, float], ...]]
TensorLikeList1D = List[float]
TensorLikeList2D = List[TensorLikeList1D]
TensorLikeList3D = List[TensorLikeList2D]
TensorLikeList4D = List[TensorLikeList3D]
TensorLikeList5D = List[TensorLikeList4D]
TensorLikeList = Union[
TensorLikeList1D,
TensorLikeList2D,
TensorLikeList3D,
TensorLikeList4D,
TensorLikeList5D,
]
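# Hedged illustration of how TensorOrTupleOfTensorsGeneric is meant to be
# used: the TypeVar ties the return type to the input type, so a Tensor in
# yields a Tensor out and a tuple in yields a tuple out. `_detach_all` is a
# hypothetical helper written for this sketch, not part of captum.
def _detach_all(
    inputs: TensorOrTupleOfTensorsGeneric,
) -> TensorOrTupleOfTensorsGeneric:
    if isinstance(inputs, Tensor):
        return inputs.detach()
    # A tuple input produces a tuple output of the same arity.
    return tuple(t.detach() for t in inputs)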
|
from captum._utils.models.linear_model import (
LinearModel,
SGDLasso,
SGDLinearModel,
SGDLinearRegression,
SGDRidge,
SkLearnLasso,
SkLearnLinearModel,
SkLearnLinearRegression,
SkLearnRidge,
)
from captum._utils.models.model import Model
__all__ = [
"Model",
"LinearModel",
"SGDLinearModel",
"SGDLasso",
"SGDRidge",
"SGDLinearRegression",
"SkLearnLinearModel",
"SkLearnLasso",
"SkLearnRidge",
"SkLearnLinearRegression",
]
|
#!/usr/bin/env python3
from abc import ABC, abstractmethod
from typing import Dict, Optional, Union
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from torch import Tensor
from torch.utils.data import DataLoader
class Model(ABC):
r"""
Abstract Class to describe the interface of a trainable model to be used
within the algorithms of captum.
Please note that this is an experimental feature.
"""
@abstractmethod
def fit(
self, train_data: DataLoader, **kwargs
) -> Optional[Dict[str, Union[int, float, Tensor]]]:
r"""
Override this method to actually train your model.
The specification of the dataloader will be supplied by the algorithm
you are using within captum. This will likely be a supervised learning
task, thus you should expect batched (x, y) pairs or (x, y, w) triples.
Args:
train_data (DataLoader):
The data to train on
Returns:
Optional statistics about training, e.g. iterations it took to
train, training loss, etc.
"""
pass
@abstractmethod
def representation(self) -> Tensor:
r"""
Returns the underlying representation of the interpretable model. For a
linear model this is simply a tensor (the concatenation of weights
and bias). For something slightly more complicated, such as a decision
tree, this could be the nodes of a decision tree.
Returns:
A Tensor describing the representation of the model.
"""
pass
@abstractmethod
def __call__(
self, x: TensorOrTupleOfTensorsGeneric
) -> TensorOrTupleOfTensorsGeneric:
r"""
Predicts with the interpretable model.
Args:
            x (TensorOrTupleOfTensorsGeneric):
                A batched input of tensor(s) to the model to predict.
Returns:
The prediction of the input as a TensorOrTupleOfTensorsGeneric.
"""
pass
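# A minimal sketch of a concrete `Model` (an assumed example, not shipped
# with captum): a frozen linear map that implements the three abstract
# methods with no actual training.
class _ExampleFrozenLinear(Model):
    def __init__(self, weight: Tensor) -> None:
        # weight has shape (num_outputs, num_features)
        self.weight = weight

    def fit(self, train_data: DataLoader, **kwargs):
        # Nothing to train; report zero iterations.
        return {"train_iter": 0}

    def representation(self) -> Tensor:
        return self.weight

    def __call__(
        self, x: TensorOrTupleOfTensorsGeneric
    ) -> TensorOrTupleOfTensorsGeneric:
        # For simplicity this sketch assumes a single Tensor input of
        # shape (batch, num_features).
        return x @ self.weight.T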
|
from captum._utils.models.linear_model.model import (
LinearModel,
SGDLasso,
SGDLinearModel,
SGDLinearRegression,
SGDRidge,
SkLearnLasso,
SkLearnLinearModel,
SkLearnLinearRegression,
SkLearnRidge,
)
__all__ = [
"LinearModel",
"SGDLinearModel",
"SGDLasso",
"SGDRidge",
"SGDLinearRegression",
"SkLearnLinearModel",
"SkLearnLasso",
"SkLearnRidge",
"SkLearnLinearRegression",
]
|
from typing import Callable, cast, List, Optional
import torch.nn as nn
from captum._utils.models.model import Model
from torch import Tensor
from torch.utils.data import DataLoader
class LinearModel(nn.Module, Model):
SUPPORTED_NORMS: List[Optional[str]] = [None, "batch_norm", "layer_norm"]
def __init__(self, train_fn: Callable, **kwargs) -> None:
r"""
Constructs a linear model with a training function and additional
construction arguments that will be sent to
`self._construct_model_params` after a `self.fit` is called. Please note
that this assumes the `self.train_fn` will call
`self._construct_model_params`.
Please note that this is an experimental feature.
Args:
train_fn (Callable)
The function to train with. See
`captum._utils.models.linear_model.train.sgd_train_linear_model`
and
`captum._utils.models.linear_model.train.sklearn_train_linear_model`
for examples
kwargs
Any additional keyword arguments to send to
`self._construct_model_params` once a `self.fit` is called.
"""
super().__init__()
self.norm: Optional[nn.Module] = None
self.linear: Optional[nn.Linear] = None
self.train_fn = train_fn
self.construct_kwargs = kwargs
def _construct_model_params(
self,
in_features: Optional[int] = None,
out_features: Optional[int] = None,
norm_type: Optional[str] = None,
affine_norm: bool = False,
bias: bool = True,
weight_values: Optional[Tensor] = None,
bias_value: Optional[Tensor] = None,
classes: Optional[Tensor] = None,
):
r"""
Lazily initializes a linear model. This will be called for you in a
train method.
Args:
in_features (int):
The number of input features
            out_features (int):
                The number of output features.
            norm_type (str, optional):
                The type of normalization that can occur. Please assign this
                to one of `LinearModel.SUPPORTED_NORMS`.
affine_norm (bool):
Whether or not to learn an affine transformation of the
normalization parameters used.
            bias (bool):
                Whether to add a bias term. Not needed if the input is normalized.
weight_values (Tensor, optional):
The values to initialize the linear model with. This must be a
1D or 2D tensor, and of the form `(num_outputs, num_features)` or
                `(num_features,)`. Additionally, if this is provided you need not
                provide `in_features` or `out_features`.
bias_value (Tensor, optional):
The bias value to initialize the model with.
classes (Tensor, optional):
                The list of prediction classes supported by the model in case it
                performs classification. In the case of regression it is set to None.
Default: None
"""
if norm_type not in LinearModel.SUPPORTED_NORMS:
raise ValueError(
f"{norm_type} not supported. Please use {LinearModel.SUPPORTED_NORMS}"
)
if weight_values is not None:
in_features = weight_values.shape[-1]
out_features = (
1 if len(weight_values.shape) == 1 else weight_values.shape[0]
)
if in_features is None or out_features is None:
raise ValueError(
"Please provide `in_features` and `out_features` or `weight_values`"
)
if norm_type == "batch_norm":
self.norm = nn.BatchNorm1d(in_features, eps=1e-8, affine=affine_norm)
elif norm_type == "layer_norm":
self.norm = nn.LayerNorm(
in_features, eps=1e-8, elementwise_affine=affine_norm
)
else:
self.norm = None
self.linear = nn.Linear(in_features, out_features, bias=bias)
if weight_values is not None:
self.linear.weight.data = weight_values
if bias_value is not None:
if not bias:
raise ValueError("`bias_value` is not None and bias is False")
self.linear.bias.data = bias_value
if classes is not None:
self.linear.classes = classes
def fit(self, train_data: DataLoader, **kwargs):
r"""
Calls `self.train_fn`
"""
return self.train_fn(
self,
dataloader=train_data,
construct_kwargs=self.construct_kwargs,
**kwargs,
)
def forward(self, x: Tensor) -> Tensor:
assert self.linear is not None
if self.norm is not None:
x = self.norm(x)
return self.linear(x)
def representation(self) -> Tensor:
r"""
Returns a tensor which describes the hyper-plane input space. This does
not include the bias. For bias/intercept, please use `self.bias`
"""
assert self.linear is not None
return self.linear.weight.detach()
def bias(self) -> Optional[Tensor]:
r"""
Returns the bias of the linear model
"""
if self.linear is None or self.linear.bias is None:
return None
return self.linear.bias.detach()
def classes(self) -> Optional[Tensor]:
if self.linear is None or self.linear.classes is None:
return None
return cast(Tensor, self.linear.classes).detach()
class SGDLinearModel(LinearModel):
def __init__(self, **kwargs) -> None:
r"""
        Factory class. Constructs a `LinearModel` with
        `sgd_train_linear_model` as the train method.
Args:
kwargs
                Arguments sent to `self._construct_model_params` after
`self.fit` is called. Please refer to that method for parameter
documentation.
"""
# avoid cycles
from captum._utils.models.linear_model.train import sgd_train_linear_model
super().__init__(train_fn=sgd_train_linear_model, **kwargs)
class SGDLasso(SGDLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class to train a `LinearModel` with SGD
(`sgd_train_linear_model`) whilst setting appropriate parameters to
        optimize for lasso regression loss. This optimizes L2 loss + alpha * L1
regularization.
Please note that with SGD it is not guaranteed that weights will
converge to 0.
"""
super().__init__(**kwargs)
def fit(self, train_data: DataLoader, **kwargs):
# avoid cycles
from captum._utils.models.linear_model.train import l2_loss
return super().fit(train_data=train_data, loss_fn=l2_loss, reg_term=1, **kwargs)
class SGDRidge(SGDLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class to train a `LinearModel` with SGD
(`sgd_train_linear_model`) whilst setting appropriate parameters to
optimize for ridge regression loss. This optimizes L2 loss + alpha *
L2 regularization.
"""
super().__init__(**kwargs)
def fit(self, train_data: DataLoader, **kwargs):
# avoid cycles
from captum._utils.models.linear_model.train import l2_loss
return super().fit(train_data=train_data, loss_fn=l2_loss, reg_term=2, **kwargs)
class SGDLinearRegression(SGDLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class to train a `LinearModel` with SGD
(`sgd_train_linear_model`). For linear regression this assigns the loss
to L2 and no regularization.
"""
super().__init__(**kwargs)
def fit(self, train_data: DataLoader, **kwargs):
# avoid cycles
from captum._utils.models.linear_model.train import l2_loss
return super().fit(
train_data=train_data, loss_fn=l2_loss, reg_term=None, **kwargs
)
class SkLearnLinearModel(LinearModel):
def __init__(self, sklearn_module: str, **kwargs) -> None:
r"""
Factory class to construct a `LinearModel` with sklearn training method.
Please note that this assumes:
0. You have sklearn and numpy installed
1. The dataset can fit into memory
SkLearn support does introduce some slight overhead as we convert the
tensors to numpy and then convert the resulting trained model to a
`LinearModel` object. However, this conversion should be negligible.
Args:
sklearn_module
The module under sklearn to construct and use for training, e.g.
use "svm.LinearSVC" for an SVM or "linear_model.Lasso" for Lasso.
There are factory classes defined for you for common use cases,
such as `SkLearnLasso`.
kwargs
The kwargs to pass to the construction of the sklearn model
"""
# avoid cycles
from captum._utils.models.linear_model.train import sklearn_train_linear_model
super().__init__(train_fn=sklearn_train_linear_model, **kwargs)
self.sklearn_module = sklearn_module
def fit(self, train_data: DataLoader, **kwargs):
r"""
Args:
train_data
Train data to use
kwargs
Arguments to feed to `.fit` method for sklearn
"""
return super().fit(
train_data=train_data, sklearn_trainer=self.sklearn_module, **kwargs
)
class SkLearnLasso(SkLearnLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class. Trains a `LinearModel` model with
`sklearn.linear_model.Lasso`. You will need sklearn version >= 0.23 to
support sample weights.
"""
super().__init__(sklearn_module="linear_model.Lasso", **kwargs)
def fit(self, train_data: DataLoader, **kwargs):
return super().fit(train_data=train_data, **kwargs)
class SkLearnRidge(SkLearnLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class. Trains a model with `sklearn.linear_model.Ridge`.
Any arguments provided to the sklearn constructor can be provided
as kwargs here.
"""
super().__init__(sklearn_module="linear_model.Ridge", **kwargs)
def fit(self, train_data: DataLoader, **kwargs):
return super().fit(train_data=train_data, **kwargs)
class SkLearnLinearRegression(SkLearnLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class. Trains a model with `sklearn.linear_model.LinearRegression`.
Any arguments provided to the sklearn constructor can be provided
as kwargs here.
"""
super().__init__(sklearn_module="linear_model.LinearRegression", **kwargs)
def fit(self, train_data: DataLoader, **kwargs):
return super().fit(train_data=train_data, **kwargs)
class SkLearnLogisticRegression(SkLearnLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class. Trains a model with `sklearn.linear_model.LogisticRegression`.
Any arguments provided to the sklearn constructor can be provided
as kwargs here.
"""
super().__init__(sklearn_module="linear_model.LogisticRegression", **kwargs)
def fit(self, train_data: DataLoader, **kwargs):
return super().fit(train_data=train_data, **kwargs)
class SkLearnSGDClassifier(SkLearnLinearModel):
def __init__(self, **kwargs) -> None:
r"""
        Factory class. Trains a model with `sklearn.linear_model.SGDClassifier`.
Any arguments provided to the sklearn constructor can be provided
as kwargs here.
"""
super().__init__(sklearn_module="linear_model.SGDClassifier", **kwargs)
def fit(self, train_data: DataLoader, **kwargs):
return super().fit(train_data=train_data, **kwargs)
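# Hedged usage sketch: fit an SkLearnLasso on a synthetic in-memory dataset
# (requires sklearn and numpy). The data, batch size, and alpha below are
# arbitrary illustrative values, not defaults of this module.
def _example_train_sklearn_lasso():
    import torch
    from torch.utils.data import TensorDataset

    x = torch.randn(64, 3)
    y = x @ torch.tensor([1.0, -2.0, 0.5])  # known ground-truth weights
    loader = DataLoader(TensorDataset(x, y), batch_size=16)
    model = SkLearnLasso(alpha=0.01)  # kwargs go to sklearn's Lasso
    stats = model.fit(loader)  # e.g. {"train_time": ...}
    # representation() returns the learned weights (without the bias).
    return model.representation(), stats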
|
import time
import warnings
from typing import Any, Callable, Dict, List, Optional
import torch
import torch.nn as nn
from captum._utils.models.linear_model.model import LinearModel
from torch.utils.data import DataLoader
def l2_loss(x1, x2, weights=None):
if weights is None:
return torch.mean((x1 - x2) ** 2) / 2.0
else:
return torch.sum((weights / weights.norm(p=1)) * ((x1 - x2) ** 2)) / 2.0
def sgd_train_linear_model(
model: LinearModel,
dataloader: DataLoader,
construct_kwargs: Dict[str, Any],
max_epoch: int = 100,
reduce_lr: bool = True,
initial_lr: float = 0.01,
alpha: float = 1.0,
loss_fn: Callable = l2_loss,
reg_term: Optional[int] = 1,
patience: int = 10,
threshold: float = 1e-4,
running_loss_window: Optional[int] = None,
device: Optional[str] = None,
init_scheme: str = "zeros",
debug: bool = False,
) -> Dict[str, float]:
r"""
    Trains a linear model with SGD. This will continue to iterate over your
    dataloader until we converge to a solution or, alternatively, until
    `max_epoch` is exhausted.
Convergence is defined by the loss not changing by `threshold` amount for
`patience` number of iterations.
Args:
model
The model to train
dataloader
The data to train it with. We will assume the dataloader produces
either pairs or triples of the form (x, y) or (x, y, w). Where x and
y are typical pairs for supervised learning and w is a weight
vector.
We will call `model._construct_model_params` with construct_kwargs
and the input features set to `x.shape[1]` (`x.shape[0]` corresponds
to the batch size). We assume that `len(x.shape) == 2`, i.e. the
tensor is flat. The number of output features will be set to
y.shape[1] or 1 (if `len(y.shape) == 1`); we require `len(y.shape)
<= 2`.
max_epoch
The maximum number of epochs to exhaust
reduce_lr
Whether or not to reduce the learning rate as iterations progress.
Halves the learning rate when the training loss does not move. This
uses torch.optim.lr_scheduler.ReduceLROnPlateau and uses the
parameters `patience` and `threshold`
initial_lr
The initial learning rate to use.
alpha
A constant for the regularization term.
loss_fn
The loss to optimise for. This must accept three parameters:
x1 (predicted), x2 (labels) and a weight vector
reg_term
Regularization is defined by the `reg_term` norm of the weights.
Please use `None` if you do not wish to use regularization.
patience
Defines the number of iterations in a row the loss must remain
within `threshold` in order to be classified as converged.
threshold
Threshold for convergence detection.
running_loss_window
Used to report the training loss once we have finished training and
to determine when we have converged (along with reducing the
learning rate).
The reported training loss will take the last `running_loss_window`
iterations and average them.
If `None` we will approximate this to be the number of examples in
an epoch.
init_scheme
Initialization to use prior to training the linear model.
device
The device to send the model and data to. If None then no `.to` call
will be used.
debug
Whether to print the loss, learning rate per iteration
    Returns:
        A dict of training statistics, including the final training loss
        (averaged over `running_loss_window`), the training time, and the
        number of iterations and epochs run.
"""
loss_window: List[torch.Tensor] = []
min_avg_loss = None
convergence_counter = 0
converged = False
def get_point(datapoint):
if len(datapoint) == 2:
x, y = datapoint
w = None
else:
x, y, w = datapoint
if device is not None:
x = x.to(device)
y = y.to(device)
if w is not None:
w = w.to(device)
return x, y, w
# get a point and construct the model
data_iter = iter(dataloader)
x, y, w = get_point(next(data_iter))
model._construct_model_params(
in_features=x.shape[1],
out_features=y.shape[1] if len(y.shape) == 2 else 1,
**construct_kwargs,
)
model.train()
assert model.linear is not None
if init_scheme is not None:
assert init_scheme in ["xavier", "zeros"]
with torch.no_grad():
if init_scheme == "xavier":
torch.nn.init.xavier_uniform_(model.linear.weight)
else:
model.linear.weight.zero_()
if model.linear.bias is not None:
model.linear.bias.zero_()
with torch.enable_grad():
        optim = torch.optim.SGD(model.parameters(), lr=initial_lr)
        # Initialize to None so the later `if scheduler:` check is safe
        # when `reduce_lr` is False.
        scheduler = None
        if reduce_lr:
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                optim, factor=0.5, patience=patience, threshold=threshold
            )
t1 = time.time()
epoch = 0
i = 0
while epoch < max_epoch:
while True: # for x, y, w in dataloader
if running_loss_window is None:
running_loss_window = x.shape[0] * len(dataloader)
y = y.view(x.shape[0], -1)
if w is not None:
w = w.view(x.shape[0], -1)
i += 1
out = model(x)
loss = loss_fn(y, out, w)
if reg_term is not None:
reg = torch.norm(model.linear.weight, p=reg_term)
loss += reg.sum() * alpha
if len(loss_window) >= running_loss_window:
loss_window = loss_window[1:]
loss_window.append(loss.clone().detach())
assert len(loss_window) <= running_loss_window
average_loss = torch.mean(torch.stack(loss_window))
if min_avg_loss is not None:
# if we haven't improved by at least `threshold`
if average_loss > min_avg_loss or torch.isclose(
min_avg_loss, average_loss, atol=threshold
):
convergence_counter += 1
if convergence_counter >= patience:
converged = True
break
else:
convergence_counter = 0
if min_avg_loss is None or min_avg_loss >= average_loss:
min_avg_loss = average_loss.clone()
if debug:
                    print(
                        f"lr={optim.param_groups[0]['lr']}, Loss={loss}, "
                        f"Aloss={average_loss}, min_avg_loss={min_avg_loss}"
                    )
loss.backward()
optim.step()
model.zero_grad()
if scheduler:
scheduler.step(average_loss)
temp = next(data_iter, None)
if temp is None:
break
x, y, w = get_point(temp)
if converged:
break
epoch += 1
data_iter = iter(dataloader)
x, y, w = get_point(next(data_iter))
t2 = time.time()
return {
"train_time": t2 - t1,
"train_loss": torch.mean(torch.stack(loss_window)).item(),
"train_iter": i,
"train_epoch": epoch,
}
class NormLayer(nn.Module):
def __init__(self, mean, std, n=None, eps=1e-8) -> None:
super().__init__()
self.mean = mean
self.std = std
self.eps = eps
def forward(self, x):
return (x - self.mean) / (self.std + self.eps)
def sklearn_train_linear_model(
model: LinearModel,
dataloader: DataLoader,
construct_kwargs: Dict[str, Any],
sklearn_trainer: str = "Lasso",
norm_input: bool = False,
**fit_kwargs,
):
r"""
Alternative method to train with sklearn. This does introduce some slight
overhead as we convert the tensors to numpy and then convert the resulting
trained model to a `LinearModel` object. However, this conversion
should be negligible.
Please note that this assumes:
0. You have sklearn and numpy installed
1. The dataset can fit into memory
    Args:
model
The model to train.
dataloader
The data to use. This will be exhausted and converted to numpy
arrays. Therefore please do not feed an infinite dataloader.
norm_input
Whether or not to normalize the input
sklearn_trainer
The sklearn model to use to train the model. Please refer to
sklearn.linear_model for a list of modules to use.
construct_kwargs
Additional arguments provided to the `sklearn_trainer` constructor
fit_kwargs
Other arguments to send to `sklearn_trainer`'s `.fit` method
"""
from functools import reduce
try:
import numpy as np
except ImportError:
raise ValueError("numpy is not available. Please install numpy.")
try:
import sklearn
import sklearn.linear_model
import sklearn.svm
except ImportError:
raise ValueError("sklearn is not available. Please install sklearn >= 0.23")
if not sklearn.__version__ >= "0.23.0":
warnings.warn(
"Must have sklearn version 0.23.0 or higher to use "
"sample_weight in Lasso regression."
)
num_batches = 0
xs, ys, ws = [], [], []
for data in dataloader:
if len(data) == 3:
x, y, w = data
else:
assert len(data) == 2
x, y = data
w = None
xs.append(x.cpu().numpy())
ys.append(y.cpu().numpy())
if w is not None:
ws.append(w.cpu().numpy())
num_batches += 1
x = np.concatenate(xs, axis=0)
y = np.concatenate(ys, axis=0)
if len(ws) > 0:
w = np.concatenate(ws, axis=0)
else:
w = None
if norm_input:
mean, std = x.mean(0), x.std(0)
x -= mean
x /= std
t1 = time.time()
sklearn_model = reduce(
lambda val, el: getattr(val, el), [sklearn] + sklearn_trainer.split(".")
)(**construct_kwargs)
try:
sklearn_model.fit(x, y, sample_weight=w, **fit_kwargs)
except TypeError:
sklearn_model.fit(x, y, **fit_kwargs)
warnings.warn(
"Sample weight is not supported for the provided linear model!"
" Trained model without weighting inputs. For Lasso, please"
" upgrade sklearn to a version >= 0.23.0."
)
t2 = time.time()
# Convert weights to pytorch
classes = (
torch.IntTensor(sklearn_model.classes_)
if hasattr(sklearn_model, "classes_")
else None
)
# extract model device
device = getattr(model, "device", "cpu")
num_outputs = sklearn_model.coef_.shape[0] if sklearn_model.coef_.ndim > 1 else 1
weight_values = torch.FloatTensor(sklearn_model.coef_).to(device) # type: ignore
    bias_values = torch.FloatTensor([sklearn_model.intercept_]).to(device)  # type: ignore
model._construct_model_params(
norm_type=None,
weight_values=weight_values.view(num_outputs, -1),
bias_value=bias_values.squeeze().unsqueeze(0),
classes=classes,
)
if norm_input:
model.norm = NormLayer(mean, std)
return {"train_time": t2 - t1}
|
from captum.insights.attr_vis import AttributionVisualizer, Batch, features # noqa
|
# for legacy purposes
import warnings
from captum.insights.attr_vis.example import * # noqa
warnings.warn(
"Deprecated. Please import from captum.insights.attr_vis.example instead."
)
main() # noqa
|
#!/usr/bin/env python3
import logging
import os
import socket
import threading
from time import sleep
from typing import Optional
from captum.log import log_usage
from flask import Flask, jsonify, render_template, request
from flask_compress import Compress
from torch import Tensor
app = Flask(
__name__, static_folder="frontend/build/static", template_folder="frontend/build"
)
visualizer = None
port = None
Compress(app)
def namedtuple_to_dict(obj):
if isinstance(obj, Tensor):
return obj.item()
if hasattr(obj, "_asdict"): # detect namedtuple
return dict(zip(obj._fields, (namedtuple_to_dict(item) for item in obj)))
elif isinstance(obj, str): # iterables - strings
return obj
elif hasattr(obj, "keys"): # iterables - mapping
return dict(
zip(obj.keys(), (namedtuple_to_dict(item) for item in obj.values()))
)
elif hasattr(obj, "__iter__"): # iterables - sequence
return type(obj)((namedtuple_to_dict(item) for item in obj))
else: # non-iterable cannot contain namedtuples
return obj
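# Hedged illustration (not used by the server): tensors become Python
# scalars and namedtuples become dicts, recursively. The `Score` namedtuple
# below is a made-up example, not a Captum type.
def _example_namedtuple_to_dict():
    from collections import namedtuple

    import torch

    Score = namedtuple("Score", "label value")
    # -> [{"label": "cat", "value": 0.5}]
    return namedtuple_to_dict([Score("cat", torch.tensor(0.5))])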
@app.route("/attribute", methods=["POST"])
def attribute():
    # force=True needed for Colab notebooks, which don't use the correct
    # Content-Type header when forwarding requests through the Colab proxy
r = request.get_json(force=True)
return jsonify(
namedtuple_to_dict(
visualizer._calculate_attribution_from_cache(
r["inputIndex"], r["modelIndex"], r["labelIndex"]
)
)
)
@app.route("/fetch", methods=["POST"])
def fetch():
# force=True needed, see comment for "/attribute" route above
visualizer._update_config(request.get_json(force=True))
visualizer_output = visualizer.visualize()
clean_output = namedtuple_to_dict(visualizer_output)
return jsonify(clean_output)
@app.route("/init")
def init():
return jsonify(visualizer.get_insights_config())
@app.route("/")
def index(id=0):
return render_template("index.html")
def get_free_tcp_port():
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind(("", 0))
addr, port = tcp.getsockname()
tcp.close()
return port
def run_app(debug: bool = True, bind_all: bool = False):
if bind_all:
app.run(port=port, use_reloader=False, debug=debug, host="0.0.0.0")
else:
app.run(port=port, use_reloader=False, debug=debug)
@log_usage()
def start_server(
_viz,
blocking: bool = False,
debug: bool = False,
_port: Optional[int] = None,
bind_all: bool = False,
):
global visualizer
visualizer = _viz
global port
if port is None:
os.environ["WERKZEUG_RUN_MAIN"] = "true" # hides starting message
if not debug:
log = logging.getLogger("werkzeug")
log.disabled = True
app.logger.disabled = True
port = _port or get_free_tcp_port()
# Start in a new thread to not block notebook execution
t = threading.Thread(
target=run_app, kwargs={"debug": debug, "bind_all": bind_all}
)
t.start()
sleep(0.01) # add a short delay to allow server to start up
if blocking:
t.join()
print(f"\nFetch data and view Captum Insights at http://localhost:{port}/\n")
return port
|
#!/usr/bin/env python3
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union
from captum.attr import (
Deconvolution,
DeepLift,
FeatureAblation,
GuidedBackprop,
InputXGradient,
IntegratedGradients,
Occlusion,
Saliency,
)
from captum.attr._utils.approximation_methods import SUPPORTED_METHODS
class NumberConfig(NamedTuple):
value: int = 1
limit: Tuple[Optional[int], Optional[int]] = (None, None)
type: str = "number"
class StrEnumConfig(NamedTuple):
value: str
limit: List[str]
type: str = "enum"
class StrConfig(NamedTuple):
value: str
type: str = "string"
Config = Union[NumberConfig, StrEnumConfig, StrConfig]
SUPPORTED_ATTRIBUTION_METHODS = [
Deconvolution,
DeepLift,
GuidedBackprop,
InputXGradient,
IntegratedGradients,
Saliency,
FeatureAblation,
Occlusion,
]
class ConfigParameters(NamedTuple):
params: Dict[str, Config]
help_info: Optional[str] = None # TODO fill out help for each method
post_process: Optional[Dict[str, Callable[[Any], Any]]] = None
ATTRIBUTION_NAMES_TO_METHODS = {
# mypy bug - treating it as a type instead of a class
cls.get_name(): cls # type: ignore
for cls in SUPPORTED_ATTRIBUTION_METHODS
}
def _str_to_tuple(s):
if isinstance(s, tuple):
return s
return tuple([int(i) for i in s.split()])
ATTRIBUTION_METHOD_CONFIG: Dict[str, ConfigParameters] = {
IntegratedGradients.get_name(): ConfigParameters(
params={
"n_steps": NumberConfig(value=25, limit=(2, None)),
"method": StrEnumConfig(limit=SUPPORTED_METHODS, value="gausslegendre"),
},
post_process={"n_steps": int},
),
FeatureAblation.get_name(): ConfigParameters(
params={"perturbations_per_eval": NumberConfig(value=1, limit=(1, 100))},
),
Occlusion.get_name(): ConfigParameters(
params={
"sliding_window_shapes": StrConfig(value=""),
"strides": StrConfig(value=""),
"perturbations_per_eval": NumberConfig(value=1, limit=(1, 100)),
},
post_process={
"sliding_window_shapes": _str_to_tuple,
"strides": _str_to_tuple,
"perturbations_per_eval": int,
},
),
}
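# Hedged usage sketch: normalize raw frontend-style arguments with a
# method's `post_process` hooks, as the Insights app does before calling
# `attribute`. The raw string values below are made-up examples, not
# defaults from this file.
def _example_apply_post_process():
    name = Occlusion.get_name()
    raw_args = {"sliding_window_shapes": "3 8 8", "strides": "3 4 4"}
    cfg = ATTRIBUTION_METHOD_CONFIG[name]
    if cfg.post_process:
        for k, v in raw_args.items():
            if k in cfg.post_process:
                raw_args[k] = cfg.post_process[k](v)
    # -> {"sliding_window_shapes": (3, 8, 8), "strides": (3, 4, 4)}
    return raw_args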
|
from captum.insights.attr_vis.app import AttributionVisualizer, Batch # noqa
|
#!/usr/bin/env python3
import base64
import warnings
from collections import namedtuple
from io import BytesIO
from typing import Callable, List, Optional, Union
from captum._utils.common import safe_div
from captum.attr._utils import visualization as viz
from captum.insights.attr_vis._utils.transforms import format_transforms
FeatureOutput = namedtuple("FeatureOutput", "name base modified type contribution")
def _convert_figure_base64(fig):
buff = BytesIO()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fig.tight_layout() # removes padding
fig.savefig(buff, format="png")
base64img = base64.b64encode(buff.getvalue()).decode("utf-8")
return base64img
class BaseFeature:
r"""
All Feature classes extend this class to implement custom visualizations in
Insights.
It enforces child classes to implement ``visualization_type`` and ``visualize``
methods.
"""
def __init__(
self,
name: str,
baseline_transforms: Optional[Union[Callable, List[Callable]]],
input_transforms: Optional[Union[Callable, List[Callable]]],
visualization_transform: Optional[Callable],
) -> None:
r"""
Args:
name (str): The label of the specific feature. For example, an
ImageFeature's name can be "Photo".
baseline_transforms (list, Callable, optional): Optional list of
callables (e.g. functions) to be called on the input tensor
to construct multiple baselines. Currently only one baseline
is supported. See
:py:class:`.IntegratedGradients` for more
information about baselines.
input_transforms (list, Callable, optional): Optional list of callables
(e.g. functions) called on the input tensor sequentially to
convert it into the format expected by the model.
visualization_transform (Callable, optional): Optional callable (e.g.
function) applied as a postprocessing step of the original
input data (before ``input_transforms``) to convert it to a
format to be understood by the frontend visualizer as
specified in ``captum/captum/insights/frontend/App.js``.
"""
self.name = name
self.baseline_transforms = format_transforms(baseline_transforms)
self.input_transforms = format_transforms(input_transforms)
self.visualization_transform = visualization_transform
@staticmethod
def visualization_type() -> str:
raise NotImplementedError
def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
raise NotImplementedError
class ImageFeature(BaseFeature):
r"""
    ImageFeature is used to visualize image features in Insights. It expects an image in
    NCHW format. If C has a dimension of 1, it is assumed to be a greyscale image.
    If it has a dimension of 3, it is expected to be in RGB format.
"""
def __init__(
self,
name: str,
baseline_transforms: Union[Callable, List[Callable]],
input_transforms: Union[Callable, List[Callable]],
visualization_transform: Optional[Callable] = None,
) -> None:
r"""
Args:
name (str): The label of the specific feature. For example, an
ImageFeature's name can be "Photo".
baseline_transforms (list, Callable, optional): Optional list of
callables (e.g. functions) to be called on the input tensor
to construct multiple baselines. Currently only one baseline
is supported. See
:py:class:`.IntegratedGradients` for more
information about baselines.
input_transforms (list, Callable, optional): A list of transforms
or transform to be applied to the input. For images,
normalization is often applied here.
visualization_transform (Callable, optional): Optional callable (e.g.
function) applied as a postprocessing step of the original
input data (before input_transforms) to convert it to a
format to be visualized.
"""
super().__init__(
name,
baseline_transforms=baseline_transforms,
input_transforms=input_transforms,
visualization_transform=visualization_transform,
)
@staticmethod
def visualization_type() -> str:
return "image"
def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
if self.visualization_transform:
data = self.visualization_transform(data)
data_t, attribution_t = [
t.detach().squeeze().permute((1, 2, 0)).cpu().numpy()
for t in (data, attribution)
]
orig_fig, _ = viz.visualize_image_attr(
attribution_t, data_t, method="original_image", use_pyplot=False
)
attr_fig, _ = viz.visualize_image_attr(
attribution_t,
data_t,
method="heat_map",
sign="absolute_value",
use_pyplot=False,
)
img_64 = _convert_figure_base64(orig_fig)
attr_img_64 = _convert_figure_base64(attr_fig)
return FeatureOutput(
name=self.name,
base=img_64,
modified=attr_img_64,
type=self.visualization_type(),
contribution=contribution_frac,
)
class TextFeature(BaseFeature):
r"""
TextFeature is used to visualize text (e.g. sentences) in Insights.
It expects the visualization transform to convert the input data (e.g. index to
string) to the raw text.
"""
def __init__(
self,
name: str,
baseline_transforms: Union[Callable, List[Callable]],
input_transforms: Union[Callable, List[Callable]],
visualization_transform: Callable,
) -> None:
r"""
Args:
name (str): The label of the specific feature. For example, an
ImageFeature's name can be "Photo".
baseline_transforms (list, Callable, optional): Optional list of
callables (e.g. functions) to be called on the input tensor
to construct multiple baselines. Currently only one baseline
is supported. See
:py:class:`.IntegratedGradients` for more
information about baselines.
For text features, a common baseline is a tensor of indices
corresponding to PAD with the same size as the input
tensor. See :py:class:`.TokenReferenceBase` for more
information.
input_transforms (list, Callable, optional): A list of transforms
or transform to be applied to the input. For text, a common
transform is to convert the tokenized input tensor into an
interpretable embedding. See
:py:class:`.InterpretableEmbeddingBase`
and
:py:func:`~.configure_interpretable_embedding_layer`
for more information.
visualization_transform (Callable, optional): Optional callable (e.g.
function) applied as a postprocessing step of the original
input data (before ``input_transforms``) to convert it to a
suitable format for visualization. For text features,
a common function is to convert the token indices to their
corresponding (sub)words.
"""
super().__init__(
name,
baseline_transforms=baseline_transforms,
input_transforms=input_transforms,
visualization_transform=visualization_transform,
)
@staticmethod
def visualization_type() -> str:
return "text"
def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
if self.visualization_transform:
text = self.visualization_transform(data)
else:
text = data
attribution = attribution.squeeze(0)
data = data.squeeze(0)
if len(attribution.shape) > 1:
attribution = attribution.sum(dim=1)
# L-Infinity norm, if norm is 0, all attr elements are 0
attr_max = attribution.abs().max()
normalized_attribution = safe_div(attribution, attr_max)
modified = [x * 100 for x in normalized_attribution.tolist()]
return FeatureOutput(
name=self.name,
base=text,
modified=modified,
type=self.visualization_type(),
contribution=contribution_frac,
)
class GeneralFeature(BaseFeature):
r"""
GeneralFeature is used for non-specified feature visualization in Insights.
It can be used for dense or sparse features.
Currently general features are only supported for 2-d tensors, in the format (N, C)
where N is the number of samples and C is the number of categories.
"""
def __init__(self, name: str, categories: List[str]) -> None:
r"""
Args:
name (str): The label of the specific feature. For example, an
ImageFeature's name can be "Photo".
categories (list[str]): Category labels for the general feature. The
order and size should match the second dimension of the
``data`` tensor parameter in ``visualize``.
"""
super().__init__(
name,
baseline_transforms=None,
input_transforms=None,
visualization_transform=None,
)
self.categories = categories
@staticmethod
def visualization_type() -> str:
return "general"
def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
attribution = attribution.squeeze(0)
data = data.squeeze(0)
# L-2 norm, if norm is 0, all attr elements are 0
l2_norm = attribution.norm()
normalized_attribution = safe_div(attribution, l2_norm)
modified = [x * 100 for x in normalized_attribution.tolist()]
base = [f"{c}: {d:.2f}" for c, d in zip(self.categories, data.tolist())]
return FeatureOutput(
name=self.name,
base=base,
modified=modified,
type=self.visualization_type(),
contribution=contribution_frac,
)
class EmptyFeature(BaseFeature):
def __init__(
self,
name: str = "empty",
baseline_transforms: Optional[Union[Callable, List[Callable]]] = None,
input_transforms: Optional[Union[Callable, List[Callable]]] = None,
visualization_transform: Optional[Callable] = None,
) -> None:
super().__init__(
name,
baseline_transforms=baseline_transforms,
input_transforms=input_transforms,
visualization_transform=visualization_transform,
)
@staticmethod
def visualization_type() -> str:
return "empty"
def visualize(self, _attribution, _data, contribution_frac) -> FeatureOutput:
return FeatureOutput(
name=self.name,
base=None,
modified=None,
type=self.visualization_type(),
contribution=contribution_frac,
)
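# Hedged construction sketch: a TextFeature for a toy token-index model.
# `vocab` and the transforms below are hypothetical stand-ins written for
# this example, not part of the module.
def _example_text_feature(vocab):
    return TextFeature(
        name="Review",
        # Baseline: replace every token index with 0 (e.g. a PAD token).
        baseline_transforms=[lambda inp: inp * 0],
        input_transforms=[],
        # Map token indices back to words for the frontend.
        visualization_transform=lambda inp: [vocab[int(i)] for i in inp],
    )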
|
#!/usr/bin/env python3
import inspect
from collections import namedtuple
from typing import (
Callable,
cast,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import torch
from captum._utils.common import _run_forward, safe_div
from captum.insights.attr_vis.config import (
ATTRIBUTION_METHOD_CONFIG,
ATTRIBUTION_NAMES_TO_METHODS,
)
from captum.insights.attr_vis.features import BaseFeature
from torch import Tensor
from torch.nn import Module
OutputScore = namedtuple("OutputScore", "score index label")
class AttributionCalculation:
def __init__(
self,
models: Sequence[Module],
classes: Sequence[str],
features: List[BaseFeature],
score_func: Optional[Callable] = None,
use_label_for_attr: bool = True,
) -> None:
self.models = models
self.classes = classes
self.features = features
self.score_func = score_func
self.use_label_for_attr = use_label_for_attr
self.baseline_cache: dict = {}
self.transformed_input_cache: dict = {}
def calculate_predicted_scores(
self, inputs, additional_forward_args, model
) -> Tuple[
List[OutputScore], Optional[List[Tuple[Tensor, ...]]], Tuple[Tensor, ...]
]:
# Check if inputs have cached baselines and transformed inputs
hashable_inputs = tuple(inputs)
if hashable_inputs in self.baseline_cache:
baselines_group = self.baseline_cache[hashable_inputs]
transformed_inputs = self.transformed_input_cache[hashable_inputs]
else:
# Initialize baselines
baseline_transforms_len = 1 # todo support multiple baselines
baselines: List[List[Optional[Tensor]]] = [
[None] * len(self.features) for _ in range(baseline_transforms_len)
]
transformed_inputs = list(inputs)
for feature_i, feature in enumerate(self.features):
transformed_inputs[feature_i] = self._transform(
feature.input_transforms, transformed_inputs[feature_i], True
)
for baseline_i in range(baseline_transforms_len):
if baseline_i > len(feature.baseline_transforms) - 1:
baselines[baseline_i][feature_i] = torch.zeros_like(
transformed_inputs[feature_i]
)
else:
baselines[baseline_i][feature_i] = self._transform(
[feature.baseline_transforms[baseline_i]],
transformed_inputs[feature_i],
True,
)
baselines = cast(List[List[Optional[Tensor]]], baselines)
baselines_group = [tuple(b) for b in baselines]
self.baseline_cache[hashable_inputs] = baselines_group
self.transformed_input_cache[hashable_inputs] = transformed_inputs
outputs = _run_forward(
model,
tuple(transformed_inputs),
additional_forward_args=additional_forward_args,
)
if self.score_func is not None:
outputs = self.score_func(outputs)
if outputs.nelement() == 1:
scores = outputs
predicted = scores.round().to(torch.int)
else:
scores, predicted = outputs.topk(min(4, outputs.shape[-1]))
scores = scores.cpu().squeeze(0)
predicted = predicted.cpu().squeeze(0)
predicted_scores = self._get_labels_from_scores(scores, predicted)
return predicted_scores, baselines_group, tuple(transformed_inputs)
def calculate_attribution(
self,
baselines: Optional[Sequence[Tuple[Tensor, ...]]],
data: Tuple[Tensor, ...],
additional_forward_args: Optional[Tuple[Tensor, ...]],
label: Optional[Union[Tensor]],
attribution_method_name: str,
attribution_arguments: Dict,
model: Module,
) -> Tuple[Tensor, ...]:
attribution_cls = ATTRIBUTION_NAMES_TO_METHODS[attribution_method_name]
attribution_method = attribution_cls(model)
if attribution_method_name in ATTRIBUTION_METHOD_CONFIG:
param_config = ATTRIBUTION_METHOD_CONFIG[attribution_method_name]
if param_config.post_process:
for k, v in attribution_arguments.items():
if k in param_config.post_process:
attribution_arguments[k] = param_config.post_process[k](v)
# TODO support multiple baselines
baseline = baselines[0] if baselines and len(baselines) > 0 else None
label = (
None
if not self.use_label_for_attr or label is None or label.nelement() == 0
else label
)
if "baselines" in inspect.signature(attribution_method.attribute).parameters:
attribution_arguments["baselines"] = baseline
attr = attribution_method.attribute.__wrapped__(
attribution_method, # self
data,
additional_forward_args=additional_forward_args,
target=label,
**attribution_arguments,
)
return attr
def calculate_net_contrib(
self, attrs_per_input_feature: Tuple[Tensor, ...]
) -> List[float]:
# get the net contribution per feature (input)
net_contrib = torch.stack(
[attrib.flatten().sum() for attrib in attrs_per_input_feature]
)
# normalise the contribution, s.t. sum(abs(x_i)) = 1
norm = torch.norm(net_contrib, p=1)
# if norm is 0, all net_contrib elements are 0
net_contrib = safe_div(net_contrib, norm)
return net_contrib.tolist()
def _transform(
self, transforms: Iterable[Callable], inputs: Tensor, batch: bool = False
) -> Tensor:
transformed_inputs = inputs
# TODO support batch size > 1
if batch:
transformed_inputs = inputs.squeeze(0)
for t in transforms:
transformed_inputs = t(transformed_inputs)
if batch:
transformed_inputs = transformed_inputs.unsqueeze(0)
return transformed_inputs
def _get_labels_from_scores(
self, scores: Tensor, indices: Tensor
) -> List[OutputScore]:
pred_scores: List[OutputScore] = []
if indices.nelement() < 2:
return pred_scores
for i in range(len(indices)):
score = scores[i]
pred_scores.append(
OutputScore(score, indices[i], self.classes[int(indices[i])])
)
return pred_scores
|
#!/usr/bin/env python3
import os
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from captum.insights import AttributionVisualizer, Batch
from captum.insights.attr_vis.features import ImageFeature
def get_classes():
classes = [
"Plane",
"Car",
"Bird",
"Cat",
"Deer",
"Dog",
"Frog",
"Horse",
"Ship",
"Truck",
]
return classes
def get_pretrained_model():
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.pool2 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.relu3 = nn.ReLU()
self.relu4 = nn.ReLU()
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
pt_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "models/cifar_torchvision.pt")
)
net.load_state_dict(torch.load(pt_path))
return net
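# Zero baseline: attributions are computed relative to an all-zero tensor of
# the same shape as the (already transformed) input.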
def baseline_func(input):
return input * 0
def formatted_data_iter():
dataset = torchvision.datasets.CIFAR10(
root="data/test", train=False, download=True, transform=transforms.ToTensor()
)
dataloader = iter(
torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=False, num_workers=2)
)
while True:
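        # next() raises StopIteration once the loader is exhausted (surfacing
        # as a RuntimeError inside a generator under PEP 479); the CIFAR-10
        # test split is large enough that this is not reached in practice.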
images, labels = next(dataloader)
yield Batch(inputs=images, labels=labels)
def main():
normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
model = get_pretrained_model()
visualizer = AttributionVisualizer(
models=[model],
score_func=lambda o: torch.nn.functional.softmax(o, 1),
classes=get_classes(),
features=[
ImageFeature(
"Photo",
baseline_transforms=[baseline_func],
input_transforms=[normalize],
)
],
dataset=formatted_data_iter(),
)
visualizer.serve(debug=True)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
from collections import namedtuple
from itertools import cycle
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Tuple,
Union,
)
import torch
from captum.attr import IntegratedGradients
from captum.attr._utils.batching import _batched_generator
from captum.insights.attr_vis.attribution_calculation import (
AttributionCalculation,
OutputScore,
)
from captum.insights.attr_vis.config import (
ATTRIBUTION_METHOD_CONFIG,
ATTRIBUTION_NAMES_TO_METHODS,
)
from captum.insights.attr_vis.features import BaseFeature
from captum.insights.attr_vis.server import namedtuple_to_dict
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
_CONTEXT_COLAB = "_CONTEXT_COLAB"
_CONTEXT_IPYTHON = "_CONTEXT_IPYTHON"
_CONTEXT_NONE = "_CONTEXT_NONE"
def _get_context():
"""Determine the most specific context that we're in.
Implementation from TensorBoard: https://git.io/JvObD.
Returns:
_CONTEXT_COLAB: If in Colab with an IPython notebook context.
_CONTEXT_IPYTHON: If not in Colab, but we are in an IPython notebook
context (e.g., from running `jupyter notebook` at the command
line).
_CONTEXT_NONE: Otherwise (e.g., by running a Python script at the
command-line or using the `ipython` interactive shell).
"""
# In Colab, the `google.colab` module is available, but the shell
# returned by `IPython.get_ipython` does not have a `get_trait`
# method.
try:
import google.colab # noqa: F401
import IPython
except ImportError:
pass
else:
if IPython.get_ipython() is not None:
# We'll assume that we're in a Colab notebook context.
return _CONTEXT_COLAB
# In an IPython command line shell or Jupyter notebook, we can
# directly query whether we're in a notebook context.
try:
import IPython
except ImportError:
pass
else:
ipython = IPython.get_ipython()
if ipython is not None and ipython.has_trait("kernel"):
return _CONTEXT_IPYTHON
# Otherwise, we're not in a known notebook context.
return _CONTEXT_NONE
VisualizationOutput = namedtuple(
"VisualizationOutput", "feature_outputs actual predicted active_index model_index"
)
Contribution = namedtuple("Contribution", "name percent")
SampleCache = namedtuple("SampleCache", "inputs additional_forward_args label")
class FilterConfig(NamedTuple):
attribution_method: str = IntegratedGradients.get_name()
# issue with mypy github.com/python/mypy/issues/8376
attribution_arguments: Dict[str, Any] = {
arg: config.value # type: ignore
for arg, config in ATTRIBUTION_METHOD_CONFIG[
IntegratedGradients.get_name()
].params.items()
}
prediction: str = "all"
classes: List[str] = []
num_examples: int = 4
class Batch:
def __init__(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
labels: Optional[Tensor],
additional_args=None,
) -> None:
r"""
        Constructs a batch of inputs to be attributed and visualized.
Args:
inputs (Tensor or tuple[Tensor, ...]): Batch of inputs for a model.
These may be either a Tensor or tuple of tensors. Each tensor
must correspond to a feature for AttributionVisualizer, and
the corresponding input transform function of the feature
is applied to each input tensor prior to passing it to the
model. It is assumed that the first dimension of each
input tensor corresponds to the number of examples
(batch size) and is aligned for all input tensors.
labels (Tensor): Tensor containing correct labels for input examples.
This must be a 1D tensor with length matching the first
dimension of each input tensor.
additional_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to ``forward_func`` in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples.
"""
self.inputs = inputs
self.labels = labels
self.additional_args = additional_args
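# A minimal construction sketch (hypothetical shapes, for illustration only):
#
#   imgs = torch.rand(4, 3, 32, 32)    # 4 examples, aligned on dim 0
#   lbls = torch.tensor([3, 5, 1, 7])  # one label per example
#   batch = Batch(inputs=imgs, labels=lbls)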
class AttributionVisualizer:
def __init__(
self,
models: Union[List[Module], Module],
classes: List[str],
features: Union[List[BaseFeature], BaseFeature],
dataset: Iterable[Batch],
score_func: Optional[Callable] = None,
use_label_for_attr: bool = True,
) -> None:
r"""
Args:
models (torch.nn.Module): One or more PyTorch modules (models) for
attribution visualization.
classes (list[str]): List of strings corresponding to the names of
classes for classification.
features (list[BaseFeature]): List of BaseFeatures, which correspond
to input arguments to the model. Each feature object defines
relevant transformations for converting to model input,
constructing baselines, and visualizing. The length of the
features list should exactly match the number of (tensor)
arguments expected by the given model.
For instance, an image classifier should only provide
a single BaseFeature, while a multimodal classifier may
provide a list of features, each corresponding to a different
tensor input and potentially different modalities.
dataset (Iterable of Batch): Defines the dataset to visualize attributions
for. This must be an iterable of batch objects, each of which
may contain multiple input examples.
score_func (Callable, optional): This function is applied to the model
output to obtain the score for each class. For instance,
this function could be the softmax or final non-linearity
of the network, applied to the model output. The indices
of the second dimension of the output should correspond
to the class names provided. If None, the model outputs
are taken directly and assumed to correspond to the
class scores.
Default: None
use_label_for_attr (bool, optional): If true, the class index is passed
to the relevant attribution method. This is necessary in most
cases where there is an output neuron corresponding to each
class. When the model output is a scalar and class index
(e.g. positive, negative) is inferred from the output value,
this argument should be False.
Default: True
"""
        if not isinstance(models, list):
            models = [models]
        if not isinstance(features, list):
            features = [features]
self.classes = classes
self.features = features
self.dataset = dataset
self.models = models
self.attribution_calculation = AttributionCalculation(
models, classes, features, score_func, use_label_for_attr
)
self._outputs: List[VisualizationOutput] = []
self._config = FilterConfig(prediction="all", classes=[], num_examples=4)
self._dataset_iter = iter(dataset)
self._dataset_cache: List[Batch] = []
def _calculate_attribution_from_cache(
self, input_index: int, model_index: int, target: Optional[Tensor]
) -> Optional[VisualizationOutput]:
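        # self._outputs stores (VisualizationOutput list, SampleCache) pairs
        # (see _get_outputs); index 1 recovers the cached raw sample so the
        # attribution can be recomputed for a new target.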
c = self._outputs[input_index][1]
result = self._calculate_vis_output(
c.inputs,
c.additional_forward_args,
c.label,
torch.tensor(target),
model_index,
)
if not result:
return None
return result[0]
def _update_config(self, settings):
self._config = FilterConfig(
attribution_method=settings["attribution_method"],
attribution_arguments=settings["arguments"],
prediction=settings["prediction"],
classes=settings["classes"],
num_examples=4,
)
@log_usage()
def render(self, debug=True):
from captum.insights.attr_vis.widget import CaptumInsights
from IPython.display import display
widget = CaptumInsights(visualizer=self)
display(widget)
if debug:
display(widget.out)
@log_usage()
def serve(self, blocking=False, debug=False, port=None, bind_all=False):
context = _get_context()
if context == _CONTEXT_COLAB:
return self._serve_colab(blocking=blocking, debug=debug, port=port)
else:
return self._serve(
blocking=blocking, debug=debug, port=port, bind_all=bind_all
)
def _serve(self, blocking=False, debug=False, port=None, bind_all=False):
from captum.insights.attr_vis.server import start_server
return start_server(
self, blocking=blocking, debug=debug, _port=port, bind_all=bind_all
)
def _serve_colab(self, blocking=False, debug=False, port=None):
import ipywidgets as widgets
from captum.insights.attr_vis.server import start_server
from IPython.display import display, HTML
        # TODO: The output widget only captures the beginning of the server
        # logs; the context manager does not seem to be respected when the web
        # server runs on a separate thread. Fix this to capture the full logs.
out = widgets.Output()
with out:
port = start_server(self, blocking=blocking, debug=debug, _port=port)
shell = """
<div id="root"></div>
<script>
(function() {
document.querySelector("base").href = "http://localhost:%PORT%";
function reloadScriptsAndCSS(root) {
// Referencing TensorBoard's method for reloading scripts,
// we remove and reinsert each script
for (const script of root.querySelectorAll("script")) {
const newScript = document.createElement("script");
newScript.type = script.type;
if (script.src) {
newScript.src = script.src;
}
if (script.textContent) {
newScript.textContent = script.textContent;
}
root.appendChild(newScript);
script.remove();
}
// A similar method is used to reload styles
for (const link of root.querySelectorAll("link")) {
const newLink = document.createElement("link");
newLink.rel = link.rel;
newLink.href = link.href;
document.querySelector("head").appendChild(newLink);
link.remove();
}
}
const root = document.getElementById("root");
fetch(".")
.then(x => x.text())
.then(html => void (root.innerHTML = html))
.then(() => reloadScriptsAndCSS(root));
})();
</script>
""".replace(
"%PORT%", str(port)
)
html = HTML(shell)
display(html)
display(out)
def _predictions_matches_labels(
self, predicted_scores: List[OutputScore], labels: Union[str, List[str]]
) -> bool:
if len(predicted_scores) == 0:
return False
predicted_label = predicted_scores[0].label
if isinstance(labels, List):
return predicted_label in labels
return labels == predicted_label
def _should_keep_prediction(
self, predicted_scores: List[OutputScore], actual_label: Optional[OutputScore]
) -> bool:
# filter by class
if len(self._config.classes) != 0:
if not self._predictions_matches_labels(
predicted_scores, self._config.classes
):
return False
if not actual_label:
return True
# filter by accuracy
label_name = actual_label.label
if self._config.prediction == "all":
pass
elif self._config.prediction == "correct":
if not self._predictions_matches_labels(predicted_scores, label_name):
return False
elif self._config.prediction == "incorrect":
if self._predictions_matches_labels(predicted_scores, label_name):
return False
else:
            raise ValueError(f"Invalid prediction config: {self._config.prediction}")
return True
def _calculate_vis_output(
self,
inputs,
additional_forward_args,
label,
target=None,
single_model_index=None,
) -> Optional[List[VisualizationOutput]]:
# Use all models, unless the user wants to render data for a particular one
models_used = (
[self.models[single_model_index]]
if single_model_index is not None
else self.models
)
results = []
for model_index, model in enumerate(models_used):
# Get list of model visualizations for each input
actual_label_output = None
if label is not None and len(label) > 0:
label_index = int(label[0])
actual_label_output = OutputScore(
score=100, index=label_index, label=self.classes[label_index]
)
(
predicted_scores,
baselines,
transformed_inputs,
) = self.attribution_calculation.calculate_predicted_scores(
inputs, additional_forward_args, model
)
# Filter based on UI configuration
if actual_label_output is None or not self._should_keep_prediction(
predicted_scores, actual_label_output
):
continue
if target is None:
target = (
predicted_scores[0].index if len(predicted_scores) > 0 else None
)
            # Attributions are computed per model input; the inputs given to
            # the model are described via `self.features`. A single input may
            # comprise many values, e.g. all the pixels of an image together
            # form one input.
attrs_per_feature = self.attribution_calculation.calculate_attribution(
baselines,
transformed_inputs,
additional_forward_args,
target,
self._config.attribution_method,
self._config.attribution_arguments,
model,
)
net_contrib = self.attribution_calculation.calculate_net_contrib(
attrs_per_feature
)
# the features per input given
features_per_input = [
feature.visualize(attr, data, contrib)
for feature, attr, data, contrib in zip(
self.features, attrs_per_feature, inputs, net_contrib
)
]
results.append(
VisualizationOutput(
feature_outputs=features_per_input,
actual=actual_label_output,
predicted=predicted_scores,
active_index=target
if target is not None
else actual_label_output.index,
                    # Even when we iterate over a single model, report the
                    # index the model has in the full `self.models` list
model_index=single_model_index
if single_model_index is not None
else model_index,
)
)
return results if results else None
def _get_outputs(self) -> List[Tuple[List[VisualizationOutput], SampleCache]]:
# If we run out of new batches, then we need to
# display data which was already shown before.
# However, since the dataset given to us is a generator,
# we can't reset it to return to the beginning.
# Because of this, we store a small cache of stale
# data, and iterate on it after the main generator
# stops returning new batches.
try:
batch_data = next(self._dataset_iter)
self._dataset_cache.append(batch_data)
if len(self._dataset_cache) > self._config.num_examples:
self._dataset_cache.pop(0)
except StopIteration:
self._dataset_iter = cycle(self._dataset_cache)
batch_data = next(self._dataset_iter)
vis_outputs = []
# Type ignore for issue with passing union to function taking generic
# https://github.com/python/mypy/issues/1533
for (
inputs,
additional_forward_args,
label,
) in _batched_generator( # type: ignore
inputs=batch_data.inputs,
additional_forward_args=batch_data.additional_args,
target_ind=batch_data.labels,
internal_batch_size=1, # should be 1 until we have batch label support
):
output = self._calculate_vis_output(inputs, additional_forward_args, label)
if output is not None:
cache = SampleCache(inputs, additional_forward_args, label)
vis_outputs.append((output, cache))
return vis_outputs
@log_usage()
def visualize(self):
self._outputs = []
while len(self._outputs) < self._config.num_examples:
self._outputs.extend(self._get_outputs())
return [o[0] for o in self._outputs]
def get_insights_config(self):
return {
"classes": self.classes,
"methods": list(ATTRIBUTION_NAMES_TO_METHODS.keys()),
"method_arguments": namedtuple_to_dict(
{k: v.params for (k, v) in ATTRIBUTION_METHOD_CONFIG.items()}
),
"selected_method": self._config.attribution_method,
}
|
#!/usr/bin/env python3
from typing import Callable, List, Optional, Union
def format_transforms(
transforms: Optional[Union[Callable, List[Callable]]]
) -> List[Callable]:
if transforms is None:
return []
if callable(transforms):
return [transforms]
return transforms
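# Behavior sketch:
#   format_transforms(None)    -> []
#   format_transforms(f)       -> [f]     (a single callable is wrapped)
#   format_transforms([f, g])  -> [f, g]  (lists pass through unchanged)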
|
#!/usr/bin/env python3
import ipywidgets as widgets
from captum.insights import AttributionVisualizer
from captum.insights.attr_vis.server import namedtuple_to_dict
from traitlets import Dict, Instance, List, observe, Unicode
@widgets.register
class CaptumInsights(widgets.DOMWidget):
"""A widget for interacting with Captum Insights."""
_view_name = Unicode("CaptumInsightsView").tag(sync=True)
_model_name = Unicode("CaptumInsightsModel").tag(sync=True)
_view_module = Unicode("jupyter-captum-insights").tag(sync=True)
_model_module = Unicode("jupyter-captum-insights").tag(sync=True)
_view_module_version = Unicode("^0.1.0").tag(sync=True)
_model_module_version = Unicode("^0.1.0").tag(sync=True)
visualizer = Instance(klass=AttributionVisualizer)
insights_config = Dict().tag(sync=True)
label_details = Dict().tag(sync=True)
attribution = Dict().tag(sync=True)
config = Dict().tag(sync=True)
output = List().tag(sync=True)
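    # Traits tagged with sync=True are mirrored between this Python model and
    # the JavaScript frontend, so assigning to them triggers a re-render.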
def __init__(self, **kwargs) -> None:
super(CaptumInsights, self).__init__(**kwargs)
self.insights_config = self.visualizer.get_insights_config()
self.out = widgets.Output()
with self.out:
print("Captum Insights widget created.")
@observe("config")
def _fetch_data(self, change):
if not self.config:
return
with self.out:
self.visualizer._update_config(self.config)
self.output = namedtuple_to_dict(self.visualizer.visualize())
self.config = dict()
@observe("label_details")
def _fetch_attribution(self, change):
if not self.label_details:
return
with self.out:
self.attribution = namedtuple_to_dict(
self.visualizer._calculate_attribution_from_cache(
self.label_details["inputIndex"],
self.label_details["modelIndex"],
self.label_details["labelIndex"],
)
)
self.label_details = dict()
|
version_info = (0, 1, 0, "alpha", 0)
_specifier_ = {"alpha": "a", "beta": "b", "candidate": "rc", "final": ""}
__version__ = "%s.%s.%s%s" % (
version_info[0],
version_info[1],
version_info[2],
""
if version_info[3] == "final"
else _specifier_[version_info[3]] + str(version_info[4]),
)
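# e.g. (0, 1, 0, "alpha", 0) -> "0.1.0a0"; a "final" release yields "0.1.0".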
|
from captum.insights.attr_vis.widget._version import __version__, version_info # noqa
from captum.insights.attr_vis.widget.widget import * # noqa
def _jupyter_nbextension_paths():
return [
{
"section": "notebook",
"src": "static",
"dest": "jupyter-captum-insights",
"require": "jupyter-captum-insights/extension",
}
]
|