#!/usr/bin/env python3
# Welcome to the PyTorch Captum setup.py.
#
# Environment variables for feature toggles:
#
# BUILD_INSIGHTS
# enables Captum Insights build via yarn
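#
#   Example (illustrative usage): BUILD_INSIGHTS=1 python setup.py develop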
#
import os
import re
import subprocess
import sys
from setuptools import find_packages, setup
REQUIRED_MAJOR = 3
REQUIRED_MINOR = 6
# Check for python version
if sys.version_info < (REQUIRED_MAJOR, REQUIRED_MINOR):
    error = (
        "Your version of python ({major}.{minor}) is too old. You need "
        "python >= {required_major}.{required_minor}."
    ).format(
        major=sys.version_info.major,
        minor=sys.version_info.minor,
        required_minor=REQUIRED_MINOR,
        required_major=REQUIRED_MAJOR,
    )
    sys.exit(error)


# Allow for environment variable checks
def check_env_flag(name, default=""):
    return os.getenv(name, default).upper() in ["ON", "1", "YES", "TRUE", "Y"]


BUILD_INSIGHTS = check_env_flag("BUILD_INSIGHTS")

VERBOSE_SCRIPT = True
for arg in sys.argv:
    if arg == "-q" or arg == "--quiet":
        VERBOSE_SCRIPT = False


def report(*args):
    if VERBOSE_SCRIPT:
        print(*args)
INSIGHTS_REQUIRES = ["flask", "ipython", "ipywidgets", "jupyter", "flask-compress"]
INSIGHTS_FILE_SUBDIRS = [
    "insights/attr_vis/frontend/build",
    "insights/attr_vis/models",
    "insights/attr_vis/widget/static",
]

TUTORIALS_REQUIRES = INSIGHTS_REQUIRES + ["torchtext", "torchvision"]

TEST_REQUIRES = ["pytest", "pytest-cov", "parameterized"]

DEV_REQUIRES = (
    INSIGHTS_REQUIRES
    + TEST_REQUIRES
    + [
        "black==22.3.0",
        "flake8",
        "sphinx",
        "sphinx-autodoc-typehints",
        "sphinxcontrib-katex",
        "mypy>=0.760",
        "usort==1.0.2",
        "ufmt",
        "scikit-learn",
        "annoy",
    ]
)
# get version string from module
with open(os.path.join(os.path.dirname(__file__), "captum/__init__.py"), "r") as f:
    version = re.search(
        r"__version__ = ['\"]([^'\"]*)['\"]", f.read(), re.M
    ).group(1)

report("-- Building version " + version)

# read in README.md as the long description
with open("README.md", "r") as fh:
    long_description = fh.read()


# optionally build Captum Insights via yarn
def build_insights():
    report("-- Building Captum Insights")
    command = "./scripts/build_insights.sh"
    report("Running: " + command)
    subprocess.check_call(command)


# explore paths under root and subdirs to gather package files
def get_package_files(root, subdirs):
    paths = []
    for subroot in subdirs:
        paths.append(os.path.join(subroot, "*"))
        for path, dirs, _ in os.walk(os.path.join(root, subroot)):
            for d in dirs:
                paths.append(os.path.join(path, d, "*")[len(root) + 1 :])
    return paths
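# For illustration (hypothetical layout): get_package_files("captum",
# ["insights/attr_vis/models"]) yields the glob "insights/attr_vis/models/*"
# plus one "<subdir>/<nested>/*" pattern, relative to the package root, for
# every nested directory found on disk.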
if __name__ == "__main__":
    if BUILD_INSIGHTS:
        build_insights()

    package_files = get_package_files("captum", INSIGHTS_FILE_SUBDIRS)

    setup(
        name="captum",
        version=version,
        description="Model interpretability for PyTorch",
        author="PyTorch Team",
        license="BSD-3",
        url="https://captum.ai",
        project_urls={
            "Documentation": "https://captum.ai",
            "Source": "https://github.com/pytorch/captum",
            "conda": "https://anaconda.org/pytorch/captum",
        },
        keywords=[
            "Model Interpretability",
            "Model Understanding",
            "Feature Importance",
            "Neuron Importance",
            "PyTorch",
        ],
        classifiers=[
            "Development Status :: 4 - Beta",
            "Intended Audience :: Developers",
            "Intended Audience :: Education",
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: BSD License",
            "Programming Language :: Python :: 3 :: Only",
            "Topic :: Scientific/Engineering",
        ],
        long_description=long_description,
        long_description_content_type="text/markdown",
        python_requires=">=3.6",
        install_requires=["matplotlib", "numpy", "torch>=1.6"],
        packages=find_packages(exclude=("tests", "tests.*")),
        extras_require={
            "dev": DEV_REQUIRES,
            "insights": INSIGHTS_REQUIRES,
            "test": TEST_REQUIRES,
            "tutorials": TUTORIALS_REQUIRES,
        },
        package_data={"captum": package_files},
        data_files=[
            (
                "share/jupyter/nbextensions/jupyter-captum-insights",
                [
                    "captum/insights/attr_vis/frontend/widget/src/extension.js",
                    "captum/insights/attr_vis/frontend/widget/src/index.js",
                ],
            ),
            (
                "etc/jupyter/nbconfig/notebook.d",
                ["captum/insights/attr_vis/widget/jupyter-captum-insights.json"],
            ),
        ],
    )
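# Example installs (illustrative, assuming a source checkout): a plain
# `pip install .` pulls only install_requires above, while
# `pip install ".[insights]"` or `pip install ".[dev]"` adds the
# corresponding extras_require groups.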
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
import typing
from typing import Any, Callable, cast, List, Optional, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr import (
    DeepLift,
    GradientShap,
    GuidedBackprop,
    IntegratedGradients,
    Saliency,
)
from captum.metrics import sensitivity_max
from captum.metrics._core.sensitivity import default_perturb_func
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
    BasicModel2,
    BasicModel4_MultiArgs,
    BasicModel_ConvNet_One_Conv,
    BasicModel_MultiLayer,
)
from torch import Tensor
@typing.overload
def _perturb_func(inputs: Tensor) -> Tensor:
    ...


@typing.overload
def _perturb_func(inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:
    ...


def _perturb_func(
    inputs: TensorOrTupleOfTensorsGeneric,
) -> Union[Tensor, Tuple[Tensor, ...]]:
    def perturb_ratio(input):
        return (
            torch.arange(-torch.numel(input[0]) // 2, torch.numel(input[0]) // 2)
            .view(input[0].shape)
            .float()
            / 100
        )

    input2 = None
    if isinstance(inputs, tuple):
        input1 = inputs[0]
        input2 = inputs[1]
    else:
        input1 = cast(Tensor, inputs)
    perturbed_input1 = input1 + perturb_ratio(input1)
    if input2 is None:
        return perturbed_input1
    return perturbed_input1, input2 + perturb_ratio(input2)
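

# Illustrative sketch (not part of the original tests): _perturb_func accepts
# either a single tensor or a tuple of tensors and perturbs each element by a
# fixed ratio, preserving the input structure.
def _example_perturb_func_usage():
    single = _perturb_func(torch.ones(4))
    pair = _perturb_func((torch.ones(4), torch.zeros(4)))
    assert isinstance(single, Tensor) and isinstance(pair, tuple)
    return single, pair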
class Test(BaseTest):
    def test_basic_sensitivity_max_single(self) -> None:
        model = BasicModel2()
        sa = Saliency(model)
        input1 = torch.tensor([3.0])
        input2 = torch.tensor([1.0])
        self.sensitivity_max_assert(
            sa.attribute,
            (input1, input2),
            torch.zeros(1),
            perturb_func=default_perturb_func,
        )

    def test_basic_sensitivity_max_multiple(self) -> None:
        model = BasicModel2()
        sa = Saliency(model)
        input1 = torch.tensor([3.0] * 20)
        input2 = torch.tensor([1.0] * 20)
        self.sensitivity_max_assert(
            sa.attribute, (input1, input2), torch.zeros(20), max_examples_per_batch=21
        )
        self.sensitivity_max_assert(
            sa.attribute, (input1, input2), torch.zeros(20), max_examples_per_batch=60
        )

    def test_basic_sensitivity_max_multiple_gradshap(self) -> None:
        model = BasicModel2()
        gs = GradientShap(model)
        input1 = torch.tensor([0.0] * 5)
        input2 = torch.tensor([0.0] * 5)
        baseline1 = torch.arange(0, 2).float() / 1000
        baseline2 = torch.arange(0, 2).float() / 1000
        self.sensitivity_max_assert(
            gs.attribute,
            (input1, input2),
            torch.zeros(5),
            baselines=(baseline1, baseline2),
            max_examples_per_batch=2,
        )
        self.sensitivity_max_assert(
            gs.attribute,
            (input1, input2),
            torch.zeros(5),
            baselines=(baseline1, baseline2),
            max_examples_per_batch=20,
        )
    def test_convnet_multi_target(self) -> None:
        r"""
        Another test with Saliency, local sensitivity, and a more
        complex model with a higher-dimensional input.
        """
        model = BasicModel_ConvNet_One_Conv()
        sa = Saliency(model)
        input = torch.stack([torch.arange(1, 17).float()] * 20, dim=0).view(20, 1, 4, 4)
        self.sensitivity_max_assert(
            sa.attribute,
            input,
            torch.zeros(20),
            target=torch.tensor([1] * 20),
            n_perturb_samples=10,
            max_examples_per_batch=40,
        )

    def test_convnet_multi_target_and_default_pert_func(self) -> None:
        r"""
        Similar to the previous test, but here we also exercise the default
        perturbation function.
        """
        model = BasicModel_ConvNet_One_Conv()
        gbp = GuidedBackprop(model)
        input = torch.stack([torch.arange(1, 17).float()] * 20, dim=0).view(20, 1, 4, 4)
        sens1 = self.sensitivity_max_assert(
            gbp.attribute,
            input,
            torch.zeros(20),
            perturb_func=default_perturb_func,
            target=torch.tensor([1] * 20),
            n_perturb_samples=10,
            max_examples_per_batch=40,
        )
        sens2 = self.sensitivity_max_assert(
            gbp.attribute,
            input,
            torch.zeros(20),
            perturb_func=default_perturb_func,
            target=torch.tensor([1] * 20),
            n_perturb_samples=10,
            max_examples_per_batch=5,
        )
        assertTensorAlmostEqual(self, sens1, sens2)

    def test_sensitivity_max_multi_dim(self) -> None:
        model = BasicModel_MultiLayer()
        input = torch.arange(1.0, 13.0).view(4, 3)
        additional_forward_args = (None, True)
        targets: List = [(0, 1, 1), (0, 1, 1), (1, 1, 1), (0, 1, 1)]
        ig = IntegratedGradients(model)
        self.sensitivity_max_assert(
            ig.attribute,
            input,
            torch.tensor([0.006, 0.01, 0.001, 0.008]),
            n_perturb_samples=1,
            max_examples_per_batch=4,
            perturb_func=_perturb_func,
            target=targets,
            additional_forward_args=additional_forward_args,
        )
    def test_sensitivity_max_multi_dim_batching(self) -> None:
        model = BasicModel_MultiLayer()
        input = torch.arange(1.0, 16.0).view(5, 3)
        additional_forward_args = (torch.ones(5, 3).float(), False)
        targets: List = [0, 0, 0, 0, 0]
        sa = Saliency(model)
        sensitivity1 = self.sensitivity_max_assert(
            sa.attribute,
            input,
            torch.zeros(5),
            n_perturb_samples=1,
            max_examples_per_batch=None,
            perturb_func=_perturb_func,
            target=targets,
            additional_forward_args=additional_forward_args,
        )
        sensitivity2 = self.sensitivity_max_assert(
            sa.attribute,
            input,
            torch.zeros(5),
            n_perturb_samples=10,
            max_examples_per_batch=10,
            perturb_func=_perturb_func,
            target=targets,
            additional_forward_args=additional_forward_args,
        )
        assertTensorAlmostEqual(self, sensitivity1, sensitivity2, 0.0)

    def test_sensitivity_additional_forward_args_multi_args(self) -> None:
        model = BasicModel4_MultiArgs()
        input1 = torch.tensor([[1.5, 2.0, 3.3]])
        input2 = torch.tensor([[3.0, 3.5, 2.2]])
        args = torch.tensor([[1.0, 3.0, 4.0]])
        ig = DeepLift(model)
        sensitivity1 = self.sensitivity_max_assert(
            ig.attribute,
            (input1, input2),
            torch.zeros(1),
            additional_forward_args=args,
            n_perturb_samples=1,
            max_examples_per_batch=1,
            perturb_func=_perturb_func,
        )
        sensitivity2 = self.sensitivity_max_assert(
            ig.attribute,
            (input1, input2),
            torch.zeros(1),
            additional_forward_args=args,
            n_perturb_samples=4,
            max_examples_per_batch=2,
            perturb_func=_perturb_func,
        )
        assertTensorAlmostEqual(self, sensitivity1, sensitivity2, 0.0)
    def test_classification_sensitivity_tpl_target_w_baseline(self) -> None:
        model = BasicModel_MultiLayer()
        input = torch.arange(1.0, 13.0).view(4, 3)
        baseline = torch.ones(4, 3)
        additional_forward_args = (torch.arange(1, 13).view(4, 3).float(), True)
        targets: List = [(0, 1, 1), (0, 1, 1), (1, 1, 1), (0, 1, 1)]
        dl = DeepLift(model)
        sens1 = self.sensitivity_max_assert(
            dl.attribute,
            input,
            torch.tensor([0.01, 0.003, 0.001, 0.001]),
            additional_forward_args=additional_forward_args,
            baselines=baseline,
            target=targets,
            n_perturb_samples=10,
            perturb_func=_perturb_func,
        )
        sens2 = self.sensitivity_max_assert(
            dl.attribute,
            input,
            torch.zeros(4),
            additional_forward_args=additional_forward_args,
            baselines=baseline,
            target=targets,
            n_perturb_samples=10,
            perturb_func=_perturb_func,
            max_examples_per_batch=30,
        )
        assertTensorAlmostEqual(self, sens1, sens2)
    def sensitivity_max_assert(
        self,
        expl_func: Callable,
        inputs: TensorOrTupleOfTensorsGeneric,
        expected_sensitivity: Tensor,
        perturb_func: Callable = _perturb_func,
        n_perturb_samples: int = 5,
        max_examples_per_batch: Optional[int] = None,
        baselines: BaselineType = None,
        target: TargetType = None,
        additional_forward_args: Any = None,
    ) -> Tensor:
        if baselines is None:
            sens = sensitivity_max(
                expl_func,
                inputs,
                perturb_func=perturb_func,
                target=target,
                additional_forward_args=additional_forward_args,
                n_perturb_samples=n_perturb_samples,
                max_examples_per_batch=max_examples_per_batch,
            )
        else:
            sens = sensitivity_max(
                expl_func,
                inputs,
                perturb_func=perturb_func,
                baselines=baselines,
                target=target,
                additional_forward_args=additional_forward_args,
                n_perturb_samples=n_perturb_samples,
                max_examples_per_batch=max_examples_per_batch,
            )
        assertTensorAlmostEqual(self, sens, expected_sensitivity, 0.05)
        return sens
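

# Illustrative sketch (assumed typical usage, mirroring sensitivity_max_assert
# above): calling the sensitivity_max metric directly on a Saliency explainer.
def _example_direct_sensitivity_max():
    model = BasicModel2()
    sa = Saliency(model)
    return sensitivity_max(
        sa.attribute,
        (torch.tensor([3.0]), torch.tensor([1.0])),
        perturb_func=default_perturb_func,
        n_perturb_samples=5,
    )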
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
import typing
from typing import Any, Callable, cast, List, Optional, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr import (
    Attribution,
    DeepLift,
    FeatureAblation,
    IntegratedGradients,
    Saliency,
)
from captum.metrics import infidelity, infidelity_perturb_func_decorator
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
    BasicModel2,
    BasicModel4_MultiArgs,
    BasicModel_ConvNet_One_Conv,
    BasicModel_MultiLayer,
)
from torch import Tensor
from torch.nn import Module
@infidelity_perturb_func_decorator(False)
def _local_perturb_func_default(
    inputs: TensorOrTupleOfTensorsGeneric,
) -> TensorOrTupleOfTensorsGeneric:
    return _local_perturb_func(inputs)[1]


@typing.overload
def _local_perturb_func(inputs: Tensor) -> Tuple[Tensor, Tensor]:
    ...


@typing.overload
def _local_perturb_func(
    inputs: Tuple[Tensor, ...]
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
    ...


def _local_perturb_func(
    inputs: TensorOrTupleOfTensorsGeneric,
) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Union[Tensor, Tuple[Tensor, ...]]]:
    input2 = None
    if isinstance(inputs, tuple):
        input1 = inputs[0]
        input2 = inputs[1]
    else:
        input1 = cast(Tensor, inputs)
    perturb1 = 0.0009 * torch.ones_like(input1)
    if input2 is None:
        return perturb1, input1 - perturb1
    perturb2 = 0.0121 * torch.ones_like(input2)
    return (perturb1, perturb2), (input1 - perturb1, input2 - perturb2)
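

# Illustrative only (not part of the original tests): a "local" perturbation
# function for infidelity returns a pair (perturbations, perturbed_inputs)
# whose shapes match the inputs.
def _example_local_perturb_contract():
    inp = torch.ones(2, 3)
    pert, perturbed = _local_perturb_func(inp)
    assert pert.shape == perturbed.shape == inp.shape
    return pert, perturbed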
@infidelity_perturb_func_decorator(True)
def _global_perturb_func1_default(
    inputs: TensorOrTupleOfTensorsGeneric,
) -> TensorOrTupleOfTensorsGeneric:
    return _global_perturb_func1(inputs)[1]


@typing.overload
def _global_perturb_func1(inputs: Tensor) -> Tuple[Tensor, Tensor]:
    ...


@typing.overload
def _global_perturb_func1(
    inputs: Tuple[Tensor, ...]
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
    ...


# sensitivity-N, N = #input features
def _global_perturb_func1(
    inputs: TensorOrTupleOfTensorsGeneric,
) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Union[Tensor, Tuple[Tensor, ...]]]:
    input2 = None
    if isinstance(inputs, tuple):
        input1 = inputs[0]
        input2 = inputs[1]
    else:
        input1 = cast(Tensor, inputs)
    pert1 = torch.ones(input1.shape)
    if input2 is None:
        return pert1, torch.zeros(input1.shape)
    pert2 = torch.ones(input2.shape)
    return (pert1, pert2), (torch.zeros(input1.shape), torch.zeros(input2.shape))
class Test(BaseTest):
    def test_basic_infidelity_single(self) -> None:
        input1 = torch.tensor([3.0])
        input2 = torch.tensor([1.0])
        inputs = (input1, input2)
        expected = torch.zeros(1)
        self.basic_model_assert(BasicModel2(), inputs, expected)

    def test_basic_infidelity_multiple(self) -> None:
        input1 = torch.tensor([3.0] * 3)
        input2 = torch.tensor([1.0] * 3)
        inputs = (input1, input2)
        expected = torch.zeros(3)
        infid = self.basic_model_assert(BasicModel2(), inputs, expected)
        infid_w_common_func = self.basic_model_assert(
            BasicModel2(),
            inputs,
            expected,
            perturb_func=_local_perturb_func_default,
            multiply_by_inputs=False,
        )
        assertTensorAlmostEqual(self, infid, infid_w_common_func)

    def test_basic_infidelity_multiple_with_batching(self) -> None:
        input1 = torch.tensor([3.0] * 20)
        input2 = torch.tensor([1.0] * 20)
        expected = torch.zeros(20)
        infid1 = self.basic_model_assert(
            BasicModel2(),
            (input1, input2),
            expected,
            n_perturb_samples=5,
            max_batch_size=21,
        )
        infid2 = self.basic_model_assert(
            BasicModel2(),
            (input1, input2),
            expected,
            n_perturb_samples=5,
            max_batch_size=60,
        )
        assertTensorAlmostEqual(self, infid1, infid2, delta=0.01, mode="max")
    def test_basic_infidelity_additional_forward_args1(self) -> None:
        model = BasicModel4_MultiArgs()
        input1 = torch.tensor([[1.5, 2.0, 3.3]])
        input2 = torch.tensor([[3.0, 3.5, 2.2]])
        args = torch.tensor([[1.0, 3.0, 4.0]])
        ig = IntegratedGradients(model)
        infidelity1 = self.basic_model_global_assert(
            ig,
            model,
            (input1, input2),
            torch.zeros(1),
            additional_args=args,
            n_perturb_samples=1,
            max_batch_size=1,
            perturb_func=_global_perturb_func1,
        )
        infidelity2 = self.basic_model_global_assert(
            ig,
            model,
            (input1, input2),
            torch.zeros(1),
            additional_args=args,
            n_perturb_samples=5,
            max_batch_size=2,
            perturb_func=_global_perturb_func1,
        )
        infidelity2_w_custom_pert_func = self.basic_model_global_assert(
            ig,
            model,
            (input1, input2),
            torch.zeros(1),
            additional_args=args,
            n_perturb_samples=5,
            max_batch_size=2,
            perturb_func=_global_perturb_func1_default,
        )
        assertTensorAlmostEqual(self, infidelity1, infidelity2, 0.0)
        assertTensorAlmostEqual(self, infidelity2_w_custom_pert_func, infidelity2, 0.0)
    def test_classification_infidelity_convnet_multi_targets(self) -> None:
        model = BasicModel_ConvNet_One_Conv()
        dl = DeepLift(model)
        input = torch.stack([torch.arange(1, 17).float()] * 20, dim=0).view(20, 1, 4, 4)
        self.infidelity_assert(
            model,
            dl.attribute(input, target=torch.tensor([1] * 20)) / input,
            input,
            torch.zeros(20),
            target=torch.tensor([1] * 20),
            multi_input=False,
            n_perturb_samples=500,
            max_batch_size=120,
        )

    def test_classification_infidelity_tpl_target(self) -> None:
        model = BasicModel_MultiLayer()
        input = torch.arange(1.0, 13.0).view(4, 3)
        additional_forward_args = (torch.arange(1, 13).view(4, 3).float(), True)
        targets: List = [(0, 1, 1), (0, 1, 1), (1, 1, 1), (0, 1, 1)]
        sa = Saliency(model)
        infid1 = self.infidelity_assert(
            model,
            sa.attribute(
                input, target=targets, additional_forward_args=additional_forward_args
            ),
            input,
            torch.zeros(4),
            additional_args=additional_forward_args,
            target=targets,
            multi_input=False,
        )
        infid2 = self.infidelity_assert(
            model,
            sa.attribute(
                input, target=targets, additional_forward_args=additional_forward_args
            ),
            input,
            torch.zeros(4),
            additional_args=additional_forward_args,
            target=targets,
            max_batch_size=2,
            multi_input=False,
        )
        assertTensorAlmostEqual(self, infid1, infid2, delta=1e-05, mode="max")
    def test_classification_infidelity_tpl_target_w_baseline(self) -> None:
        model = BasicModel_MultiLayer()
        input = torch.arange(1.0, 13.0).view(4, 3)
        baseline = torch.ones(4, 3)
        additional_forward_args = (torch.arange(1, 13).view(4, 3).float(), True)
        targets: List = [(0, 1, 1), (0, 1, 1), (1, 1, 1), (0, 1, 1)]
        ig = IntegratedGradients(model)

        def perturbed_func2(inputs, baselines):
            return torch.ones(baselines.shape), baselines

        @infidelity_perturb_func_decorator(True)
        def perturbed_func3(inputs, baselines):
            return baselines

        attr, delta = ig.attribute(
            input,
            target=targets,
            additional_forward_args=additional_forward_args,
            baselines=baseline,
            return_convergence_delta=True,
        )
        infid = self.infidelity_assert(
            model,
            attr,
            input,
            torch.tensor([0.10686, 0.0, 0.0, 0.0]),
            additional_args=additional_forward_args,
            baselines=baseline,
            target=targets,
            multi_input=False,
            n_perturb_samples=3,
            perturb_func=perturbed_func3,
        )
        infid2 = self.infidelity_assert(
            model,
            attr,
            input,
            torch.tensor([0.10686, 0.0, 0.0, 0.0]),
            additional_args=additional_forward_args,
            baselines=baseline,
            target=targets,
            multi_input=False,
            n_perturb_samples=3,
            perturb_func=perturbed_func2,
        )
        assertTensorAlmostEqual(self, infid, delta * delta)
        assertTensorAlmostEqual(self, infid, infid2)
    def test_basic_infidelity_multiple_with_normalize(self) -> None:
        input1 = torch.tensor([3.0] * 3)
        input2 = torch.tensor([1.0] * 3)
        inputs = (input1, input2)
        expected = torch.zeros(3)
        model = BasicModel2()
        ig = IntegratedGradients(model)
        attrs = ig.attribute(inputs)
        scaled_attrs = tuple(attr * 100 for attr in attrs)
        infid = self.infidelity_assert(model, attrs, inputs, expected, normalize=True)
        scaled_infid = self.infidelity_assert(
            model,
            scaled_attrs,
            inputs,
            expected,
            normalize=True,
        )
        # scaling attr should not change normalized infidelity
        assertTensorAlmostEqual(self, infid, scaled_infid)
    def test_sensitivity_n_ig(self) -> None:
        model = BasicModel_MultiLayer()
        ig = IntegratedGradients(model)
        self.basic_multilayer_sensitivity_n(ig, model)

    def test_sensitivity_n_fa(self) -> None:
        model = BasicModel_MultiLayer()
        fa = FeatureAblation(model)
        self.basic_multilayer_sensitivity_n(fa, model)

    def basic_multilayer_sensitivity_n(
        self, attr_algo: Attribution, model: Module
    ) -> None:
        # sensitivity-2
        def _global_perturb_func2(input):
            pert = torch.tensor([[0, 1, 1], [1, 1, 0], [1, 0, 1]]).float()
            return pert, (1 - pert) * input

        # sensitivity-1
        def _global_perturb_func3(input):
            pert = torch.tensor([[0, 0, 1], [1, 0, 0], [0, 1, 0]]).float()
            return pert, (1 - pert) * input

        @infidelity_perturb_func_decorator(True)
        def _global_perturb_func3_custom(input):
            return _global_perturb_func3(input)[1]

        input = torch.tensor([[1.0, 2.5, 3.3]])

        # infidelity for sensitivity-1
        infid = self.basic_model_global_assert(
            attr_algo,
            model,
            input,
            torch.zeros(1),
            additional_args=None,
            target=0,
            n_perturb_samples=3,
            max_batch_size=None,
            perturb_func=_global_perturb_func3,
        )
        infid_w_default = self.basic_model_global_assert(
            attr_algo,
            model,
            input,
            torch.zeros(1),
            additional_args=None,
            target=0,
            n_perturb_samples=3,
            max_batch_size=None,
            perturb_func=_global_perturb_func3_custom,
        )
        assertTensorAlmostEqual(self, infid, infid_w_default)

        # infidelity for sensitivity-2
        self.basic_model_global_assert(
            attr_algo,
            model,
            input,
            torch.zeros(1),
            additional_args=None,
            target=0,
            n_perturb_samples=3,
            max_batch_size=None,
            perturb_func=_global_perturb_func2,
        )

        # infidelity for sensitivity-3
        self.basic_model_global_assert(
            attr_algo,
            model,
            input,
            torch.zeros(1),
            additional_args=None,
            target=0,
            n_perturb_samples=3,
            max_batch_size=None,
            perturb_func=_global_perturb_func1,
        )
    def basic_model_assert(
        self,
        model: Module,
        inputs: TensorOrTupleOfTensorsGeneric,
        expected: Tensor,
        n_perturb_samples: int = 10,
        max_batch_size: Optional[int] = None,
        perturb_func: Callable = _local_perturb_func,
        multiply_by_inputs: bool = False,
        normalize: bool = False,
    ) -> Tensor:
        ig = IntegratedGradients(model)
        if multiply_by_inputs:
            attrs = cast(
                TensorOrTupleOfTensorsGeneric,
                tuple(
                    attr / input for input, attr in zip(inputs, ig.attribute(inputs))
                ),
            )
        else:
            attrs = ig.attribute(inputs)
        return self.infidelity_assert(
            model,
            attrs,
            inputs,
            expected,
            n_perturb_samples=n_perturb_samples,
            max_batch_size=max_batch_size,
            perturb_func=perturb_func,
            normalize=normalize,
        )
    def basic_model_global_assert(
        self,
        attr_algo: Attribution,
        model: Module,
        inputs: TensorOrTupleOfTensorsGeneric,
        expected: Tensor,
        additional_args: Any = None,
        target: TargetType = None,
        n_perturb_samples: int = 10,
        max_batch_size: Optional[int] = None,
        perturb_func: Callable = _global_perturb_func1,
        normalize: bool = False,
    ) -> Tensor:
        attrs = attr_algo.attribute(
            inputs, additional_forward_args=additional_args, target=target
        )
        infid = self.infidelity_assert(
            model,
            attrs,
            inputs,
            expected,
            additional_args=additional_args,
            perturb_func=perturb_func,
            target=target,
            n_perturb_samples=n_perturb_samples,
            max_batch_size=max_batch_size,
            normalize=normalize,
        )
        return infid
    def infidelity_assert(
        self,
        model: Module,
        attributions: TensorOrTupleOfTensorsGeneric,
        inputs: TensorOrTupleOfTensorsGeneric,
        expected: Tensor,
        additional_args: Any = None,
        baselines: BaselineType = None,
        n_perturb_samples: int = 10,
        target: TargetType = None,
        max_batch_size: Optional[int] = None,
        multi_input: bool = True,
        perturb_func: Callable = _local_perturb_func,
        normalize: bool = False,
        **kwargs: Any,
    ) -> Tensor:
        infid = infidelity(
            model,
            perturb_func,
            inputs,
            attributions,
            additional_forward_args=additional_args,
            target=target,
            baselines=baselines,
            n_perturb_samples=n_perturb_samples,
            max_examples_per_batch=max_batch_size,
            normalize=normalize,
        )
        assertTensorAlmostEqual(self, infid, expected, 0.05)
        return infid
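

# Illustrative sketch (assumed typical usage, mirroring infidelity_assert
# above): computing the infidelity metric directly for IntegratedGradients
# attributions on BasicModel2.
def _example_direct_infidelity():
    model = BasicModel2()
    inputs = (torch.tensor([3.0]), torch.tensor([1.0]))
    attrs = IntegratedGradients(model).attribute(inputs)
    return infidelity(model, _local_perturb_func, inputs, attrs)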
# ---------------------------------------------------------------------------
from unittest.mock import patch
import torch
from captum.insights.attr_vis.features import (
    _convert_figure_base64,
    EmptyFeature,
    FeatureOutput,
    GeneralFeature,
    ImageFeature,
    TextFeature,
)
from matplotlib.figure import Figure
from tests.helpers.basic import BaseTest
class TestTextFeature(BaseTest):
    FEATURE_NAME = "question"

    def test_text_feature_returns_text_as_visualization_type(self) -> None:
        feature = TextFeature(self.FEATURE_NAME, None, None, None)
        self.assertEqual(feature.visualization_type(), "text")

    def test_text_feature_uses_visualization_transform_if_provided(self) -> None:
        input_data = torch.rand(2, 2)
        transformed_data = torch.rand(1, 1)

        def mock_transform(data):
            return transformed_data

        feature = TextFeature(
            name=self.FEATURE_NAME,
            baseline_transforms=None,
            input_transforms=None,
            visualization_transform=mock_transform,
        )
        feature_output = feature.visualize(
            attribution=torch.rand(1, 1), data=input_data, contribution_frac=1.0
        )
        # has transformed data
        self.assertEqual(feature_output.base, transformed_data)

        feature = TextFeature(
            name=self.FEATURE_NAME,
            baseline_transforms=None,
            input_transforms=None,
            visualization_transform=None,
        )
        feature_output = feature.visualize(
            attribution=torch.rand(1, 1), data=input_data, contribution_frac=1.0
        )
        # has original data
        self.assertIs(feature_output.base, input_data)

    def test_text_feature_generates_correct_visualization_output(self) -> None:
        attribution = torch.tensor([0.1, 0.2, 0.3, 0.4])
        input_data = torch.rand(1, 2)
        expected_modified = [100 * x for x in (attribution / attribution.max())]
        contribution_frac = torch.rand(1).item()
        feature = TextFeature(
            name=self.FEATURE_NAME,
            baseline_transforms=None,
            input_transforms=None,
            visualization_transform=None,
        )
        feature_output = feature.visualize(attribution, input_data, contribution_frac)
        expected_feature_output = FeatureOutput(
            name=self.FEATURE_NAME,
            base=input_data,
            modified=expected_modified,
            type="text",
            contribution=contribution_frac,
        )
        self.assertEqual(expected_feature_output, feature_output)
class TestEmptyFeature(BaseTest):
    def test_empty_feature_should_generate_fixed_output(self) -> None:
        feature = EmptyFeature()
        contribution = torch.rand(1).item()
        expected_output = FeatureOutput(
            name="empty",
            base=None,
            modified=None,
            type="empty",
            contribution=contribution,
        )
        self.assertEqual(expected_output, feature.visualize(None, None, contribution))
class TestImageFeature(BaseTest):
    def test_image_feature_generates_correct_output(self) -> None:
        attribution = torch.zeros(1, 3, 4, 4)
        data = torch.ones(1, 3, 4, 4)
        contribution = 1.0
        name = "photo"
        orig_fig = Figure(figsize=(4, 4))
        attr_fig = Figure(figsize=(4, 4))

        def mock_viz_attr(*args, **kwargs):
            if kwargs["method"] == "original_image":
                return orig_fig, None
            else:
                return attr_fig, None

        feature = ImageFeature(
            name=name,
            baseline_transforms=None,
            input_transforms=None,
            visualization_transform=None,
        )
        with patch(
            "captum.attr._utils.visualization.visualize_image_attr", mock_viz_attr
        ):
            feature_output = feature.visualize(attribution, data, contribution)
            expected_feature_output = FeatureOutput(
                name=name,
                base=_convert_figure_base64(orig_fig),
                modified=_convert_figure_base64(attr_fig),
                type="image",
                contribution=contribution,
            )
        self.assertEqual(expected_feature_output, feature_output)
class TestGeneralFeature(BaseTest):
    def test_general_feature_generates_correct_output(self) -> None:
        name = "general_feature"
        categories = ["cat1", "cat2", "cat3", "cat4"]
        attribution = torch.Tensor(1, 4)
        attribution.fill_(0.5)
        data = torch.rand(1, 4)
        contribution = torch.rand(1).item()
        attr_squeezed = attribution.squeeze(0)
        expected_modified = [
            x * 100 for x in (attr_squeezed / attr_squeezed.norm()).tolist()
        ]
        expected_base = [
            f"{c}: {d:.2f}" for c, d in zip(categories, data.squeeze().tolist())
        ]
        feature = GeneralFeature(name, categories)
        feature_output = feature.visualize(
            attribution=attribution, data=data, contribution_frac=contribution
        )
        expected_feature_output = FeatureOutput(
            name=name,
            base=expected_base,
            modified=expected_modified,
            type="general",
            contribution=contribution,
        )
        self.assertEqual(expected_feature_output, feature_output)
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
import unittest
from typing import Callable, List, Union
import torch
import torch.nn as nn
from captum.insights import AttributionVisualizer, Batch
from captum.insights.attr_vis.app import FilterConfig
from captum.insights.attr_vis.features import BaseFeature, FeatureOutput, ImageFeature
from tests.helpers.basic import BaseTest
class RealFeature(BaseFeature):
    def __init__(
        self,
        name: str,
        baseline_transforms: Union[Callable, List[Callable]],
        input_transforms: Union[Callable, List[Callable]],
        visualization_transforms: Union[None, Callable, List[Callable]] = None,
    ) -> None:
        super().__init__(
            name,
            baseline_transforms=baseline_transforms,
            input_transforms=input_transforms,
            visualization_transform=None,
        )

    def visualization_type(self) -> str:
        return "real"

    def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
        return FeatureOutput(
            name=self.name,
            base=data,
            modified=data,
            type=self.visualization_type(),
            contribution=contribution_frac,
        )
def _get_classes():
    classes = [
        "Plane",
        "Car",
        "Bird",
        "Cat",
        "Deer",
        "Dog",
        "Frog",
        "Horse",
        "Ship",
        "Truck",
    ]
    return classes
class TinyCnn(nn.Module):
    def __init__(self, feature_extraction=False) -> None:
        super().__init__()
        self.feature_extraction = feature_extraction
        self.conv1 = nn.Conv2d(3, 3, 5)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(2, 2)
        if not self.feature_extraction:
            self.conv2 = nn.Conv2d(3, 10, 2)

    def forward(self, x):
        x = self.pool1(self.relu1(self.conv1(x)))
        if not self.feature_extraction:
            x = self.conv2(x)
            x = x.view(-1, 10)
        else:
            x = x.view(-1, 12)
        return x
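

# Shape note (derived for the 8x8 images produced by _labelled_img_data
# below): conv1 (kernel 5) maps 8x8 -> 4x4, pooling maps 4x4 -> 2x2, so the
# feature-extraction branch flattens 3 x 2 x 2 = 12 features, while conv2
# (kernel 2, 10 channels) reduces 2x2 -> 1x1 for the 10-logit branch.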
class TinyMultiModal(nn.Module):
    def __init__(self, input_size=256, pretrained=False) -> None:
        super().__init__()
        if pretrained:
            self.img_model = _get_cnn(feature_extraction=True)
        else:
            self.img_model = TinyCnn(feature_extraction=True)
        self.misc_model = nn.Sequential(nn.Linear(input_size, 12), nn.ReLU())
        self.fc = nn.Linear(12 * 2, 10)

    def forward(self, img, misc):
        img = self.img_model(img)
        misc = self.misc_model(misc)
        x = torch.cat((img, misc), dim=-1)
        return self.fc(x)
def _labelled_img_data(num_samples=10, width=8, height=8, depth=3, num_labels=10):
    for _ in range(num_samples):
        yield torch.empty(depth, height, width).uniform_(0, 1), torch.randint(
            num_labels, (1,)
        )


def _multi_modal_data(img_dataset, feature_size=256):
    def misc_data(length, feature_size=None):
        for _ in range(length):
            yield torch.randn(feature_size)

    misc_dataset = misc_data(length=len(img_dataset), feature_size=feature_size)
    # re-arrange dataset
    for (img, label), misc in zip(img_dataset, misc_dataset):
        yield ((img, misc), label)


def _get_cnn(feature_extraction=False):
    return TinyCnn(feature_extraction=feature_extraction)


def _get_multimodal(input_size=256):
    return TinyMultiModal(input_size=input_size, pretrained=True)


def to_iter(data_loader):
    # TODO: not sure how to make this cleaner
    for x, y in data_loader:
        # if it's not multi input
        # NOTE: torch.utils.data.DataLoader returns a list in this case
        if not isinstance(x, list):
            x = (x,)
        yield Batch(inputs=tuple(x), labels=y)
class Test(BaseTest):
    def test_one_feature(self) -> None:
        batch_size = 2
        classes = _get_classes()
        dataset = list(
            _labelled_img_data(num_labels=len(classes), num_samples=batch_size)
        )
        # NOTE: using DataLoader to batch the inputs
        # since AttributionVisualizer requires the input to be of size `B x ...`
        data_loader = torch.utils.data.DataLoader(
            list(dataset), batch_size=batch_size, shuffle=False, num_workers=0
        )
        visualizer = AttributionVisualizer(
            models=[_get_cnn()],
            classes=classes,
            features=[
                ImageFeature(
                    "Photo",
                    input_transforms=[lambda x: x],
                    baseline_transforms=[lambda x: x * 0],
                )
            ],
            dataset=to_iter(data_loader),
            score_func=None,
        )
        visualizer._config = FilterConfig(attribution_arguments={"n_steps": 2})
        outputs = visualizer.visualize()
        for output in outputs:
            total_contrib = sum(abs(f.contribution) for f in output[0].feature_outputs)
            self.assertAlmostEqual(total_contrib, 1.0, places=6)

    def test_multi_features(self) -> None:
        batch_size = 2
        classes = _get_classes()
        img_dataset = list(
            _labelled_img_data(num_labels=len(classes), num_samples=batch_size)
        )
        misc_feature_size = 2
        dataset = _multi_modal_data(
            img_dataset=img_dataset, feature_size=misc_feature_size
        )
        # NOTE: using DataLoader to batch the inputs since
        # AttributionVisualizer requires the input to be of size `N x ...`
        data_loader = torch.utils.data.DataLoader(
            list(dataset), batch_size=batch_size, shuffle=False, num_workers=0
        )
        visualizer = AttributionVisualizer(
            models=[_get_multimodal(input_size=misc_feature_size)],
            classes=classes,
            features=[
                ImageFeature(
                    "Photo",
                    input_transforms=[lambda x: x],
                    baseline_transforms=[lambda x: x * 0],
                ),
                RealFeature(
                    "Random",
                    input_transforms=[lambda x: x],
                    baseline_transforms=[lambda x: x * 0],
                ),
            ],
            dataset=to_iter(data_loader),
            score_func=None,
        )
        visualizer._config = FilterConfig(attribution_arguments={"n_steps": 2})
        outputs = visualizer.visualize()
        for output in outputs:
            total_contrib = sum(abs(f.contribution) for f in output[0].feature_outputs)
            self.assertAlmostEqual(total_contrib, 1.0, places=6)

    # TODO: add test for multiple models (related to TODO in captum/insights/api.py)
    #
    # TODO: add a test where all attributions are 0 -- an error occurs;
    # manual testing shows this breaks some existing code


if __name__ == "__main__":
    unittest.main()
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
import collections
from typing import List
import torch
from captum.robust import AttackComparator, FGSM
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel, BasicModel_MultiLayer
from torch import Tensor
def float_metric(model_out: Tensor, target: int):
    return model_out[:, target]


ModelResult = collections.namedtuple("ModelResult", "accuracy output")


def tuple_metric(model_out: Tensor, target: int, named_tuple=False):
    _, pred = torch.max(model_out, dim=1)
    acc = (pred == target).float()
    output = model_out[:, target]
    if named_tuple:
        return ModelResult(
            accuracy=acc.item() if acc.numel() == 1 else acc,
            output=output.item() if output.numel() == 1 else output,
        )
    return (acc, output)
def drop_column_perturb(inp: Tensor, column: int) -> Tensor:
    mask = torch.ones_like(inp)
    mask[:, column] = 0.0
    return mask * inp


def text_preproc_fn(inp: List[str]) -> Tensor:
    return torch.tensor([float(ord(elem[0])) for elem in inp]).unsqueeze(0)


def batch_text_preproc_fn(inp: List[List[str]]) -> Tensor:
    return torch.cat([text_preproc_fn(elem) for elem in inp])


def string_perturb(inp: List[str]) -> List[str]:
    return ["a" + elem for elem in inp]


def string_batch_perturb(inp: List[List[str]]) -> List[List[str]]:
    return [string_perturb(elem) for elem in inp]
class SamplePerturb:
    def __init__(self) -> None:
        self.count = 0

    def perturb(self, inp: Tensor) -> Tensor:
        mask = torch.ones_like(inp)
        mask[:, self.count % mask.shape[1]] = 0.0
        self.count += 1
        return mask * inp
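

# Illustrative note (not part of the original tests): SamplePerturb is
# stateful; each call to perturb zeroes out the next column in turn, e.g.
# three successive calls on a (1, 3) tensor zero columns 0, 1, then 2.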
class Test(BaseTest):
    def test_attack_comparator_basic(self) -> None:
        model = BasicModel()
        inp = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
        attack_comp = AttackComparator(
            forward_func=lambda x: model(x)
            + torch.tensor([[0.000001, 0.0, 0.0, 0.0, 0.0]]),
            metric=tuple_metric,
        )
        attack_comp.add_attack(
            drop_column_perturb,
            name="first_column_perturb",
            attack_kwargs={"column": 0},
        )
        attack_comp.add_attack(
            drop_column_perturb,
            name="last_column_perturb",
            attack_kwargs={"column": -1},
        )
        attack_comp.add_attack(
            FGSM(model),
            attack_kwargs={"epsilon": 0.5},
            additional_attack_arg_names=["target"],
        )
        batch_results = attack_comp.evaluate(inp, target=0, named_tuple=True)
        expected_first_results = {
            "Original": (1.0, 1.0),
            "first_column_perturb": {"mean": (0.0, 0.0)},
            "last_column_perturb": {"mean": (1.0, 1.0)},
            "FGSM": {"mean": (1.0, 1.0)},
        }
        self._compare_results(batch_results, expected_first_results)

        alt_inp = torch.tensor([[1.0, 2.0, -3.0, 4.0, -5.0]])
        second_batch_results = attack_comp.evaluate(alt_inp, target=4, named_tuple=True)
        expected_second_results = {
            "Original": (0.0, -5.0),
            "first_column_perturb": {"mean": (0.0, -5.0)},
            "last_column_perturb": {"mean": (0.0, 0.0)},
            "FGSM": {"mean": (0.0, -4.5)},
        }
        self._compare_results(second_batch_results, expected_second_results)

        expected_summary_results = {
            "Original": {"mean": (0.5, -2.0)},
            "first_column_perturb": {"mean": (0.0, -2.5)},
            "last_column_perturb": {"mean": (0.5, 0.5)},
            "FGSM": {"mean": (0.5, -1.75)},
        }
        self._compare_results(attack_comp.summary(), expected_summary_results)
    def test_attack_comparator_with_preproc(self) -> None:
        model = BasicModel_MultiLayer()
        text_inp = ["abc", "zyd", "ghi"]
        attack_comp = AttackComparator(
            forward_func=model, metric=tuple_metric, preproc_fn=text_preproc_fn
        )
        attack_comp.add_attack(
            SamplePerturb().perturb,
            name="Sequence Column Perturb",
            num_attempts=5,
            apply_before_preproc=False,
        )
        attack_comp.add_attack(
            string_perturb,
            name="StringPerturb",
            apply_before_preproc=True,
        )
        batch_results = attack_comp.evaluate(
            text_inp, target=0, named_tuple=True, perturbations_per_eval=3
        )
        expected_first_results = {
            "Original": (0.0, 1280.0),
            "Sequence Column Perturb": {
                "mean": (0.0, 847.2),
                "max": (0.0, 892.0),
                "min": (0.0, 792.0),
            },
            "StringPerturb": {"mean": (0.0, 1156.0)},
        }
        self._compare_results(batch_results, expected_first_results)

        expected_summary_results = {
            "Original": {"mean": (0.0, 1280.0)},
            "Sequence Column Perturb Mean Attempt": {"mean": (0.0, 847.2)},
            "Sequence Column Perturb Min Attempt": {"mean": (0.0, 792.0)},
            "Sequence Column Perturb Max Attempt": {"mean": (0.0, 892.0)},
            "StringPerturb": {"mean": (0.0, 1156.0)},
        }
        self._compare_results(attack_comp.summary(), expected_summary_results)
    def test_attack_comparator_with_additional_args(self) -> None:
        model = BasicModel_MultiLayer()
        text_inp = [["abc", "zyd", "ghi"], ["mnop", "qrs", "Tuv"]]
        additional_forward_args = torch.ones((2, 3)) * -97
        attack_comp = AttackComparator(
            forward_func=model, metric=tuple_metric, preproc_fn=batch_text_preproc_fn
        )
        attack_comp.add_attack(
            SamplePerturb().perturb,
            name="Sequence Column Perturb",
            num_attempts=5,
            apply_before_preproc=False,
        )
        attack_comp.add_attack(
            string_batch_perturb,
            name="StringPerturb",
            apply_before_preproc=True,
        )
        batch_results = attack_comp.evaluate(
            text_inp,
            additional_forward_args=additional_forward_args,
            target=0,
            named_tuple=True,
            perturbations_per_eval=2,
        )
        expected_first_results = {
            "Original": ([0.0, 0.0], [116.0, 52.0]),
            "Sequence Column Perturb": {
                "mean": ([0.0, 0.0], [-1.0, -1.0]),
                "max": ([0.0, 0.0], [-1.0, -1.0]),
                "min": ([0.0, 0.0], [-1.0, -1.0]),
            },
            "StringPerturb": {"mean": ([0.0, 0.0], [2.0, 2.0])},
        }
        self._compare_results(batch_results, expected_first_results)

        expected_summary_results = {
            "Original": {
                "mean": (0.0, 84.0),
            },
            "Sequence Column Perturb Mean Attempt": {"mean": (0.0, -1.0)},
            "Sequence Column Perturb Min Attempt": {"mean": (0.0, -1.0)},
            "Sequence Column Perturb Max Attempt": {"mean": (0.0, -1.0)},
            "StringPerturb": {"mean": (0.0, 2.0)},
        }
        self._compare_results(attack_comp.summary(), expected_summary_results)

        attack_comp.reset()
        self.assertEqual(len(attack_comp.summary()), 0)
    def _compare_results(self, obtained, expected) -> None:
        if isinstance(expected, dict):
            self.assertIsInstance(obtained, dict)
            for key in expected:
                self._compare_results(obtained[key], expected[key])
        elif isinstance(expected, tuple):
            self.assertIsInstance(obtained, tuple)
            for i in range(len(expected)):
                self._compare_results(obtained[i], expected[i])
        else:
            assertTensorAlmostEqual(self, obtained, expected)
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
from typing import Any, Callable, List, Optional, Tuple, Union
import torch
from captum._utils.typing import TensorLikeList, TensorOrTupleOfTensorsGeneric
from captum.robust import FGSM
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel, BasicModel2, BasicModel_MultiLayer
from torch import Tensor
from torch.nn import CrossEntropyLoss
class Test(BaseTest):
    def test_attack_nontargeted(self) -> None:
        model = BasicModel()
        input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
        self._FGSM_assert(model, input, 1, 0.1, [[2.0, -8.9, 9.0, 1.0, -3.0]])

    def test_attack_targeted(self) -> None:
        model = BasicModel()
        input = torch.tensor([[9.0, 10.0, -6.0, -1.0]])
        self._FGSM_assert(
            model, input, 3, 0.2, [[9.0, 10.0, -6.0, -1.2]], targeted=True
        )

    def test_attack_multiinput(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
        self._FGSM_assert(
            model,
            (input1, input2),
            0,
            0.25,
            ([[3.75, -1.0], [2.75, 10.0]], [[2.25, -5.0], [-2.0, 1.0]]),
        )

    def test_attack_label_list(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
        self._FGSM_assert(
            model,
            (input1, input2),
            [0, 1],
            0.1,
            ([[3.9, -1.0], [3.0, 9.9]], [[2.1, -5.0], [-2.0, 1.1]]),
        )

    def test_attack_label_tensor(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
        labels = torch.tensor([0, 1])
        self._FGSM_assert(
            model,
            (input1, input2),
            labels,
            0.1,
            ([[4.1, -1.0], [3.0, 10.1]], [[1.9, -5.0], [-2.0, 0.9]]),
            targeted=True,
        )

    def test_attack_label_tuple(self) -> None:
        model = BasicModel()
        input = torch.tensor(
            [[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]],
            requires_grad=True,
        )
        labels = (0, 1)
        self._FGSM_assert(
            model,
            input,
            labels,
            0.1,
            [[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -3.9], [10.0, 5.0]]],
        )

    def test_attack_label_listtuple(self) -> None:
        model = BasicModel()
        input = torch.tensor(
            [[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]],
            requires_grad=True,
        )
        labels: List[Tuple[int, ...]] = [(1, 1), (0, 1)]
        self._FGSM_assert(
            model,
            input,
            labels,
            0.1,
            [[[4.0, 2.0], [-1.0, -1.9]], [[3.0, -3.9], [10.0, 5.0]]],
        )

    def test_attack_additional_inputs(self) -> None:
        model = BasicModel_MultiLayer()
        add_input = torch.tensor([[-1.0, 2.0, 2.0]], requires_grad=True)
        input = torch.tensor([[1.0, 6.0, -3.0]], requires_grad=True)
        self._FGSM_assert(
            model, input, 0, 0.2, [[0.8, 5.8, -3.2]], additional_inputs=(add_input,)
        )
        self._FGSM_assert(
            model, input, 0, 0.2, [[0.8, 5.8, -3.2]], additional_inputs=add_input
        )
    def test_attack_loss_defined(self) -> None:
        model = BasicModel_MultiLayer()
        add_input = torch.tensor([[-1.0, 2.0, 2.0]])
        input = torch.tensor([[1.0, 6.0, -3.0]])
        labels = torch.tensor([0])
        loss_func = CrossEntropyLoss(reduction="none")
        adv = FGSM(model, loss_func)
        perturbed_input = adv.perturb(
            input, 0.2, labels, additional_forward_args=(add_input,)
        )
        assertTensorAlmostEqual(
            self, perturbed_input, [[1.0, 6.0, -3.0]], delta=0.01, mode="max"
        )

    def test_attack_bound(self) -> None:
        model = BasicModel()
        input = torch.tensor([[9.0, 10.0, -6.0, -1.0]])
        self._FGSM_assert(
            model,
            input,
            3,
            0.2,
            [[5.0, 5.0, -5.0, -1.2]],
            targeted=True,
            lower_bound=-5.0,
            upper_bound=5.0,
        )
    def test_attack_masked_tensor(self) -> None:
        model = BasicModel()
        input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]], requires_grad=True)
        mask = torch.tensor([[1, 0, 0, 1, 1]])
        self._FGSM_assert(
            model, input, 1, 0.1, [[2.0, -9.0, 9.0, 1.0, -3.0]], mask=mask
        )

    def test_attack_masked_multiinput(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
        mask1 = torch.tensor([[1, 0], [1, 0]])
        mask2 = torch.tensor([[0, 0], [0, 0]])
        self._FGSM_assert(
            model,
            (input1, input2),
            0,
            0.25,
            ([[3.75, -1.0], [2.75, 10.0]], [[2.0, -5.0], [-2.0, 1.0]]),
            mask=(mask1, mask2),
        )

    def test_attack_masked_loss_defined(self) -> None:
        model = BasicModel_MultiLayer()
        add_input = torch.tensor([[-1.0, 2.0, 2.0]])
        input = torch.tensor([[1.0, 6.0, -3.0]])
        labels = torch.tensor([0])
        mask = torch.tensor([[0, 0, 1]])
        loss_func = CrossEntropyLoss(reduction="none")
        adv = FGSM(model, loss_func)
        perturbed_input = adv.perturb(
            input, 0.2, labels, additional_forward_args=(add_input,), mask=mask
        )
        assertTensorAlmostEqual(
            self, perturbed_input, [[1.0, 6.0, -3.0]], delta=0.01, mode="max"
        )

    def test_attack_masked_bound(self) -> None:
        model = BasicModel()
        input = torch.tensor([[9.0, 10.0, -6.0, -1.0]])
        mask = torch.tensor([[1, 0, 1, 0]])
        self._FGSM_assert(
            model,
            input,
            3,
            0.2,
            [[5.0, 5.0, -5.0, -1.0]],
            targeted=True,
            lower_bound=-5.0,
            upper_bound=5.0,
            mask=mask,
        )
    def _FGSM_assert(
        self,
        model: Callable,
        inputs: TensorOrTupleOfTensorsGeneric,
        target: Any,
        epsilon: float,
        answer: Union[TensorLikeList, Tuple[TensorLikeList, ...]],
        targeted=False,
        additional_inputs: Any = None,
        lower_bound: float = float("-inf"),
        upper_bound: float = float("inf"),
        mask: Optional[TensorOrTupleOfTensorsGeneric] = None,
    ) -> None:
        adv = FGSM(model, lower_bound=lower_bound, upper_bound=upper_bound)
        perturbed_input = adv.perturb(
            inputs, epsilon, target, additional_inputs, targeted, mask
        )
        if isinstance(perturbed_input, Tensor):
            assertTensorAlmostEqual(
                self, perturbed_input, answer, delta=0.01, mode="max"
            )
        else:
            for i in range(len(perturbed_input)):
                assertTensorAlmostEqual(
                    self, perturbed_input[i], answer[i], delta=0.01, mode="max"
                )
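

# Illustrative sketch (assumed typical usage, mirroring _FGSM_assert above):
# constructing FGSM directly and perturbing a single input for a
# non-targeted attack against class 1.
def _example_fgsm_usage():
    model = BasicModel()
    adv = FGSM(model)
    inp = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
    return adv.perturb(inp, epsilon=0.1, target=1)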
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
from typing import cast, List
import torch
from captum.robust import MinParamPerturbation
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel, BasicModel_MultiLayer
from torch import Tensor
def inp_subtract(inp: Tensor, ind: int = 0, add_arg: int = 0) -> Tensor:
    inp_repeat = 1.0 * inp
    inp_repeat[0][ind] -= add_arg
    return inp_repeat


def add_char(inp: List[str], ind: int = 0, char_val: int = 0) -> List[str]:
    list_copy = list(inp)
    list_copy[ind] = chr(122 - char_val) + list_copy[ind]
    return list_copy


def add_char_batch(inp: List[List[str]], ind: int, char_val: int) -> List[List[str]]:
    return [add_char(elem, ind, char_val) for elem in inp]


def text_preproc_fn(inp: List[str]) -> Tensor:
    return torch.tensor([float(ord(elem[0])) for elem in inp]).unsqueeze(0)


def batch_text_preproc_fn(inp: List[List[str]]) -> Tensor:
    return torch.cat([text_preproc_fn(elem) for elem in inp])


def alt_correct_fn(model_out: Tensor, target: int, threshold: float) -> bool:
    return bool(all(model_out[:, target] > threshold))
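

# Illustrative note (hedged, based on the calls below): MinParamPerturbation
# treats a correct_fn returning True as "the prediction is still correct";
# evaluate() then searches arg_min..arg_max (in arg_step increments) for the
# smallest attack argument whose perturbation flips correct_fn to False.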
class Test(BaseTest):
    def test_minimal_pert_basic_linear(self) -> None:
        model = BasicModel()
        inp = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
        minimal_pert = MinParamPerturbation(
            forward_func=lambda x: model(x)
            + torch.tensor([[0.000001, 0.0, 0.0, 0.0, 0.0]]),
            attack=inp_subtract,
            arg_name="add_arg",
            arg_min=0.0,
            arg_max=1000.0,
            arg_step=1.0,
        )
        target_inp, pert = minimal_pert.evaluate(
            inp, target=0, attack_kwargs={"ind": 0}
        )
        self.assertAlmostEqual(cast(float, pert), 2.0)
        assertTensorAlmostEqual(
            self, target_inp, torch.tensor([[0.0, -9.0, 9.0, 1.0, -3.0]])
        )

    def test_minimal_pert_basic_binary(self) -> None:
        model = BasicModel()
        inp = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
        minimal_pert = MinParamPerturbation(
            forward_func=lambda x: model(x)
            + torch.tensor([[0.000001, 0.0, 0.0, 0.0, 0.0]]),
            attack=inp_subtract,
            arg_name="add_arg",
            arg_min=0.0,
            arg_max=1000.0,
            arg_step=1.0,
            mode="binary",
        )
        target_inp, pert = minimal_pert.evaluate(
            inp,
            target=0,
            attack_kwargs={"ind": 0},
            perturbations_per_eval=10,
        )
        self.assertAlmostEqual(cast(float, pert), 2.0)
        assertTensorAlmostEqual(
            self, target_inp, torch.tensor([[0.0, -9.0, 9.0, 1.0, -3.0]])
        )
    def test_minimal_pert_preproc(self) -> None:
        model = BasicModel_MultiLayer()
        text_inp = ["abc", "zyd", "ghi"]
        minimal_pert = MinParamPerturbation(
            forward_func=model,
            attack=add_char,
            arg_name="char_val",
            arg_min=0,
            arg_max=26,
            arg_step=1,
            preproc_fn=text_preproc_fn,
            apply_before_preproc=True,
        )
        target_inp, pert = minimal_pert.evaluate(
            text_inp, target=1, attack_kwargs={"ind": 1}
        )
        self.assertEqual(pert, None)
        self.assertEqual(target_inp, None)

    def test_minimal_pert_alt_correct(self) -> None:
        model = BasicModel_MultiLayer()
        text_inp = ["abc", "zyd", "ghi"]
        minimal_pert = MinParamPerturbation(
            forward_func=model,
            attack=add_char,
            arg_name="char_val",
            arg_min=0,
            arg_max=26,
            arg_step=1,
            preproc_fn=text_preproc_fn,
            apply_before_preproc=True,
            correct_fn=alt_correct_fn,
            num_attempts=5,
        )
        expected_list = ["abc", "ezyd", "ghi"]

        target_inp, pert = minimal_pert.evaluate(
            text_inp,
            target=1,
            attack_kwargs={"ind": 1},
            correct_fn_kwargs={"threshold": 1200},
            perturbations_per_eval=5,
        )
        self.assertEqual(pert, 21)
        self.assertListEqual(target_inp, expected_list)

        target_inp_single, pert_single = minimal_pert.evaluate(
            text_inp,
            target=1,
            attack_kwargs={"ind": 1},
            correct_fn_kwargs={"threshold": 1200},
        )
        self.assertEqual(pert_single, 21)
        self.assertListEqual(target_inp_single, expected_list)
    def test_minimal_pert_additional_forward_args(self) -> None:
        model = BasicModel_MultiLayer()
        text_inp = [["abc", "zyd", "ghi"], ["abc", "uyd", "ghi"]]
        additional_forward_args = torch.ones((2, 3)) * -97
        minimal_pert = MinParamPerturbation(
            forward_func=model,
            attack=add_char_batch,
            arg_name="char_val",
            arg_min=0,
            arg_max=26,
            arg_step=1,
            preproc_fn=batch_text_preproc_fn,
            apply_before_preproc=True,
            correct_fn=alt_correct_fn,
        )
        expected_list = [["abc", "uzyd", "ghi"], ["abc", "uuyd", "ghi"]]

        target_inp, pert = minimal_pert.evaluate(
            text_inp,
            target=1,
            attack_kwargs={"ind": 1},
            correct_fn_kwargs={"threshold": 100},
            perturbations_per_eval=15,
            additional_forward_args=(additional_forward_args,),
        )
        self.assertEqual(pert, 5)
        self.assertListEqual(target_inp, expected_list)

        target_inp_single, pert_single = minimal_pert.evaluate(
            text_inp,
            target=1,
            attack_kwargs={"ind": 1},
            correct_fn_kwargs={"threshold": 100},
            additional_forward_args=(additional_forward_args,),
        )
        self.assertEqual(pert_single, 5)
        self.assertListEqual(target_inp_single, expected_list)
    def test_minimal_pert_tuple_test(self) -> None:
        model = BasicModel_MultiLayer()
        text_inp = (
            [["abc", "zyd", "ghi"], ["abc", "uyd", "ghi"]],
            torch.ones((2, 3)) * -97,
        )
        minimal_pert = MinParamPerturbation(
            forward_func=lambda x: model(*x),
            attack=lambda x, ind, char_val: (add_char_batch(x[0], ind, char_val), x[1]),
            arg_name="char_val",
            arg_min=0,
            arg_max=26,
            arg_step=1,
            preproc_fn=lambda x: (batch_text_preproc_fn(x[0]), x[1]),
            apply_before_preproc=True,
            correct_fn=alt_correct_fn,
        )
        expected_list = [["abc", "uzyd", "ghi"], ["abc", "uuyd", "ghi"]]

        target_inp, pert = minimal_pert.evaluate(
            text_inp,
            target=1,
            attack_kwargs={"ind": 1},
            correct_fn_kwargs={"threshold": 100},
            perturbations_per_eval=15,
        )
        self.assertEqual(pert, 5)
        self.assertListEqual(target_inp[0], expected_list)
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
import torch
from captum.robust import PGD
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel, BasicModel2, BasicModel_MultiLayer
from torch.nn import CrossEntropyLoss
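

# Illustrative note (hedged, inferred from the perturb calls below): the
# positional arguments to PGD.perturb are (inputs, radius, step_size,
# step_num, target), e.g. adv.perturb(input, 0.25, 0.1, 2, 4) runs 2 gradient
# steps of size 0.1, projecting back into a ball of radius 0.25 around the
# input, with target class 4.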
class Test(BaseTest):
    def test_attack_nontargeted(self) -> None:
        model = BasicModel()
        input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
        adv = PGD(model)
        perturbed_input = adv.perturb(input, 0.25, 0.1, 2, 4)
        assertTensorAlmostEqual(
            self,
            perturbed_input,
            [[2.0, -9.0, 9.0, 1.0, -2.8]],
            delta=0.01,
            mode="max",
        )

    def test_attack_targeted(self) -> None:
        model = BasicModel()
        input = torch.tensor([[9.0, 10.0, -6.0, -1.0]], requires_grad=True)
        adv = PGD(model)
        perturbed_input = adv.perturb(input, 0.2, 0.1, 3, 3, targeted=True)
        assertTensorAlmostEqual(
            self,
            perturbed_input,
            [[9.0, 10.0, -6.0, -1.2]],
            delta=0.01,
            mode="max",
        )

    def test_attack_l2norm(self) -> None:
        model = BasicModel()
        input = torch.tensor([[9.0, 10.0, -6.0, -1.0]], requires_grad=True)
        adv = PGD(model)
        perturbed_input = adv.perturb(input, 0.2, 0.1, 3, 2, targeted=True, norm="L2")
        assertTensorAlmostEqual(
            self,
            perturbed_input,
            [[9.0, 10.0, -6.2, -1.0]],
            delta=0.01,
            mode="max",
        )
    def test_attack_multiinput(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
        adv = PGD(model)
        perturbed_input = adv.perturb((input1, input2), 0.25, 0.1, 3, 0, norm="L2")
        answer = ([[3.75, -1.0], [2.75, 10.0]], [[2.25, -5.0], [-2.0, 1.0]])
        for i in range(len(perturbed_input)):
            assertTensorAlmostEqual(
                self,
                perturbed_input[i],
                answer[i],
                delta=0.01,
                mode="max",
            )

    def test_attack_3dimensional_input(self) -> None:
        model = BasicModel()
        input = torch.tensor(
            [[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]],
            requires_grad=True,
        )
        adv = PGD(model)
        perturbed_input = adv.perturb(input, 0.25, 0.1, 3, (0, 1))
        assertTensorAlmostEqual(
            self,
            perturbed_input,
            [[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -3.75], [10.0, 5.0]]],
            delta=0.01,
            mode="max",
        )
    def test_attack_loss_defined(self) -> None:
        model = BasicModel_MultiLayer()
        add_input = torch.tensor([[-1.0, 2.0, 2.0]])
        input = torch.tensor([[1.0, 6.0, -3.0]])
        labels = torch.tensor([0])
        loss_func = CrossEntropyLoss(reduction="none")
        adv = PGD(model, loss_func)
        perturbed_input = adv.perturb(
            input, 0.25, 0.1, 3, labels, additional_forward_args=(add_input,)
        )
        assertTensorAlmostEqual(
            self, perturbed_input, [[1.0, 6.0, -3.0]], delta=0.01, mode="max"
        )

    def test_attack_random_start(self) -> None:
        model = BasicModel()
        input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
        adv = PGD(model)
        perturbed_input = adv.perturb(input, 0.25, 0.1, 0, 4, random_start=True)
        assertTensorAlmostEqual(
            self,
            perturbed_input,
            [[2.0, -9.0, 9.0, 1.0, -3.0]],
            delta=0.25,
            mode="max",
        )
        perturbed_input = adv.perturb(
            input, 0.25, 0.1, 0, 4, norm="L2", random_start=True
        )
        norm = torch.norm((perturbed_input - input).squeeze()).numpy()
        self.assertLessEqual(norm, 0.25)
    def test_attack_masked_nontargeted(self) -> None:
        model = BasicModel()
        input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
        mask = torch.tensor([[1, 1, 0, 0, 0]])
        adv = PGD(model)
        perturbed_input = adv.perturb(input, 0.25, 0.1, 2, 4, mask=mask)
        assertTensorAlmostEqual(
            self,
            perturbed_input,
            [[2.0, -9.0, 9.0, 1.0, -3.0]],
            delta=0.01,
            mode="max",
        )

    def test_attack_masked_targeted(self) -> None:
        model = BasicModel()
        input = torch.tensor([[9.0, 10.0, -6.0, -1.0]], requires_grad=True)
        mask = torch.tensor([[1, 1, 1, 0]])
        adv = PGD(model)
        perturbed_input = adv.perturb(input, 0.2, 0.1, 3, 3, targeted=True, mask=mask)
        assertTensorAlmostEqual(
            self,
            perturbed_input,
            [[9.0, 10.0, -6.0, -1.0]],
            delta=0.01,
            mode="max",
        )

    def test_attack_masked_multiinput(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
        mask1 = torch.tensor([[1, 1], [0, 0]])
        mask2 = torch.tensor([[0, 1], [0, 1]])
        adv = PGD(model)
        perturbed_input = adv.perturb(
            (input1, input2), 0.25, 0.1, 3, 0, norm="L2", mask=(mask1, mask2)
        )
        answer = ([[3.75, -1.0], [3.0, 10.0]], [[2.0, -5.0], [-2.0, 1.0]])
        for i in range(len(perturbed_input)):
            assertTensorAlmostEqual(
                self,
                perturbed_input[i],
                answer[i],
                delta=0.01,
                mode="max",
            )
    def test_attack_masked_random_start(self) -> None:
        model = BasicModel()
        input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
        mask = torch.tensor([[1, 0, 1, 0, 1]])
        adv = PGD(model)
        perturbed_input = adv.perturb(
            input, 0.25, 0.1, 0, 4, random_start=True, mask=mask
        )
        assertTensorAlmostEqual(
            self,
            perturbed_input,
            [[2.0, -9.0, 9.0, 1.0, -3.0]],
            delta=0.25,
            mode="max",
        )
        perturbed_input = adv.perturb(
            input, 0.25, 0.1, 0, 4, norm="L2", random_start=True, mask=mask
        )
        norm = torch.norm((perturbed_input - input).squeeze()).numpy()
        self.assertLessEqual(norm, 0.25)

    def test_attack_masked_3dimensional_input(self) -> None:
        model = BasicModel()
        input = torch.tensor(
            [[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]],
            requires_grad=True,
        )
        mask = torch.tensor([[[1, 0], [0, 1]], [[1, 0], [1, 1]]])
        adv = PGD(model)
        perturbed_input = adv.perturb(input, 0.25, 0.1, 3, (0, 1), mask=mask)
        assertTensorAlmostEqual(
            self,
            perturbed_input,
            [[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]],
            delta=0.01,
            mode="max",
        )

    def test_attack_masked_loss_defined(self) -> None:
        model = BasicModel_MultiLayer()
        add_input = torch.tensor([[-1.0, 2.0, 2.0]])
        input = torch.tensor([[1.0, 6.0, -3.0]])
        mask = torch.tensor([[0, 1, 0]])
        labels = torch.tensor([0])
        loss_func = CrossEntropyLoss(reduction="none")
        adv = PGD(model, loss_func)
        perturbed_input = adv.perturb(
            input, 0.25, 0.1, 3, labels, additional_forward_args=(add_input,), mask=mask
        )
        assertTensorAlmostEqual(
            self, perturbed_input, [[1.0, 6.0, -3.0]], delta=0.01, mode="max"
        )
# ---------------------------------------------------------------------------
import inspect
import os
import unittest
from functools import partial
from typing import Callable, Iterator, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from captum.influence import DataInfluence
from captum.influence._core.tracincp_fast_rand_proj import (
    TracInCPFast,
    TracInCPFastRandProj,
)
from parameterized import parameterized
from parameterized.parameterized import param
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
def _isSorted(x, key=lambda x: x, descending=True):
    if descending:
        return all(key(x[i]) >= key(x[i + 1]) for i in range(len(x) - 1))
    else:
        return all(key(x[i]) <= key(x[i + 1]) for i in range(len(x) - 1))
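

# e.g. _isSorted([3, 2, 1]) -> True, while
# _isSorted([1, 2, 3], descending=False) -> True.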
def _wrap_model_in_dataparallel(net):
    alt_device_ids = [0] + [x for x in range(torch.cuda.device_count() - 1, 0, -1)]
    net = net.cuda()
    return torch.nn.DataParallel(net, device_ids=alt_device_ids)


def _move_sample_to_cuda(samples):
    return [s.cuda() for s in samples]
class ExplicitDataset(Dataset):
    def __init__(self, samples, labels, use_gpu=False) -> None:
        self.samples, self.labels = samples, labels
        if use_gpu:
            self.samples = (
                _move_sample_to_cuda(self.samples)
                if isinstance(self.samples, list)
                else self.samples.cuda()
            )
            self.labels = self.labels.cuda()

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, idx):
        return (self.samples[idx], self.labels[idx])
class UnpackDataset(Dataset):
def __init__(self, samples, labels, use_gpu=False) -> None:
self.samples, self.labels = samples, labels
if use_gpu:
self.samples = (
_move_sample_to_cuda(self.samples)
if isinstance(self.samples, list)
else self.samples.cuda()
)
self.labels = self.labels.cuda()
def __len__(self) -> int:
return len(self.samples[0])
def __getitem__(self, idx):
"""
The signature of the returning item is: List[List], where the contents
are: [sample_0, sample_1, ...] + [labels] (two lists concacenated).
"""
return [lst[idx] for lst in self.samples] + [self.labels[idx]]
class IdentityDataset(ExplicitDataset):
def __init__(self, num_features, use_gpu=False) -> None:
self.samples = torch.diag(torch.ones(num_features))
self.labels = torch.zeros(num_features).unsqueeze(1)
if use_gpu:
self.samples = self.samples.cuda()
self.labels = self.labels.cuda()
class RangeDataset(ExplicitDataset):
def __init__(self, low, high, num_features, use_gpu=False) -> None:
self.samples = (
torch.arange(start=low, end=high, dtype=torch.float)
.repeat(num_features, 1)
.transpose(1, 0)
)
self.labels = torch.arange(start=low, end=high, dtype=torch.float).unsqueeze(1)
if use_gpu:
self.samples = self.samples.cuda()
self.labels = self.labels.cuda()
class BinaryDataset(ExplicitDataset):
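    """
    A toy XOR-style dataset of 24 normalized 2D points: points whose
    coordinates share a sign are labeled 1, and points whose coordinates have
    opposite signs are labeled -1.
    """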
def __init__(self, use_gpu=False) -> None:
self.samples = F.normalize(
torch.stack(
(
torch.Tensor([1, 1]),
torch.Tensor([2, 1]),
torch.Tensor([1, 2]),
torch.Tensor([1, 5]),
torch.Tensor([0.01, 1]),
torch.Tensor([5, 1]),
torch.Tensor([1, 0.01]),
torch.Tensor([-1, -1]),
torch.Tensor([-2, -1]),
torch.Tensor([-1, -2]),
torch.Tensor([-1, -5]),
torch.Tensor([-5, -1]),
torch.Tensor([1, -1]),
torch.Tensor([2, -1]),
torch.Tensor([1, -2]),
torch.Tensor([1, -5]),
torch.Tensor([0.01, -1]),
torch.Tensor([5, -1]),
torch.Tensor([-1, 1]),
torch.Tensor([-2, 1]),
torch.Tensor([-1, 2]),
torch.Tensor([-1, 5]),
torch.Tensor([-5, 1]),
torch.Tensor([-1, 0.01]),
)
)
)
self.labels = torch.cat(
(
torch.Tensor([1]).repeat(12, 1),
torch.Tensor([-1]).repeat(12, 1),
)
)
super().__init__(self.samples, self.labels, use_gpu)
class CoefficientNet(nn.Module):
def __init__(self, in_features=1) -> None:
super().__init__()
self.fc1 = nn.Linear(in_features, 1, bias=False)
self.fc1.weight.data.fill_(0.01)
def forward(self, x):
x = self.fc1(x)
return x
class BasicLinearNet(nn.Module):
def __init__(self, in_features, hidden_nodes, out_features) -> None:
super().__init__()
self.linear1 = nn.Linear(in_features, hidden_nodes)
self.linear2 = nn.Linear(hidden_nodes, out_features)
def forward(self, input):
x = torch.tanh(self.linear1(input))
return torch.tanh(self.linear2(x))
class MultLinearNet(nn.Module):
def __init__(self, in_features, hidden_nodes, out_features, num_inputs) -> None:
super().__init__()
self.pre = nn.Linear(in_features * num_inputs, in_features)
self.linear1 = nn.Linear(in_features, hidden_nodes)
self.linear2 = nn.Linear(hidden_nodes, out_features)
def forward(self, *inputs):
"""
The signature of inputs is List[torch.Tensor],
where torch.Tensor has the dimensions [num_inputs x in_features].
It first concacenates the list and a linear layer to reduce the
dimension.
"""
inputs = self.pre(torch.cat(inputs, dim=1))
x = torch.tanh(self.linear1(inputs))
return torch.tanh(self.linear2(x))
def get_random_model_and_data(
tmpdir, unpack_inputs, return_test_data=True, use_gpu=False
):
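    """
    Returns a randomly initialized model (wrapped in DataParallel if `use_gpu`
    is True) and a training dataset, plus test samples and labels if
    `return_test_data` is True. Also saves 5 checkpoints of the (randomly
    re-initialized) model weights to `tmpdir`, which the TracIn
    implementations load during the tests.
    """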
in_features, hidden_nodes, out_features = 5, 4, 3
num_inputs = 2
net = (
BasicLinearNet(in_features, hidden_nodes, out_features)
if not unpack_inputs
else MultLinearNet(in_features, hidden_nodes, out_features, num_inputs)
).double()
num_checkpoints = 5
for i in range(num_checkpoints):
net.linear1.weight.data = torch.normal(
3, 4, (hidden_nodes, in_features)
).double()
net.linear2.weight.data = torch.normal(
5, 6, (out_features, hidden_nodes)
).double()
if unpack_inputs:
net.pre.weight.data = torch.normal(
3, 4, (in_features, in_features * num_inputs)
)
if hasattr(net, "pre"):
net.pre.weight.data = net.pre.weight.data.double()
checkpoint_name = "-".join(["checkpoint-reg", str(i + 1) + ".pt"])
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
num_samples = 50
num_train = 32
all_labels = torch.normal(1, 2, (num_samples, out_features)).double()
train_labels = all_labels[:num_train]
test_labels = all_labels[num_train:]
if unpack_inputs:
all_samples = [
torch.normal(0, 1, (num_samples, in_features)).double()
for _ in range(num_inputs)
]
train_samples = [ts[:num_train] for ts in all_samples]
test_samples = [ts[num_train:] for ts in all_samples]
else:
all_samples = torch.normal(0, 1, (num_samples, in_features)).double()
train_samples = all_samples[:num_train]
test_samples = all_samples[num_train:]
dataset = (
ExplicitDataset(train_samples, train_labels, use_gpu)
if not unpack_inputs
else UnpackDataset(train_samples, train_labels, use_gpu)
)
if return_test_data:
return (
_wrap_model_in_dataparallel(net) if use_gpu else net,
dataset,
_move_sample_to_cuda(test_samples)
if isinstance(test_samples, list) and use_gpu
else test_samples.cuda()
if use_gpu
else test_samples,
test_labels.cuda() if use_gpu else test_labels,
)
else:
return _wrap_model_in_dataparallel(net) if use_gpu else net, dataset
class DataInfluenceConstructor:
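    """
    Helper that bundles a `DataInfluence` subclass with constructor keyword
    arguments, so that parameterized tests can build instances of different
    implementations through a uniform `__call__` interface.
    """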
name: str = ""
data_influence_class: type
def __init__(
self,
data_influence_class: type,
name: Optional[str] = None,
duplicate_loss_fn: bool = False,
**kwargs,
) -> None:
"""
if `duplicate_loss_fn` is True, will explicitly pass the provided `loss_fn` as
the `test_loss_fn` when constructing the TracInCPBase instance
"""
self.data_influence_class = data_influence_class
self.name = name if name else data_influence_class.__name__
self.duplicate_loss_fn = duplicate_loss_fn
self.kwargs = kwargs
def __repr__(self) -> str:
return self.name
def __call__(
self,
net: Module,
dataset: Union[Dataset, DataLoader],
tmpdir: Union[str, List[str], Iterator],
batch_size: Union[int, None],
loss_fn: Optional[Union[Module, Callable]],
**kwargs,
) -> DataInfluence:
constructor_kwargs = self.kwargs.copy()
constructor_kwargs.update(kwargs)
# if `self.duplicate_loss_fn`, explicitly pass in `loss_fn` as `test_loss_fn`
# when constructing the instance. Doing so should not affect the behavior of
# the returned tracincp instance, since if `test_loss_fn` is not passed in,
# the constructor sets `test_loss_fn` to be the same as `loss_fn`
if self.duplicate_loss_fn:
constructor_kwargs["test_loss_fn"] = loss_fn
if self.data_influence_class is TracInCPFastRandProj:
self.check_annoy()
if self.data_influence_class in [TracInCPFast, TracInCPFastRandProj]:
return self.data_influence_class(
net,
list(net.children())[-1],
dataset,
tmpdir,
loss_fn=loss_fn,
batch_size=batch_size,
**constructor_kwargs,
)
else:
return self.data_influence_class(
net,
dataset,
tmpdir,
batch_size=batch_size,
loss_fn=loss_fn,
**constructor_kwargs,
)
def check_annoy(self) -> None:
try:
import annoy # noqa
except ImportError:
raise unittest.SkipTest(
(
f"Skipping tests for {self.data_influence_class.__name__}, "
"because it requires the Annoy module."
)
)
def generate_test_name(
testcase_func: Callable,
param_num: str,
param: param,
args_to_skip: Optional[List[str]] = None,
) -> str:
"""
    Creates human-readable names for parameterized tests
"""
if args_to_skip is None:
args_to_skip = []
param_strs = []
func_param_names = list(inspect.signature(testcase_func).parameters)
# skip the first 'self' parameter
if func_param_names[0] == "self":
func_param_names = func_param_names[1:]
for i, arg in enumerate(param.args):
if func_param_names[i] in args_to_skip:
continue
if isinstance(arg, bool):
if arg:
param_strs.append(func_param_names[i])
else:
args_str = str(arg)
if args_str.isnumeric():
param_strs.append(func_param_names[i])
param_strs.append(args_str)
return "%s_%s" % (
testcase_func.__name__,
parameterized.to_safe_name("_".join(param_strs)),
)
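# For example (illustrative): for a test method
# `test_foo(self, reduction, batch_size, use_gpu)` parameterized with args
# ("sum", 5, True) and no `args_to_skip`, the pieces collected above yield the
# name "test_foo_sum_batch_size_5_use_gpu".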
def build_test_name_func(args_to_skip: Optional[List[str]] = None):
"""
    Returns a function that generates human-readable names for parameterized
    tests
"""
return partial(generate_test_name, args_to_skip=args_to_skip)
def _format_batch_into_tuple(
inputs: Union[Tuple, Tensor], targets: Tensor, unpack_inputs: bool
):
if unpack_inputs:
return (*inputs, targets)
else:
return (inputs, targets)
|
import tempfile
from typing import Callable
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import (
TracInCPFast,
TracInCPFastRandProj,
)
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_format_batch_into_tuple,
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
from torch.utils.data import DataLoader
class TestTracInDataLoader(BaseTest):
"""
    This tests that the influence scores computed when a Dataset is fed to
    `self.tracin_constructor` and when a DataLoader constructed from the same
    Dataset is fed to `self.tracin_constructor` are the same.
"""
@parameterized.expand(
[
(
reduction,
constr,
unpack_inputs,
)
for unpack_inputs in [False, True]
for reduction, constr in [
("none", DataInfluenceConstructor(TracInCP)),
("sum", DataInfluenceConstructor(TracInCPFast)),
("sum", DataInfluenceConstructor(TracInCPFastRandProj)),
(
"sum",
DataInfluenceConstructor(
TracInCPFastRandProj,
name="TracInCPFastRandProj_1DProj",
projection_dim=1,
),
),
]
],
name_func=build_test_name_func(args_to_skip=["reduction"]),
)
def test_tracin_dataloader(
self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 5
(
net,
train_dataset,
test_samples,
test_labels,
) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=True)
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=reduction)
self.assertTrue(callable(tracin_constructor))
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
train_scores = tracin.influence(
_format_batch_into_tuple(test_samples, test_labels, unpack_inputs),
k=None,
)
tracin_dataloader = tracin_constructor(
net,
DataLoader(train_dataset, batch_size=batch_size, shuffle=False),
tmpdir,
None,
criterion,
)
train_scores_dataloader = tracin_dataloader.influence(
_format_batch_into_tuple(test_samples, test_labels, unpack_inputs),
k=None,
)
assertTensorAlmostEqual(
self,
train_scores,
train_scores_dataloader,
delta=0.0,
mode="max",
)
|
import tempfile
from typing import Callable
import torch
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_format_batch_into_tuple,
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
class TestTracInGetKMostInfluential(BaseTest):
use_gpu_list = (
[True, False]
if torch.cuda.is_available() and torch.cuda.device_count() != 0
else [False]
)
param_list = []
for (batch_size, k) in [(4, 7), (7, 4), (40, 5), (5, 40), (40, 45)]:
for unpack_inputs in [True, False]:
for proponents in [True, False]:
for use_gpu in use_gpu_list:
for reduction, constr in [
(
"none",
DataInfluenceConstructor(
TracInCP, name="TracInCP_all_layers"
),
),
(
"none",
DataInfluenceConstructor(
TracInCP,
name="linear2",
layers=["module.linear2"] if use_gpu else ["linear2"],
),
),
]:
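                        # skip combinations that use both
                        # `sample_wise_grads_per_batch` and GPU (DataParallel);
                        # presumably the two are not supported together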
if not (
"sample_wise_grads_per_batch" in constr.kwargs
and constr.kwargs["sample_wise_grads_per_batch"]
and use_gpu
):
param_list.append(
(
reduction,
constr,
unpack_inputs,
proponents,
batch_size,
k,
use_gpu,
)
)
@parameterized.expand(
param_list,
name_func=build_test_name_func(),
)
def test_tracin_k_most_influential(
self,
reduction: str,
tracin_constructor: Callable,
unpack_inputs: bool,
proponents: bool,
batch_size: int,
k: int,
use_gpu: bool,
) -> None:
"""
        This test constructs a random BasicLinearNet, and checks that the
        proponents obtained by calling `influence` and sorting are equal to
        the proponents obtained by calling `_k_most_influential`. Both calls
        are made through the wrapper method `influence`.
"""
with tempfile.TemporaryDirectory() as tmpdir:
(
net,
train_dataset,
test_samples,
test_labels,
) = get_random_model_and_data(
tmpdir,
unpack_inputs,
True,
use_gpu,
)
self.assertTrue(isinstance(reduction, str))
self.assertTrue(callable(tracin_constructor))
criterion = nn.MSELoss(reduction=reduction)
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
train_scores = tracin.influence(
_format_batch_into_tuple(test_samples, test_labels, unpack_inputs),
k=None,
)
sort_idx = torch.argsort(train_scores, dim=1, descending=proponents)[:, 0:k]
idx, _train_scores = tracin.influence(
_format_batch_into_tuple(test_samples, test_labels, unpack_inputs),
k=k,
proponents=proponents,
)
for i in range(len(idx)):
# check that idx[i] is correct
assertTensorAlmostEqual(
self,
train_scores[i, idx[i]],
train_scores[i, sort_idx[i]],
delta=0.0,
mode="max",
)
# check that _train_scores[i] is correct
assertTensorAlmostEqual(
self,
_train_scores[i],
train_scores[i, sort_idx[i]],
delta=0.001,
mode="max",
)
|
import tempfile
from typing import Callable
import torch
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import TracInCPFast
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_format_batch_into_tuple,
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
from torch.utils.data import DataLoader
class TestTracInSelfInfluence(BaseTest):
use_gpu_list = (
[True, False]
if torch.cuda.is_available() and torch.cuda.device_count() != 0
else [False]
)
param_list = []
for unpack_inputs in [True, False]:
for use_gpu in use_gpu_list:
for (reduction, constructor) in [
(
"none",
DataInfluenceConstructor(TracInCP, name="TracInCP_all_layers"),
),
(
"none",
DataInfluenceConstructor(
TracInCP,
name="TracInCP_linear1",
layers=["module.linear1"] if use_gpu else ["linear1"],
),
),
(
"none",
DataInfluenceConstructor(
TracInCP,
name="TracInCP_linear1_linear2",
layers=["module.linear1", "module.linear2"]
if use_gpu
else ["linear1", "linear2"],
),
),
(
"sum",
DataInfluenceConstructor(
TracInCP,
name="TracInCP_sample_wise_grads_per_batch_all_layers",
sample_wise_grads_per_batch=True,
),
),
(
"sum",
DataInfluenceConstructor(
TracInCPFast, "TracInCPFast_last_fc_layer"
),
),
(
"mean",
DataInfluenceConstructor(
TracInCPFast, "TracInCPFast_last_fc_layer"
),
),
]:
if not (
"sample_wise_grads_per_batch" in constructor.kwargs
and constructor.kwargs["sample_wise_grads_per_batch"]
and use_gpu
):
param_list.append((reduction, constructor, unpack_inputs, use_gpu))
@parameterized.expand(
param_list,
name_func=build_test_name_func(),
)
def test_tracin_self_influence(
self,
reduction: str,
tracin_constructor: Callable,
unpack_inputs: bool,
use_gpu: bool,
) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
(net, train_dataset,) = get_random_model_and_data(
tmpdir,
unpack_inputs,
False,
use_gpu,
)
# compute tracin_scores of training data on training data
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
train_scores = tracin.influence(
_format_batch_into_tuple(
train_dataset.samples, train_dataset.labels, unpack_inputs
),
k=None,
)
# calculate self_tracin_scores
self_tracin_scores = tracin.self_influence(
outer_loop_by_checkpoints=False,
)
            # check that self_tracin scores equal the diagonal of influence scores
assertTensorAlmostEqual(
self,
torch.diagonal(train_scores),
self_tracin_scores,
delta=0.01,
mode="max",
)
# check that setting `outer_loop_by_checkpoints=False` and
# `outer_loop_by_checkpoints=True` gives the same self influence scores
self_tracin_scores_by_checkpoints = tracin.self_influence(
DataLoader(train_dataset, batch_size=batch_size),
outer_loop_by_checkpoints=True,
)
assertTensorAlmostEqual(
self,
self_tracin_scores_by_checkpoints,
self_tracin_scores,
delta=0.01,
mode="max",
)
@parameterized.expand(
[
(reduction, constructor, unpack_inputs)
for unpack_inputs in [True, False]
for (reduction, constructor) in [
("none", DataInfluenceConstructor(TracInCP)),
(
"sum",
DataInfluenceConstructor(
TracInCP,
sample_wise_grads_per_batch=True,
),
),
("sum", DataInfluenceConstructor(TracInCPFast)),
("mean", DataInfluenceConstructor(TracInCPFast)),
]
],
name_func=build_test_name_func(),
)
def test_tracin_self_influence_dataloader_vs_single_batch(
self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
) -> None:
# tests that the result of calling the public method `self_influence` for a
# DataLoader of batches is the same as when the batches are collated into a
# single batch
with tempfile.TemporaryDirectory() as tmpdir:
(
net,
train_dataset,
) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=False)
# create a single batch representing the entire dataset
single_batch = next(
iter(DataLoader(train_dataset, batch_size=len(train_dataset)))
)
# create a dataloader that yields batches from the dataset
dataloader = DataLoader(train_dataset, batch_size=5)
# create tracin instance
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
# compute self influence using `self_influence` when passing in a single
# batch
single_batch_self_influence = tracin.self_influence(single_batch)
# compute self influence using `self_influence` when passing in a
# dataloader with the same examples
dataloader_self_influence = tracin.self_influence(dataloader)
# the two self influences should be equal
assertTensorAlmostEqual(
self,
single_batch_self_influence,
dataloader_self_influence,
delta=0.01, # due to numerical issues, we can't set this to 0.0
mode="max",
)
|
import tempfile
from typing import Callable
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import TracInCPFast
from parameterized import parameterized
from tests.helpers.basic import BaseTest
from tests.influence._utils.common import (
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
class TestTracinValidator(BaseTest):
param_list = []
for reduction, constr in [
(
"none",
DataInfluenceConstructor(TracInCP, name="TracInCP"),
),
(
"mean",
DataInfluenceConstructor(
TracInCPFast,
name="TracInCpFast",
),
),
]:
param_list.append((reduction, constr))
@parameterized.expand(
param_list,
name_func=build_test_name_func(),
)
def test_tracin_require_inputs_dataset(
self,
reduction,
tracin_constructor: Callable,
) -> None:
"""
        This test verifies that the influence methods of TracInCP and
        TracInCPFast require `inputs_dataset`.
"""
with tempfile.TemporaryDirectory() as tmpdir:
(
net,
train_dataset,
test_samples,
test_labels,
) = get_random_model_and_data(tmpdir, unpack_inputs=False)
criterion = nn.MSELoss(reduction=reduction)
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
loss_fn=criterion,
batch_size=1,
)
with self.assertRaisesRegex(AssertionError, "required."):
tracin.influence(None, k=None)
|
import os
import tempfile
from collections import OrderedDict
from typing import Callable, cast, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from captum.influence._core.tracincp import TracInCP
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_wrap_model_in_dataparallel,
BasicLinearNet,
BinaryDataset,
build_test_name_func,
DataInfluenceConstructor,
)
class TestTracInXOR(BaseTest):
# TODO: Move test setup to use setUp and tearDown method overrides.
def _test_tracin_xor_setup(self, tmpdir: str, use_gpu: bool = False):
net = BasicLinearNet(2, 2, 1)
        # parameters for 8 consecutive checkpoints:
        # (linear1.weight, linear1.bias, linear2.weight, linear2.bias)
        all_states = [
            (
                [[-1.2956, -1.4465], [-0.3890, -0.7420]],
                [1.2924, 0.0021],
                [[-1.2013, 0.7174]],
                [0.5880],
            ),
            (
                [[-1.3238, -1.4899], [-0.4544, -0.7448]],
                [1.3185, -0.0317],
                [[-1.2342, 0.7741]],
                [0.6234],
            ),
            (
                [[-1.3546, -1.5288], [-0.5250, -0.7591]],
                [1.3432, -0.0684],
                [[-1.2490, 0.8534]],
                [0.6749],
            ),
            (
                [[-1.4022, -1.5485], [-0.5688, -0.7607]],
                [1.3740, -0.1571],
                [[-1.3412, 0.9013]],
                [0.6468],
            ),
            (
                [[-1.4464, -1.5890], [-0.6348, -0.7665]],
                [1.3791, -0.2008],
                [[-1.3818, 0.9586]],
                [0.6954],
            ),
            (
                [[-1.5217, -1.6242], [-0.6644, -0.7842]],
                [1.3500, -0.2418],
                [[-1.4304, 0.9980]],
                [0.7567],
            ),
            (
                [[-1.5551, -1.6631], [-0.7420, -0.8025]],
                [1.3508, -0.2618],
                [[-1.4272, 1.0772]],
                [0.8427],
            ),
            (
                [[-1.5893, -1.6656], [-0.7863, -0.8369]],
                [1.3949, -0.3215],
                [[-1.4555, 1.1600]],
                [0.8730],
            ),
        ]
        for i, (w1, b1, w2, b2) in enumerate(all_states):
            state = OrderedDict(
                [
                    ("linear1.weight", torch.Tensor(w1)),
                    ("linear1.bias", torch.Tensor(b1)),
                    ("linear2.weight", torch.Tensor(w2)),
                    ("linear2.bias", torch.Tensor(b2)),
                ]
            )
            net.load_state_dict(state)
            net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
            checkpoint_name = "-".join(["checkpoint", "class", str(i) + ".pt"])
            torch.save(
                net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name)
            )
        dataset = BinaryDataset(use_gpu)
        return net_adjusted, dataset
parametrized_list = [
(
"none",
DataInfluenceConstructor(
TracInCP, name="TracInCP_linear1", layers=["linear1"]
),
"check_idx",
False,
),
(
"none",
DataInfluenceConstructor(TracInCP, name="TracInCP_all_layers"),
"check_idx",
False,
),
(
None,
DataInfluenceConstructor(TracInCP, name="TracInCP_all_layers"),
"sample_wise_trick",
False,
),
(
None,
DataInfluenceConstructor(
TracInCP, name="TracInCP_linear1_linear2", layers=["linear1", "linear2"]
),
"sample_wise_trick",
False,
),
]
if torch.cuda.is_available() and torch.cuda.device_count() != 0:
parametrized_list.extend(
[
(
"none",
DataInfluenceConstructor(TracInCP, name="TracInCP_all_layers"),
"check_idx",
True,
),
(
"none",
DataInfluenceConstructor(
TracInCP,
name="TracInCP_linear1_linear2",
layers=["module.linear1", "module.linear2"],
),
"check_idx",
True,
),
],
)
@parameterized.expand(
parametrized_list,
name_func=build_test_name_func(args_to_skip=["reduction"]),
)
def test_tracin_xor(
self,
reduction: Optional[str],
tracin_constructor: Callable,
mode: str,
use_gpu: bool,
) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 4
net, dataset = self._test_tracin_xor_setup(tmpdir, use_gpu)
testset = F.normalize(torch.empty(100, 2).normal_(mean=0, std=0.5), dim=1)
mask = ~torch.logical_xor(testset[:, 0] > 0, testset[:, 1] > 0)
testlabels = (
torch.where(mask, torch.tensor(1), torch.tensor(-1))
.unsqueeze(1)
.float()
)
if use_gpu:
testset = testset.cuda()
testlabels = testlabels.cuda()
self.assertTrue(callable(tracin_constructor))
if mode == "check_idx":
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=cast(str, reduction))
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
)
test_scores = tracin.influence((testset, testlabels))
idx = torch.argsort(test_scores, dim=1, descending=True)
# check that top 5 influences have matching binary classification
for i in range(len(idx)):
influence_labels = dataset.labels[idx[i][0:5], 0]
self.assertTrue(torch.all(testlabels[i, 0] == influence_labels))
if mode == "sample_wise_trick":
criterion = nn.MSELoss(reduction="none")
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=False,
)
# With sample-wise trick
criterion = nn.MSELoss(reduction="sum")
tracin_sample_wise_trick = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=True,
)
test_scores = tracin.influence((testset, testlabels))
test_scores_sample_wise_trick = tracin_sample_wise_trick.influence(
(testset, testlabels)
)
assertTensorAlmostEqual(
self, test_scores, test_scores_sample_wise_trick
)
|
import os
import tempfile
from typing import Callable, cast, Optional
import torch
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import (
TracInCPFast,
TracInCPFastRandProj,
)
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_isSorted,
_wrap_model_in_dataparallel,
build_test_name_func,
CoefficientNet,
DataInfluenceConstructor,
IdentityDataset,
RangeDataset,
)
class TestTracInRegression(BaseTest):
def _test_tracin_regression_setup(
self, tmpdir: str, features: int, use_gpu: bool = False
):
low = 1
high = 17
dataset = RangeDataset(low, high, features, use_gpu)
net = CoefficientNet(in_features=features)
checkpoint_name = "-".join(["checkpoint-reg", "0" + ".pt"])
torch.save(net.state_dict(), os.path.join(tmpdir, checkpoint_name))
weights = [0.4379, 0.1653, 0.5132, 0.3651, 0.9992]
for i, weight in enumerate(weights):
net.fc1.weight.data.fill_(weight)
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
checkpoint_name = "-".join(["checkpoint-reg", str(i + 1) + ".pt"])
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
return dataset, net_adjusted
use_gpu_list = (
[True, False]
if torch.cuda.is_available() and torch.cuda.device_count() != 0
else [False]
)
param_list = []
for use_gpu in use_gpu_list:
for dim in [1, 20]:
for (mode, reduction, constructor) in [
(
"check_idx",
"none",
DataInfluenceConstructor(TracInCP, name="TracInCP_all_layers"),
),
(
"check_idx",
"none",
DataInfluenceConstructor(
TracInCP,
name="TracInCP_fc1",
layers=["module.fc1"] if use_gpu else ["fc1"],
),
),
(
"sample_wise_trick",
None,
DataInfluenceConstructor(TracInCP, name="TracInCP_fc1"),
),
(
"check_idx",
"sum",
DataInfluenceConstructor(
TracInCPFast, name="TracInCPFast_last_fc_layer"
),
),
(
"check_idx",
"sum",
DataInfluenceConstructor(
TracInCPFastRandProj, name="TracInCPFast_last_fc_layer"
),
),
(
"check_idx",
"mean",
DataInfluenceConstructor(
TracInCPFast, name="TracInCPFast_last_fc_layer"
),
),
(
"check_idx",
"mean",
DataInfluenceConstructor(
TracInCPFastRandProj, name="TracInCPFastRandProj_last_fc_layer"
),
),
(
"check_idx",
"sum",
DataInfluenceConstructor(
TracInCPFastRandProj,
name="TracInCPFastRandProj1DimensionalProjection_last_fc_layer",
projection_dim=1,
),
),
(
"check_idx",
"mean",
DataInfluenceConstructor(
TracInCPFast,
name="TracInCPFastDuplicateLossFn",
duplicate_loss_fn=True,
),
), # add a test where `duplicate_loss_fn` is True
(
"check_idx",
"mean",
DataInfluenceConstructor(
TracInCPFastRandProj,
name="TracInCPFastRandProjDuplicateLossFn",
duplicate_loss_fn=True,
), # add a test where `duplicate_loss_fn` is True
),
]:
if not (mode == "sample_wise_trick" and use_gpu):
param_list.append((reduction, constructor, mode, dim, use_gpu))
@parameterized.expand(
param_list,
name_func=build_test_name_func(args_to_skip=["reduction"]),
)
def test_tracin_regression(
self,
reduction: Optional[str],
tracin_constructor: Callable,
mode: str,
features: int,
use_gpu: bool,
) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 4
dataset, net = self._test_tracin_regression_setup(
tmpdir,
features,
use_gpu,
            )
# check influence scores of training data
train_inputs = dataset.samples
train_labels = dataset.labels
test_inputs = (
torch.arange(17, 33, dtype=torch.float).unsqueeze(1).repeat(1, features)
)
if use_gpu:
test_inputs = test_inputs.cuda()
test_labels = test_inputs
self.assertTrue(callable(tracin_constructor))
if mode == "check_idx":
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=cast(str, reduction))
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
)
train_scores = tracin.influence((train_inputs, train_labels))
idx, _ = tracin.influence(
(train_inputs, train_labels), k=len(dataset), proponents=True
)
# check that top influence is one with maximal value
# (and hence gradient)
for i in range(len(idx)):
self.assertEqual(idx[i][0], 15)
# check influence scores of test data
test_scores = tracin.influence((test_inputs, test_labels))
idx, _ = tracin.influence(
(test_inputs, test_labels), k=len(test_inputs), proponents=True
)
                # check that the influences are in sorted order, i.e. training
                # examples with larger values (and hence gradients) have
                # larger influence
for i in range(len(idx)):
self.assertTrue(_isSorted(idx[i]))
if mode == "sample_wise_trick":
criterion = nn.MSELoss(reduction="none")
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=False,
)
# With sample-wise trick
criterion = nn.MSELoss(reduction="sum")
tracin_sample_wise_trick = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=True,
)
train_scores = tracin.influence((train_inputs, train_labels))
train_scores_sample_wise_trick = tracin_sample_wise_trick.influence(
(train_inputs, train_labels)
)
assertTensorAlmostEqual(
self, train_scores, train_scores_sample_wise_trick
)
test_scores = tracin.influence((test_inputs, test_labels))
test_scores_sample_wise_trick = tracin_sample_wise_trick.influence(
(test_inputs, test_labels)
)
assertTensorAlmostEqual(
self, test_scores, test_scores_sample_wise_trick
)
@parameterized.expand(
[
(
"sum",
DataInfluenceConstructor(TracInCP, sample_wise_grads_per_batch=True),
),
("sum", DataInfluenceConstructor(TracInCPFast)),
("sum", DataInfluenceConstructor(TracInCPFastRandProj)),
("mean", DataInfluenceConstructor(TracInCPFast)),
("mean", DataInfluenceConstructor(TracInCPFastRandProj)),
],
name_func=build_test_name_func(),
)
def test_tracin_regression_1D_numerical(
self, reduction: str, tracin_constructor: Callable
) -> None:
low = 1
high = 17
features = 1
dataset = RangeDataset(low, high, features)
net = CoefficientNet()
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=cast(str, reduction))
batch_size = 4
weights = [0.4379, 0.1653, 0.5132, 0.3651, 0.9992]
train_inputs = dataset.samples
train_labels = dataset.labels
with tempfile.TemporaryDirectory() as tmpdir:
for i, weight in enumerate(weights):
net.fc1.weight.data.fill_(weight)
checkpoint_name = "-".join(["checkpoint-reg", str(i + 1) + ".pt"])
torch.save(net.state_dict(), os.path.join(tmpdir, checkpoint_name))
self.assertTrue(callable(tracin_constructor))
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
)
train_scores = tracin.influence((train_inputs, train_labels), k=None)
r"""
Derivation for gradient / resulting TracIn score:
For each checkpoint: $y = Wx,$ and $loss = (y - label)^2.$ Recall for this
test case, there is no activation on y. For this example, $label = x.$
Fast Rand Proj gives $\nabla_W loss = \nabla_y loss (x^T).$ We have $x$ and
y as scalars so we can simply multiply. So then,
\[\nabla_y loss * x = 2(y-x)*x = 2(Wx -x)*x = 2x^2 (w - 1).\]
And we simply multiply these for x, x'. In this case, $x, x' \in [1..16]$.
"""
for i in range(train_scores.shape[0]):
for j in range(len(train_scores[0])):
_weights = torch.Tensor(weights)
num = 2 * (i + 1) * (i + 1) * (_weights - 1)
num *= 2 * (j + 1) * (j + 1) * (_weights - 1)
assertTensorAlmostEqual(
self, torch.sum(num), train_scores[i][j], delta=0.1
)
def _test_tracin_identity_regression_setup(self, tmpdir: str):
num_features = 7
dataset = IdentityDataset(num_features)
net = CoefficientNet()
num_checkpoints = 5
for i in range(num_checkpoints):
net.fc1.weight.data = torch.rand((1, num_features))
checkpoint_name = "-".join(["checkpoint-reg", str(i) + ".pt"])
torch.save(net.state_dict(), os.path.join(tmpdir, checkpoint_name))
return dataset, net
@parameterized.expand(
[
("check_idx", "none", DataInfluenceConstructor(TracInCP)),
("check_idx", "none", DataInfluenceConstructor(TracInCP, layers=["fc1"])),
("sample_wise_trick", None, DataInfluenceConstructor(TracInCP)),
(
"sample_wise_trick",
None,
DataInfluenceConstructor(TracInCP, layers=["fc1"]),
),
("check_idx", "sum", DataInfluenceConstructor(TracInCPFast)),
("check_idx", "sum", DataInfluenceConstructor(TracInCPFastRandProj)),
("check_idx", "mean", DataInfluenceConstructor(TracInCPFast)),
("check_idx", "mean", DataInfluenceConstructor(TracInCPFastRandProj)),
],
name_func=build_test_name_func(),
)
def test_tracin_identity_regression(
self, mode: str, reduction: Optional[str], tracin_constructor: Callable
) -> None:
"""
This test uses a linear model with positive coefficients, where input feature
matrix is the identity matrix. Since the dot product between 2 different
training instances is always 0, when calculating influence scores on the
training data, only self influence scores will be nonzero. Since the linear
model has positive coefficients, self influence scores will be positive.
Thus, the training instance with the largest influence on another training
instance is itself.
"""
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 4
dataset, net = self._test_tracin_identity_regression_setup(tmpdir)
train_inputs = dataset.samples
train_labels = dataset.labels
self.assertTrue(callable(tracin_constructor))
if mode == "check_idx":
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=cast(str, reduction))
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
)
# check influence scores of training data
train_scores = tracin.influence((train_inputs, train_labels))
idx, _ = tracin.influence(
(train_inputs, train_labels), k=len(dataset), proponents=True
)
# check that top influence for an instance is itself
for i in range(len(idx)):
self.assertEqual(idx[i][0], i)
if mode == "sample_wise_trick":
criterion = nn.MSELoss(reduction="none")
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=False,
)
# With sample-wise trick
criterion = nn.MSELoss(reduction="sum")
tracin_sample_wise_trick = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=True,
)
train_scores = tracin.influence((train_inputs, train_labels))
train_scores_tracin_sample_wise_trick = (
tracin_sample_wise_trick.influence((train_inputs, train_labels))
)
assertTensorAlmostEqual(
self, train_scores, train_scores_tracin_sample_wise_trick
)
@parameterized.expand(
[
("none", "none", DataInfluenceConstructor(TracInCP)),
(
"mean",
"mean",
DataInfluenceConstructor(TracInCP, sample_wise_grads_per_batch=True),
),
("sum", "sum", DataInfluenceConstructor(TracInCPFast)),
("mean", "mean", DataInfluenceConstructor(TracInCPFast)),
("sum", "sum", DataInfluenceConstructor(TracInCPFastRandProj)),
("mean", "mean", DataInfluenceConstructor(TracInCPFastRandProj)),
],
name_func=build_test_name_func(),
)
def test_tracin_constant_test_loss_fn(
self,
reduction: Optional[str],
test_reduction: Optional[str],
tracin_constructor: Callable,
) -> None:
"""
All implementations of `TracInCPBase` can accept `test_loss_fn` in
initialization, which sets the loss function applied to test examples, which
can thus be different from the loss function applied to training examples.
        This test passes a constant function as `test_loss_fn`. Then, the influence
scores should all be 0, because gradients w.r.t. `test_loss_fn` will all be 0.
It re-uses the dataset and model from `test_tracin_identity_regression`.
The reduction for `loss_fn` and `test_loss_fn` initialization arguments is
the same for all parameterized tests, for simplicity, and also because for
        `TracInCP`, the two loss functions must both be reduction loss functions (i.e.
reduction is "mean" or "sum"), or both be per-example loss functions (i.e.
reduction is "none"). Recall that for `TracInCP`, the
`sample_wise_grads_per_batch` initialization argument determines which of
those cases holds.
"""
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 4
dataset, net = self._test_tracin_identity_regression_setup(tmpdir)
train_inputs = dataset.samples
train_labels = dataset.labels
self.assertTrue(callable(tracin_constructor))
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=cast(str, reduction))
# the output of `net`, i.e. `input` for the loss functions below, is a
# batch_size x 1 2D tensor
if test_reduction == "none":
# loss function returns 1D tensor of all 0's, so is constant
def test_loss_fn(input, target):
return input.squeeze() * 0.0
elif test_reduction in ["sum", "mean"]:
# loss function returns scalar tensor of all 0's, so is constant
def test_loss_fn(input, target):
return input.mean() * 0.0
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
test_loss_fn=test_loss_fn,
)
# check influence scores of training data. they should all be 0
train_scores = tracin.influence((train_inputs, train_labels), k=None)
assertTensorAlmostEqual(self, train_scores, torch.zeros(train_scores.shape))
|
import io
import tempfile
import unittest
import unittest.mock
from typing import Callable
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import TracInCPFast
from parameterized import parameterized
from tests.helpers.basic import BaseTest
from tests.influence._utils.common import (
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
from torch.utils.data import DataLoader
class TestTracInShowProgress(BaseTest):
"""
This tests that the progress bar correctly shows a "100%" message at some point in
the relevant computations. Progress bars are shown for calls to the `influence`
method for all 3 modes. This is why 3 different modes are tested, and the mode
being tested is a parameter in the test. `TracInCPFastRandProj.influence` is not
tested, because none of its modes involve computations over the entire training
    dataset, so that no progress bar is shown (the computation is instead done
    in `TracInCPFastRandProj.__init__`). TODO: add a progress bar for the
    computations done in `TracInCPFastRandProj.__init__`.
"""
def _check_error_msg_multiplicity(
self,
mock_stderr: io.StringIO,
msg: str,
msg_multiplicity: int,
greater_than: bool = True,
):
"""
Checks that in `mock_stderr`, the error msg `msg` occurs `msg_multiplicity`
times. If 'greater_than' is true, it checks that the `msg` occurs at least
`msg_multiplicity` times. Otherwise, it checks that `msg` occurs exactly
        `msg_multiplicity` times. `greater_than` is True by default because
        tqdm sometimes displays "100%" more than once per progress bar while
        correcting its estimate of it/s: it may remove the original "100%" and
        then re-display "100%" with the updated estimate.
"""
output = mock_stderr.getvalue()
actual_msg_multiplicity = output.count(msg)
assert isinstance(actual_msg_multiplicity, int)
        error_msg = (
            f"Error in progress of batches: expected '{msg}'"
            f" at least {msg_multiplicity} times"
            f" (found {actual_msg_multiplicity}) in {repr(output)}"
        )
if greater_than:
self.assertGreaterEqual(
actual_msg_multiplicity, msg_multiplicity, error_msg
)
else:
self.assertEqual(
actual_msg_multiplicity,
msg_multiplicity,
error_msg,
)
@parameterized.expand(
[
(
reduction,
constr,
mode,
)
for reduction, constr in [
(
"none",
DataInfluenceConstructor(TracInCP),
),
(
"sum",
DataInfluenceConstructor(TracInCPFast),
),
]
for mode in [
"self influence by checkpoints",
"self influence by batches",
"influence",
"k-most",
]
],
name_func=build_test_name_func(args_to_skip=["reduction"]),
)
def test_tracin_show_progress(
self,
reduction: str,
tracin_constructor: Callable,
mode: str,
) -> None:
with unittest.mock.patch("sys.stderr", new_callable=io.StringIO) as mock_stderr:
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 5
(
net,
train_dataset,
test_samples,
test_labels,
) = get_random_model_and_data(
tmpdir, unpack_inputs=False, return_test_data=True
)
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=reduction)
self.assertTrue(callable(tracin_constructor))
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
if mode == "self influence by checkpoints":
# this tests progress for computing self influence scores, when
# `outer_loop_by_checkpoints` is True. In this case, we should see a
# single outer progress bar over checkpoints, and for every
# checkpoints, a separate progress bar over batches
tracin.self_influence(
DataLoader(train_dataset, batch_size=batch_size),
show_progress=True,
outer_loop_by_checkpoints=True,
)
# We are showing nested progress bars for the `self_influence`
# method, with the outer progress bar over checkpoints, and
# the inner progress bar over batches. First, we check that
# the outer progress bar reaches 100% once
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to compute self influence. "
"Processing checkpoint: 100%"
),
1,
)
# Second, we check that the inner progress bar reaches 100%
# once for each checkpoint in `tracin.checkpoints`
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to compute self influence. "
"Processing batch: 100%"
),
len(tracin.checkpoints),
)
elif mode == "self influence by batches":
# This tests progress for computing self influence scores, when
# `outer_loop_by_checkpoints` is False. In this case, we should see
# a single outer progress bar over batches.
tracin.self_influence(
DataLoader(train_dataset, batch_size=batch_size),
show_progress=True,
outer_loop_by_checkpoints=False,
)
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to compute self influence. "
"Processing batch: 100%"
),
1,
)
elif mode == "influence":
tracin.influence(
(test_samples, test_labels),
k=None,
show_progress=True,
)
# Since the computation iterates once over training batches, we
# check that the progress bar over batches reaches 100% once
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to compute influence "
"for training batches: 100%"
),
1,
)
elif mode == "k-most":
tracin.influence(
(test_samples, test_labels),
k=2,
proponents=True,
show_progress=True,
)
# Since the computation iterates once over training batches, we
# check that the progress bar over batches reaches 100% once, and
# that the message is specific for finding proponents.
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to perform computation for "
"getting proponents. Processing training batches: 100%"
),
1,
)
mock_stderr.seek(0)
mock_stderr.truncate(0)
tracin.influence(
(test_samples, test_labels),
k=2,
proponents=False,
show_progress=True,
)
# Since the computation iterates once over training batches, we
# check that the progress bar over batches reaches 100% once, and
# that the message is specific for finding opponents.
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to perform computation for "
"getting opponents. Processing training batches: 100%"
),
1,
)
else:
raise Exception("unknown test mode")
mock_stderr.seek(0)
mock_stderr.truncate(0)
|
import tempfile
from typing import Callable
import torch
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import (
TracInCPFast,
TracInCPFastRandProj,
)
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_format_batch_into_tuple,
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
from torch.utils.data import DataLoader
class TestTracInIntermediateQuantities(BaseTest):
@parameterized.expand(
[
(reduction, constructor, unpack_inputs)
for unpack_inputs in [True, False]
for (reduction, constructor) in [
("none", DataInfluenceConstructor(TracInCP)),
]
],
name_func=build_test_name_func(),
)
def test_tracin_intermediate_quantities_aggregate(
self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
) -> None:
"""
        Tests that calling `compute_intermediate_quantities` with
        `aggregate=True` gives the same result as calling it with
        `aggregate=False` and then summing.
"""
with tempfile.TemporaryDirectory() as tmpdir:
(net, train_dataset,) = get_random_model_and_data(
tmpdir,
unpack_inputs,
return_test_data=False,
)
# create a dataloader that yields batches from the dataset
train_dataset = DataLoader(train_dataset, batch_size=5)
# create tracin instance
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
intermediate_quantities = tracin.compute_intermediate_quantities(
train_dataset, aggregate=False
)
aggregated_intermediate_quantities = tracin.compute_intermediate_quantities(
train_dataset, aggregate=True
)
assertTensorAlmostEqual(
self,
torch.sum(intermediate_quantities, dim=0, keepdim=True),
aggregated_intermediate_quantities,
delta=1e-4, # due to numerical issues, we can't set this to 0.0
mode="max",
)
@parameterized.expand(
[
(reduction, constructor, unpack_inputs)
for unpack_inputs in [True, False]
for (reduction, constructor) in [
("sum", DataInfluenceConstructor(TracInCPFastRandProj)),
("none", DataInfluenceConstructor(TracInCP)),
]
],
name_func=build_test_name_func(),
)
def test_tracin_intermediate_quantities_api(
self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
) -> None:
"""
        Tests that the result of calling the public method
`compute_intermediate_quantities` for a DataLoader of batches is the same as
when the batches are collated into a single batch
"""
with tempfile.TemporaryDirectory() as tmpdir:
(net, train_dataset,) = get_random_model_and_data(
tmpdir,
unpack_inputs,
return_test_data=False,
)
# create a single batch representing the entire dataset
single_batch = next(
iter(DataLoader(train_dataset, batch_size=len(train_dataset)))
)
# create a dataloader that yields batches from the dataset
dataloader = DataLoader(train_dataset, batch_size=5)
# create tracin instance
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
# compute intermediate quantities using `compute_intermediate_quantities`
# when passing in a single batch
single_batch_intermediate_quantities = (
tracin.compute_intermediate_quantities(single_batch)
)
# compute intermediate quantities using `compute_intermediate_quantities`
# when passing in a dataloader with the same examples
dataloader_intermediate_quantities = tracin.compute_intermediate_quantities(
dataloader,
)
            # the two sets of intermediate quantities should be equal
assertTensorAlmostEqual(
self,
single_batch_intermediate_quantities,
dataloader_intermediate_quantities,
delta=0.01, # due to numerical issues, we can't set this to 0.0
mode="max",
)
@parameterized.expand(
[
(
reduction,
constructor,
intermediate_quantities_tracin_constructor,
unpack_inputs,
)
for unpack_inputs in [True, False]
for (
reduction,
constructor,
intermediate_quantities_tracin_constructor,
) in [
(
"sum",
DataInfluenceConstructor(TracInCPFast),
DataInfluenceConstructor(TracInCPFastRandProj),
),
(
"none",
DataInfluenceConstructor(TracInCP),
DataInfluenceConstructor(TracInCP),
),
]
],
name_func=build_test_name_func(),
)
def test_tracin_intermediate_quantities_consistent(
self,
reduction: str,
tracin_constructor: Callable,
intermediate_quantities_tracin_constructor: Callable,
unpack_inputs: bool,
) -> None:
"""
        Since the influence score of a test batch on a training batch should
        be the dot product of their intermediate quantities, this test checks
        that this is the case, by computing the influence score in 2 different
        ways and checking they give the same results: 1) with the `influence`
        method, and 2) by using the `compute_intermediate_quantities` method
        on the test and training data and taking the dot product. No
        projection should be done; otherwise, the projection would introduce
        error. For 1), we use an implementation that does not use intermediate
        quantities, i.e. `TracInCPFast`. For 2), we use a method that does use
        intermediate quantities, i.e. `TracInCPFastRandProj`. Since the
        methods for the 2 cases are different, we parametrize the test with 2
        different tracin constructors. `tracin_constructor` is the constructor
        for the tracin implementation for case 1, and
        `intermediate_quantities_tracin_constructor` is the constructor for
        the tracin implementation for case 2.
"""
with tempfile.TemporaryDirectory() as tmpdir:
(
net,
train_dataset,
test_features,
test_labels,
) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=True)
# create a dataloader that yields batches from the dataset
train_dataset = DataLoader(train_dataset, batch_size=5)
# create tracin instance
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
# create tracin instance which exposes `intermediate_quantities`
intermediate_quantities_tracin = intermediate_quantities_tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
# compute influence scores without using `compute_intermediate_quantities`
test_batch = _format_batch_into_tuple(
test_features, test_labels, unpack_inputs
)
scores = tracin.influence(
test_batch,
)
# the influence score is the dot product of intermediate quantities
intermediate_quantities_scores = torch.matmul(
intermediate_quantities_tracin.compute_intermediate_quantities(
test_batch
),
intermediate_quantities_tracin.compute_intermediate_quantities(
train_dataset
).T,
)
# the scores computed using the two methods should be the same
assertTensorAlmostEqual(
self,
scores,
intermediate_quantities_scores,
delta=0.01, # due to numerical issues, we can't set this to 0.0
mode="max",
)
@parameterized.expand(
[
(reduction, constructor, projection_dim, unpack_inputs)
for unpack_inputs in [False]
for (reduction, constructor, projection_dim) in [
("sum", DataInfluenceConstructor(TracInCPFastRandProj), None),
("sum", DataInfluenceConstructor(TracInCPFastRandProj), 2),
("sum", DataInfluenceConstructor(TracInCPFastRandProj), 4),
("sum", DataInfluenceConstructor(TracInCPFastRandProj), 9),
("sum", DataInfluenceConstructor(TracInCPFastRandProj), 10),
("sum", DataInfluenceConstructor(TracInCPFastRandProj), 12),
]
],
name_func=build_test_name_func(),
)
def test_tracin_intermediate_quantities_projection_consistency(
self,
reduction: str,
tracin_constructor: Callable,
projection_dim: int,
unpack_inputs: bool,
) -> None:
"""
        Tests that calling the public method `compute_intermediate_quantities`
        on TracInCPFastRandProj, with or without `projection_dim`, gives an
        embedding of the correct size. If `projection_dim` is None, the size
        should be (dim of input to final layer) * (num classes) *
        (num checkpoints). Otherwise, it should be "at most" `projection_dim`
        * (num checkpoints). See the inline comments for the "at most" caveat.
"""
with tempfile.TemporaryDirectory() as tmpdir:
(net, train_dataset,) = get_random_model_and_data(
tmpdir,
unpack_inputs,
return_test_data=False,
)
# create a single batch
batch_size = 1
single_batch = next(iter(DataLoader(train_dataset, batch_size=batch_size)))
# NOW add projection_dim as a parameter passed in
kwargs = {"projection_dim": projection_dim}
# create tracin instance
criterion = nn.MSELoss(reduction=reduction)
tracin = tracin_constructor(
net, train_dataset, tmpdir, batch_size, criterion, **kwargs
)
# compute intermediate quantities using `compute_intermediate_quantities`
# when passing in a single batch
single_batch_intermediate_quantities = (
tracin.compute_intermediate_quantities(single_batch)
)
"""
net has
in_features = 5,
hidden_nodes (layer_input_dim) = 4,
out_features (jacobian_dim) = 3
and 5 checkpoints
projection only happens
(A) if project_dim < layer_input_dim * jacobian_dim ( 4 * 3 = 12 here )
also if jacobian_dim < int(sqrt(projection dim)),
then jacobian_dim is not projected down
similarly if layer_input_dim < int(sqrt(projection dim)),
then it is not projected down
in other words,
jacobian_dim_post = min(jacobian_dim, int(sqrt(projection dim)))
layer_input_dim_post = min(layer_input_dim, int(sqrt(projection dim)))
and if not None and projection_dim < layer_input_dim * jacobian_dim
(B) final_projection_dim =
jacobian_dim_post * layer_input_dim_post * num_checkpoints
if project dim = None we expect final dimension size of
layer_input * jacobian_dim * num checkpoints = 4 * 3 * 5 = 60 dimension
otherwise using (B) if
project dim = 2 we expect 1 * 1 * 5 = 5
project dim = 4 we expect 2 * 2 * 5 = 20
project dim = 9 we expect 3 * 3 * 5 = 45
project dim = 10 we expect 3 * 3 * 5 = 45
project dim = 12 we expect 4 * 3 * 5 = 60 ( don't project since not (A))
"""
expected_dim = {None: 60, 2: 5, 4: 20, 9: 45, 10: 45, 12: 60}
self.assertEqual(
expected_dim[projection_dim],
single_batch_intermediate_quantities.shape[1],
)
|
import tempfile
from typing import List
import torch
import torch.nn as nn
from captum.influence._core.similarity_influence import (
cosine_similarity,
euclidean_distance,
SimilarityInfluence,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from torch.utils.data import Dataset
class BasicLinearNet(nn.Module):
def __init__(self, num_features) -> None:
super().__init__()
self.fc1 = nn.Linear(num_features, 5, bias=False)
self.fc1.weight.data.fill_(0.02)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(5, 1, bias=False)
self.fc2.weight.data.fill_(0.02)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
return x
class RangeDataset(Dataset):
def __init__(self, low, high, num_features) -> None:
self.samples = (
torch.arange(start=low, end=high, dtype=torch.float)
.repeat(num_features, 1)
.transpose(1, 0)
)
def __len__(self) -> int:
return len(self.samples)
def __getitem__(self, idx):
return self.samples[idx]
class Test(BaseTest):
def test_cosine_with_zeros(self) -> None:
a = torch.cat((torch.zeros((1, 3, 16, 16)), torch.rand((1, 3, 16, 16))))
b = torch.rand((2, 3, 16, 16))
similarity = cosine_similarity(a, b)
self.assertFalse(torch.any(torch.isnan(similarity)))
def test_correct_influences_standard(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
batch_size = high // 2
mymodel = BasicLinearNet(num_features)
mydata = RangeDataset(low, high, num_features)
layers = []
for name, _module in mymodel.named_modules():
layers.append(name)
layers: List[str] = list(filter(None, layers))
testlayers = layers[1:]
sim = SimilarityInfluence(
mymodel,
testlayers,
mydata,
tmpdir,
"linear",
batch_size=batch_size,
similarity_metric=euclidean_distance,
similarity_direction="min",
)
inputs = torch.stack((mydata[1], mydata[8], mydata[14]))
influences = sim.influence(inputs, top_k=3)
self.assertEqual(len(influences), len(testlayers))
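            # The nearest training samples (by euclidean distance) to samples
            # 1, 8 and 14 are {0, 1, 2}, {7, 8, 9} and {13, 14, 15}; summing
            # over the top-k axis keeps the check order-insensitive.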
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[1]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[2]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
def test_correct_influences_batch_single(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
batch_size = 1
mymodel = BasicLinearNet(num_features)
mydata = RangeDataset(low, high, num_features)
layers = []
for name, _module in mymodel.named_modules():
layers.append(name)
layers: List[str] = list(filter(None, layers))
testlayers = layers[1:]
sim = SimilarityInfluence(
mymodel,
testlayers,
mydata,
tmpdir,
"linear",
batch_size=batch_size,
similarity_metric=euclidean_distance,
similarity_direction="min",
)
inputs = torch.stack((mydata[1], mydata[8], mydata[14]))
influences = sim.influence(inputs, top_k=3)
self.assertEqual(len(influences), len(testlayers))
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[1]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[2]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
def test_correct_influences_batch_overflow(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
batch_size = 12
mymodel = BasicLinearNet(num_features)
mydata = RangeDataset(low, high, num_features)
layers = []
for name, _module in mymodel.named_modules():
layers.append(name)
layers: List[str] = list(filter(None, layers))
testlayers = layers[1:]
sim = SimilarityInfluence(
mymodel,
testlayers,
mydata,
tmpdir,
"linear",
batch_size=batch_size,
similarity_metric=euclidean_distance,
similarity_direction="min",
)
inputs = torch.stack((mydata[1], mydata[8], mydata[14]))
influences = sim.influence(inputs, top_k=3)
self.assertEqual(len(influences), len(testlayers))
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[1]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[2]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
def test_zero_activations(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
batch_size = high // 2
mymodel = BasicLinearNet(num_features)
mydata = RangeDataset(low, high, num_features)
layers = []
for name, _module in mymodel.named_modules():
layers.append(name)
layers: List[str] = list(filter(None, layers))
testlayers = layers[1:]
sim1 = SimilarityInfluence(
mymodel, testlayers, mydata, tmpdir, "linear", batch_size=batch_size
)
inputs = torch.stack((mydata[1], mydata[8], mydata[14]))
influences = sim1.influence(inputs, top_k=3)
self.assertEqual(len(influences), len(layers[1:]) + 1) # zero_acts included
self.assertTrue("zero_acts-fc2" in influences)
|
#!/usr/bin/env fbpython
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
import unittest
import torch
from captum.module.gaussian_stochastic_gates import GaussianStochasticGates
from parameterized import parameterized_class
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
@parameterized_class(
[
{"testing_device": "cpu"},
{"testing_device": "cuda"},
]
)
class TestGaussianStochasticGates(BaseTest):
def setUp(self) -> None:
super().setUp()
if self.testing_device == "cuda" and not torch.cuda.is_available():
raise unittest.SkipTest("Skipping GPU test since CUDA not available.")
def test_gstg_1d_input(self) -> None:
dim = 3
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gated_input, reg = gstg(input_tensor)
expected_reg = 2.5213
if self.testing_device == "cpu":
expected_gated_input = [[0.0000, 0.0198, 0.1483], [0.1848, 0.3402, 0.1782]]
elif self.testing_device == "cuda":
expected_gated_input = [[0.0000, 0.0788, 0.0470], [0.0134, 0.0000, 0.1884]]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_gstg_1d_input_with_reg_reduction(self) -> None:
dim = 3
mean_gstg = GaussianStochasticGates(dim, reg_reduction="mean").to(
self.testing_device
)
none_gstg = GaussianStochasticGates(dim, reg_reduction="none").to(
self.testing_device
)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
_, mean_reg = mean_gstg(input_tensor)
_, none_reg = none_gstg(input_tensor)
expected_mean_reg = 0.8404
expected_none_reg = torch.tensor([0.8424, 0.8384, 0.8438])
assertTensorAlmostEqual(self, mean_reg, expected_mean_reg)
assertTensorAlmostEqual(self, none_reg, expected_none_reg)
def test_gstg_1d_input_with_n_gates_error(self) -> None:
dim = 3
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor([0.0, 0.1, 0.2]).to(self.testing_device)
with self.assertRaises(AssertionError):
gstg(input_tensor)
def test_gstg_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 0, 1]).to(self.testing_device)
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gated_input, reg = gstg(input_tensor)
expected_reg = 1.6849
if self.testing_device == "cpu":
expected_gated_input = [[0.0000, 0.0000, 0.1225], [0.0583, 0.0777, 0.3779]]
elif self.testing_device == "cuda":
expected_gated_input = [[0.0000, 0.0000, 0.1577], [0.0736, 0.0981, 0.0242]]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_gates_values_matching_dim_when_eval(self) -> None:
dim = 3
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gstg.train(False)
gated_input, reg = gstg(input_tensor)
assert gated_input.shape == input_tensor.shape
def test_gstg_2d_input(self) -> None:
dim = 3 * 2
gstg = GaussianStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gated_input, reg = gstg(input_tensor)
expected_reg = 5.0458
if self.testing_device == "cpu":
expected_gated_input = [
[[0.0000, 0.0851], [0.0713, 0.3000], [0.2180, 0.1878]],
[[0.2538, 0.0000], [0.3391, 0.8501], [0.3633, 0.8913]],
]
elif self.testing_device == "cuda":
expected_gated_input = [
[[0.0000, 0.0788], [0.0470, 0.0139], [0.0000, 0.1960]],
[[0.0000, 0.7000], [0.1052, 0.2120], [0.5978, 0.0166]],
]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_gstg_2d_input_with_n_gates_error(self) -> None:
dim = 5
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
]
).to(self.testing_device)
with self.assertRaises(AssertionError):
gstg(input_tensor)
def test_gstg_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
).to(self.testing_device)
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gated_input, reg = gstg(input_tensor)
expected_reg = 2.5213
if self.testing_device == "cpu":
expected_gated_input = [
[[0.0000, 0.0198], [0.0396, 0.0594], [0.2435, 0.3708]],
[[0.3696, 0.5954], [0.6805, 0.7655], [0.6159, 0.3921]],
]
elif self.testing_device == "cuda":
expected_gated_input = [
[[0.0000, 0.0788], [0.1577, 0.2365], [0.0000, 0.1174]],
[[0.0269, 0.0000], [0.0000, 0.0000], [0.0448, 0.4145]],
]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_get_gate_values_1d_input(self) -> None:
dim = 3
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gstg(input_tensor)
gate_values = gstg.get_gate_values()
expected_gate_values = [0.5005, 0.5040, 0.4899]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 1, 1])
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gstg(input_tensor)
gate_values = gstg.get_gate_values()
expected_gate_values = [0.5005, 0.5040]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_2d_input(self) -> None:
dim = 3 * 2
gstg = GaussianStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gstg(input_tensor)
gate_values = gstg.get_gate_values()
expected_gate_values = [0.5005, 0.5040, 0.4899, 0.5022, 0.4939, 0.5050]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
)
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gstg(input_tensor)
gate_values = gstg.get_gate_values()
expected_gate_values = [0.5005, 0.5040, 0.4899]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_clamp(self) -> None:
gstg = GaussianStochasticGates._from_pretrained(
torch.tensor([2.0, -2.0, 2.0])
).to(self.testing_device)
clamped_gate_values = gstg.get_gate_values().cpu().tolist()
assert clamped_gate_values == [1.0, 0.0, 1.0]
unclamped_gate_values = gstg.get_gate_values(clamp=False).cpu().tolist()
assert (
unclamped_gate_values[0] > 1
and unclamped_gate_values[1] < 0
and unclamped_gate_values[2] > 1
)
def test_get_gate_active_probs_1d_input(self) -> None:
dim = 3
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gstg(input_tensor)
gate_active_probs = gstg.get_gate_active_probs()
expected_gate_active_probs = [0.8416, 0.8433, 0.8364]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 1, 1])
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gstg(input_tensor)
gate_active_probs = gstg.get_gate_active_probs()
expected_gate_active_probs = [0.8416, 0.8433]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_2d_input(self) -> None:
dim = 3 * 2
gstg = GaussianStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gstg(input_tensor)
gate_active_probs = gstg.get_gate_active_probs()
expected_gate_active_probs = [0.8416, 0.8433, 0.8364, 0.8424, 0.8384, 0.8438]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
)
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gstg(input_tensor)
gate_active_probs = gstg.get_gate_active_probs()
expected_gate_active_probs = [0.8416, 0.8433, 0.8364]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_from_pretrained(self) -> None:
mu = torch.tensor([0.1, 0.2, 0.3, 0.4])
kwargs = {
"mask": torch.tensor([0, 1, 1, 0, 2, 3]),
"reg_weight": 0.1,
"std": 0.01,
}
stg = GaussianStochasticGates._from_pretrained(mu, **kwargs)
for key, expected_val in kwargs.items():
val = getattr(stg, key)
if isinstance(expected_val, torch.Tensor):
assertTensorAlmostEqual(self, val, expected_val, mode="max")
else:
assert val == expected_val
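# A minimal usage sketch (illustrative only; `model`, `criterion`, and
# `data_loader` are hypothetical stand-ins) of stochastic gates as a
# trainable feature-selection layer:
#
#   gates = GaussianStochasticGates(10, reg_weight=0.1)
#   optimizer = torch.optim.Adam(
#       list(model.parameters()) + list(gates.parameters())
#   )
#   for inputs, labels in data_loader:
#       gated_inputs, reg = gates(inputs)  # reg penalizes open gates
#       loss = criterion(model(gated_inputs), labels) + reg
#       optimizer.zero_grad()
#       loss.backward()
#       optimizer.step()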
|
#!/usr/bin/env python3
import unittest
import torch
from captum.module.binary_concrete_stochastic_gates import BinaryConcreteStochasticGates
from parameterized import parameterized_class
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
@parameterized_class(
[
{"testing_device": "cpu"},
{"testing_device": "cuda"},
]
)
class TestBinaryConcreteStochasticGates(BaseTest):
def setUp(self):
super().setUp()
if self.testing_device == "cuda" and not torch.cuda.is_available():
raise unittest.SkipTest("Skipping GPU test since CUDA not available.")
def test_bcstg_1d_input(self) -> None:
dim = 3
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gated_input, reg = bcstg(input_tensor)
expected_reg = 2.4947
if self.testing_device == "cpu":
expected_gated_input = [[0.0000, 0.0212, 0.1892], [0.1839, 0.3753, 0.4937]]
elif self.testing_device == "cuda":
expected_gated_input = [[0.0000, 0.0985, 0.1149], [0.2329, 0.0497, 0.5000]]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_bcstg_1d_input_with_reg_reduction(self) -> None:
dim = 3
mean_bcstg = BinaryConcreteStochasticGates(dim, reg_reduction="mean").to(
self.testing_device
)
none_bcstg = BinaryConcreteStochasticGates(dim, reg_reduction="none").to(
self.testing_device
)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
mean_gated_input, mean_reg = mean_bcstg(input_tensor)
none_gated_input, none_reg = none_bcstg(input_tensor)
expected_mean_reg = 0.8316
expected_none_reg = torch.tensor([0.8321, 0.8310, 0.8325])
assertTensorAlmostEqual(self, mean_reg, expected_mean_reg)
assertTensorAlmostEqual(self, none_reg, expected_none_reg)
def test_bcstg_1d_input_with_n_gates_error(self) -> None:
dim = 3
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor([0.0, 0.1, 0.2]).to(self.testing_device)
with self.assertRaises(AssertionError):
bcstg(input_tensor)
def test_bcstg_num_mask_not_equal_dim_error(self) -> None:
dim = 3
        mask = torch.tensor([0, 0, 1])  # only two distinct gate indices, but dim is 3
with self.assertRaises(AssertionError):
BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
def test_gates_values_matching_dim_when_eval(self) -> None:
dim = 3
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
bcstg.train(False)
gated_input, reg = bcstg(input_tensor)
assert gated_input.shape == input_tensor.shape
def test_bcstg_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 0, 1]).to(self.testing_device)
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gated_input, reg = bcstg(input_tensor)
expected_reg = 1.6643
if self.testing_device == "cpu":
expected_gated_input = [[0.0000, 0.0000, 0.1679], [0.0000, 0.0000, 0.2223]]
elif self.testing_device == "cuda":
expected_gated_input = [[0.0000, 0.0000, 0.1971], [0.1737, 0.2317, 0.3888]]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_bcstg_2d_input(self) -> None:
dim = 3 * 2
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gated_input, reg = bcstg(input_tensor)
expected_reg = 4.9903
if self.testing_device == "cpu":
expected_gated_input = [
[[0.0000, 0.0990], [0.0261, 0.2431], [0.0551, 0.3863]],
[[0.0476, 0.6177], [0.5400, 0.1530], [0.0984, 0.8013]],
]
elif self.testing_device == "cuda":
expected_gated_input = [
[[0.0000, 0.0985], [0.1149, 0.2331], [0.0486, 0.5000]],
[[0.1840, 0.1571], [0.4612, 0.7937], [0.2975, 0.7393]],
]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_bcstg_2d_input_with_n_gates_error(self) -> None:
dim = 5
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
]
).to(self.testing_device)
with self.assertRaises(AssertionError):
bcstg(input_tensor)
def test_bcstg_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
).to(self.testing_device)
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gated_input, reg = bcstg(input_tensor)
expected_reg = 2.4947
if self.testing_device == "cpu":
expected_gated_input = [
[[0.0000, 0.0212], [0.0424, 0.0636], [0.3191, 0.4730]],
[[0.3678, 0.6568], [0.7507, 0.8445], [0.6130, 1.0861]],
]
elif self.testing_device == "cuda":
expected_gated_input = [
[[0.0000, 0.0985], [0.1971, 0.2956], [0.0000, 0.2872]],
[[0.4658, 0.0870], [0.0994, 0.1119], [0.7764, 1.1000]],
]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_get_gate_values_1d_input(self) -> None:
dim = 3
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_values = bcstg.get_gate_values()
expected_gate_values = [0.5001, 0.5012, 0.4970]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 1, 1])
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_values = bcstg.get_gate_values()
expected_gate_values = [0.5001, 0.5012]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_2d_input(self) -> None:
dim = 3 * 2
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_values = bcstg.get_gate_values()
expected_gate_values = [0.5001, 0.5012, 0.4970, 0.5007, 0.4982, 0.5015]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_clamp(self) -> None:
        # enlarge the bounds & push log_alpha to extremes to mock gate
        # values beyond 0 & 1
bcstg = BinaryConcreteStochasticGates._from_pretrained(
torch.tensor([10.0, -10.0, 10.0]), lower_bound=-2, upper_bound=2
).to(self.testing_device)
clamped_gate_values = bcstg.get_gate_values().cpu().tolist()
assert clamped_gate_values == [1.0, 0.0, 1.0]
unclamped_gate_values = bcstg.get_gate_values(clamp=False).cpu().tolist()
assert (
unclamped_gate_values[0] > 1
and unclamped_gate_values[1] < 0
and unclamped_gate_values[2] > 1
)
def test_get_gate_values_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
)
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_values = bcstg.get_gate_values()
expected_gate_values = [0.5001, 0.5012, 0.4970]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_active_probs_1d_input(self) -> None:
dim = 3
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_active_probs = bcstg.get_gate_active_probs()
expected_gate_active_probs = [0.8319, 0.8324, 0.8304]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 1, 1])
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_active_probs = bcstg.get_gate_active_probs()
expected_gate_active_probs = [0.8319, 0.8324]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_2d_input(self) -> None:
dim = 3 * 2
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_active_probs = bcstg.get_gate_active_probs()
expected_gate_active_probs = [0.8319, 0.8324, 0.8304, 0.8321, 0.8310, 0.8325]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
)
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_active_probs = bcstg.get_gate_active_probs()
expected_gate_active_probs = [0.8319, 0.8324, 0.8304]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_from_pretrained(self) -> None:
log_alpha_param = torch.tensor([0.1, 0.2, 0.3, 0.4])
kwargs = {
"mask": torch.tensor([0, 1, 1, 0, 2, 3]),
"reg_weight": 0.1,
"lower_bound": -0.2,
"upper_bound": 1.2,
}
stg = BinaryConcreteStochasticGates._from_pretrained(log_alpha_param, **kwargs)
for key, expected_val in kwargs.items():
val = getattr(stg, key)
if isinstance(expected_val, torch.Tensor):
assertTensorAlmostEqual(self, val, expected_val, mode="max")
else:
assert val == expected_val
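# Background sketch of the hard-concrete gate these tests exercise (after
# Louizos et al., 2018, "Learning Sparse Neural Networks through L0
# Regularization"; treat the details as an assumption, not a spec of this
# module). During training, each gate is sampled as:
#
#   u ~ Uniform(0, 1)
#   s = sigmoid((log(u) - log(1 - u) + log_alpha) / temperature)
#   s_bar = s * (upper_bound - lower_bound) + lower_bound
#   gate = clamp(s_bar, 0, 1)
#
# Stretching s over (lower_bound, upper_bound) before clamping is what lets
# the clamped gate values reach exactly 0 and 1, which
# test_get_gate_values_clamp above relies on.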
|
#!/usr/bin/env python3
from typing import List, Tuple
import torch
from captum._utils.gradient import (
apply_gradient_requirements,
compute_gradients,
compute_layer_gradients_and_eval,
undo_gradient_requirements,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel,
BasicModel2,
BasicModel4_MultiArgs,
BasicModel5_MultiArgs,
BasicModel6_MultiTensor,
BasicModel_MultiLayer,
)
class Test(BaseTest):
def test_apply_gradient_reqs(self) -> None:
initial_grads = [False, True, False]
test_tensor = torch.tensor([[6.0]], requires_grad=True)
test_tensor.grad = torch.tensor([[7.0]])
test_tensor_tuple = (torch.tensor([[5.0]]), test_tensor, torch.tensor([[7.0]]))
out_mask = apply_gradient_requirements(test_tensor_tuple)
for i in range(len(test_tensor_tuple)):
self.assertTrue(test_tensor_tuple[i].requires_grad)
self.assertEqual(out_mask[i], initial_grads[i])
def test_undo_gradient_reqs(self) -> None:
initial_grads = [False, True, False]
test_tensor = torch.tensor([[6.0]], requires_grad=True)
test_tensor.grad = torch.tensor([[7.0]])
test_tensor_tuple = (
torch.tensor([[6.0]], requires_grad=True),
test_tensor,
torch.tensor([[7.0]], requires_grad=True),
)
undo_gradient_requirements(test_tensor_tuple, initial_grads)
for i in range(len(test_tensor_tuple)):
self.assertEqual(test_tensor_tuple[i].requires_grad, initial_grads[i])
def test_gradient_basic(self) -> None:
model = BasicModel()
input = torch.tensor([[5.0]], requires_grad=True)
input.grad = torch.tensor([[9.0]])
grads = compute_gradients(model, input)[0]
assertTensorAlmostEqual(self, grads, [[0.0]], delta=0.01, mode="max")
# Verify grad attribute is not altered
assertTensorAlmostEqual(self, input.grad, [[9.0]], delta=0.0, mode="max")
def test_gradient_basic_2(self) -> None:
model = BasicModel()
input = torch.tensor([[-3.0]], requires_grad=True)
input.grad = torch.tensor([[14.0]])
grads = compute_gradients(model, input)[0]
assertTensorAlmostEqual(self, grads, [[1.0]], delta=0.01, mode="max")
# Verify grad attribute is not altered
assertTensorAlmostEqual(self, input.grad, [[14.0]], delta=0.0, mode="max")
def test_gradient_multiinput(self) -> None:
model = BasicModel6_MultiTensor()
input1 = torch.tensor([[-3.0, -5.0]], requires_grad=True)
input2 = torch.tensor([[-5.0, 2.0]], requires_grad=True)
grads = compute_gradients(model, (input1, input2))
assertTensorAlmostEqual(self, grads[0], [[0.0, 1.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads[1], [[0.0, 1.0]], delta=0.01, mode="max")
def test_gradient_additional_args(self) -> None:
model = BasicModel4_MultiArgs()
input1 = torch.tensor([[10.0]], requires_grad=True)
input2 = torch.tensor([[8.0]], requires_grad=True)
grads = compute_gradients(model, (input1, input2), additional_forward_args=(2,))
assertTensorAlmostEqual(self, grads[0], [[1.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads[1], [[-0.5]], delta=0.01, mode="max")
def test_gradient_additional_args_2(self) -> None:
model = BasicModel5_MultiArgs()
input1 = torch.tensor([[-10.0]], requires_grad=True)
input2 = torch.tensor([[6.0]], requires_grad=True)
grads = compute_gradients(
model, (input1, input2), additional_forward_args=([3, -4],)
)
assertTensorAlmostEqual(self, grads[0], [[0.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads[1], [[4.0]], delta=0.01, mode="max")
def test_gradient_target_int(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[4.0, -1.0]], requires_grad=True)
input2 = torch.tensor([[2.0, 5.0]], requires_grad=True)
grads0 = compute_gradients(model, (input1, input2), target_ind=0)
grads1 = compute_gradients(model, (input1, input2), target_ind=1)
assertTensorAlmostEqual(self, grads0[0], [[1.0, 0.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads0[1], [[-1.0, 0.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads1[0], [[0.0, 0.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads1[1], [[0.0, 0.0]], delta=0.01, mode="max")
def test_gradient_target_list(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
grads = compute_gradients(model, (input1, input2), target_ind=[0, 1])
assertTensorAlmostEqual(
self,
grads[0],
[[1.0, 0.0], [0.0, 1.0]],
delta=0.01,
mode="max",
)
assertTensorAlmostEqual(
self,
grads[1],
[[-1.0, 0.0], [0.0, -1.0]],
delta=0.01,
mode="max",
)
def test_gradient_target_tuple(self) -> None:
model = BasicModel()
input = torch.tensor(
[[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]], requires_grad=True
)
grads = compute_gradients(model, input, target_ind=(0, 1))[0]
assertTensorAlmostEqual(
self,
grads,
[[[0.0, 0.0], [0.0, 0.0]], [[0.0, 1.0], [0.0, 0.0]]],
delta=0.01,
mode="max",
)
def test_gradient_target_listtuple(self) -> None:
model = BasicModel()
input = torch.tensor(
[[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]], requires_grad=True
)
target: List[Tuple[int, ...]] = [(1, 1), (0, 1)]
grads = compute_gradients(model, input, target_ind=target)[0]
assertTensorAlmostEqual(
self,
grads,
[[[0.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 0.0]]],
delta=0.01,
mode="max",
)
def test_gradient_inplace(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
input = torch.tensor([[1.0, 6.0, -3.0]], requires_grad=True)
grads = compute_gradients(model, input, target_ind=0)[0]
assertTensorAlmostEqual(self, grads, [[3.0, 3.0, 3.0]], delta=0.01, mode="max")
def test_layer_gradient_linear0(self) -> None:
model = BasicModel_MultiLayer()
input = torch.tensor([[5.0, -11.0, 23.0]], requires_grad=True)
grads, eval = compute_layer_gradients_and_eval(
model, model.linear0, input, target_ind=0
)
assertTensorAlmostEqual(
self, grads[0], [[4.0, 4.0, 4.0]], delta=0.01, mode="max"
)
assertTensorAlmostEqual(
self,
eval[0],
[[5.0, -11.0, 23.0]],
delta=0.01,
mode="max",
)
def test_layer_gradient_linear1(self) -> None:
model = BasicModel_MultiLayer()
input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
grads, eval = compute_layer_gradients_and_eval(
model, model.linear1, input, target_ind=1
)
assertTensorAlmostEqual(
self,
grads[0],
[[0.0, 1.0, 1.0, 1.0]],
delta=0.01,
mode="max",
)
assertTensorAlmostEqual(
self,
eval[0],
[[-2.0, 9.0, 9.0, 9.0]],
delta=0.01,
mode="max",
)
def test_layer_gradient_linear1_inplace(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
grads, eval = compute_layer_gradients_and_eval(
model, model.linear1, input, target_ind=1
)
assertTensorAlmostEqual(
self,
grads[0],
[[0.0, 1.0, 1.0, 1.0]],
delta=0.01,
mode="max",
)
assertTensorAlmostEqual(
self,
eval[0],
[[-2.0, 9.0, 9.0, 9.0]],
delta=0.01,
mode="max",
)
def test_layer_gradient_relu_input_inplace(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
grads, eval = compute_layer_gradients_and_eval(
model, model.relu, input, target_ind=1, attribute_to_layer_input=True
)
assertTensorAlmostEqual(
self,
grads[0],
[[0.0, 1.0, 1.0, 1.0]],
delta=0.01,
mode="max",
)
assertTensorAlmostEqual(
self,
eval[0],
[[-2.0, 9.0, 9.0, 9.0]],
delta=0.01,
mode="max",
)
def test_layer_gradient_output(self) -> None:
model = BasicModel_MultiLayer()
input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
grads, eval = compute_layer_gradients_and_eval(
model, model.linear2, input, target_ind=1
)
assertTensorAlmostEqual(self, grads[0], [[0.0, 1.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, eval[0], [[26.0, 28.0]], delta=0.01, mode="max")
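# Informal sanity check for test_gradient_basic above: the BasicModel helper
# (see tests/helpers/basic_models.py; treat this as a reading of that helper,
# not a spec) computes 1 - relu(1 - x). For x = 5, the inner relu is inactive
# (1 - 5 < 0), so the gradient is 0; for x = -3 it is active, so the gradient
# is 1, matching the asserted values.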
|
#!/usr/bin/env python3
from typing import cast, List, Tuple
import torch
from captum._utils.common import (
_format_feature_mask,
_get_max_feature_index,
_parse_version,
_reduce_list,
_select_targets,
_sort_key_list,
safe_div,
)
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
class Test(BaseTest):
def test_safe_div_number_denom(self) -> None:
num = torch.tensor(4.0)
assert safe_div(num, 2) == 2.0
assert safe_div(num, 0, 2) == 2.0
assert safe_div(num, 2.0) == 2.0
assert safe_div(num, 0.0, 2.0) == 2.0
def test_safe_div_tensor_denom(self) -> None:
num = torch.tensor([4.0, 6.0])
exp = torch.tensor([2.0, 3.0])
assert (safe_div(num, torch.tensor([2.0, 2.0])) == exp).all()
# tensor default denom
assert (safe_div(num, torch.tensor([0.0, 0.0]), torch.tensor(2.0)) == exp).all()
assert (
safe_div(
num,
torch.tensor([0.0, 0.0]),
torch.tensor([2.0, 2.0]),
)
== exp
).all()
# float default denom
assert (safe_div(num, torch.tensor([0.0, 0.0]), 2.0) == exp).all()
def test_reduce_list_tensors(self) -> None:
tensors = [torch.tensor([[3, 4, 5]]), torch.tensor([[0, 1, 2]])]
reduced = _reduce_list(tensors)
assertTensorAlmostEqual(self, reduced, [[3, 4, 5], [0, 1, 2]])
def test_reduce_list_tuples(self):
tensors = [
(torch.tensor([[3, 4, 5]]), torch.tensor([[0, 1, 2]])),
(torch.tensor([[3, 4, 5]]), torch.tensor([[0, 1, 2]])),
]
reduced = _reduce_list(tensors)
assertTensorAlmostEqual(self, reduced[0], [[3, 4, 5], [3, 4, 5]])
assertTensorAlmostEqual(self, reduced[1], [[0, 1, 2], [0, 1, 2]])
def test_sort_key_list(self) -> None:
key_list = [
torch.device("cuda:13"),
torch.device("cuda:17"),
torch.device("cuda:10"),
torch.device("cuda:0"),
]
device_index_list = [0, 10, 13, 17]
sorted_keys = _sort_key_list(key_list, device_index_list)
for i in range(len(key_list)):
self.assertEqual(sorted_keys[i].index, device_index_list[i])
def test_sort_key_list_incomplete(self) -> None:
key_list = [torch.device("cuda:10"), torch.device("cuda:0")]
device_index_list = [0, 10, 13, 17]
sorted_keys = _sort_key_list(key_list, device_index_list)
for i in range(len(key_list)):
self.assertEqual(sorted_keys[i].index, device_index_list[i])
def test_select_target_2d(self) -> None:
output_tensor = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assertTensorAlmostEqual(self, _select_targets(output_tensor, 1), [2, 5, 8])
assertTensorAlmostEqual(
self, _select_targets(output_tensor, torch.tensor(0)), [1, 4, 7]
)
assertTensorAlmostEqual(
self,
_select_targets(output_tensor, torch.tensor([1, 2, 0])),
[[2], [6], [7]],
)
assertTensorAlmostEqual(
self, _select_targets(output_tensor, [1, 2, 0]), [[2], [6], [7]]
)
# Verify error is raised if too many dimensions are provided.
with self.assertRaises(AssertionError):
_select_targets(output_tensor, (1, 2))
def test_select_target_3d(self) -> None:
output_tensor = torch.tensor(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[9, 8, 7], [6, 5, 4], [3, 2, 1]]]
)
assertTensorAlmostEqual(self, _select_targets(output_tensor, (0, 1)), [2, 8])
assertTensorAlmostEqual(
self,
_select_targets(
output_tensor, cast(List[Tuple[int, ...]], [(0, 1), (2, 0)])
),
[2, 3],
)
# Verify error is raised if list is longer than number of examples.
with self.assertRaises(AssertionError):
_select_targets(
output_tensor, cast(List[Tuple[int, ...]], [(0, 1), (2, 0), (3, 2)])
)
# Verify error is raised if too many dimensions are provided.
with self.assertRaises(AssertionError):
_select_targets(output_tensor, (1, 2, 3))
def test_format_feature_mask_of_tensor(self) -> None:
formatted_inputs = (torch.tensor([[0.0, 0.0], [0.0, 0.0]]),)
tensor_mask = torch.tensor([[0, 1]])
formatted_tensor_mask = _format_feature_mask(tensor_mask, formatted_inputs)
self.assertEqual(type(formatted_tensor_mask), tuple)
assertTensorTuplesAlmostEqual(self, formatted_tensor_mask, (tensor_mask,))
def test_format_feature_mask_of_tuple(self) -> None:
formatted_inputs = (
torch.tensor([[0.0, 0.0], [0.0, 0.0]]),
torch.tensor([[0.0, 0.0], [0.0, 0.0]]),
)
tuple_mask = (
torch.tensor([[0, 1], [2, 3]]),
torch.tensor([[4, 5], [6, 6]]),
)
formatted_tuple_mask = _format_feature_mask(tuple_mask, formatted_inputs)
self.assertEqual(type(formatted_tuple_mask), tuple)
assertTensorTuplesAlmostEqual(self, formatted_tuple_mask, tuple_mask)
def test_format_feature_mask_of_none(self) -> None:
formatted_inputs = (
torch.tensor([[0.0, 0.0], [0.0, 0.0]]),
torch.tensor([]), # empty tensor
torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
)
expected_mask = (
torch.tensor([[0, 1]]),
torch.tensor([]),
torch.tensor([[2, 3, 4]]),
)
formatted_none_mask = _format_feature_mask(None, formatted_inputs)
self.assertEqual(type(formatted_none_mask), tuple)
assertTensorTuplesAlmostEqual(self, formatted_none_mask, expected_mask)
def test_get_max_feature_index(self) -> None:
mask = (
torch.tensor([[0, 1], [2, 3]]),
torch.tensor([]),
torch.tensor([[4, 5], [6, 100]]),
torch.tensor([[0, 1], [2, 3]]),
)
assert _get_max_feature_index(mask) == 100
class TestParseVersion(BaseTest):
def test_parse_version_dev(self) -> None:
version_str = "1.12.0.dev20201109"
output = _parse_version(version_str)
self.assertEqual(output, (1, 12, 0))
def test_parse_version_post(self) -> None:
version_str = "1.3.0.post2"
output = _parse_version(version_str)
self.assertEqual(output, (1, 3, 0))
def test_parse_version_1_12_0(self) -> None:
version_str = "1.12.0"
output = _parse_version(version_str)
self.assertEqual(output, (1, 12, 0))
def test_parse_version_1_12_2(self) -> None:
version_str = "1.12.2"
output = _parse_version(version_str)
self.assertEqual(output, (1, 12, 2))
def test_parse_version_1_6_0(self) -> None:
version_str = "1.6.0"
output = _parse_version(version_str)
self.assertEqual(output, (1, 6, 0))
def test_parse_version_1_12(self) -> None:
version_str = "1.12"
output = _parse_version(version_str)
self.assertEqual(output, (1, 12))
|
#!/usr/bin/env python3
import torch
import torch.nn as nn
from captum._utils.gradient import (
_compute_jacobian_wrt_params,
_compute_jacobian_wrt_params_with_sample_wise_trick,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicLinearModel2, BasicLinearModel_Multilayer
class Test(BaseTest):
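    # Convention used throughout these tests: the returned `grads` is a tuple
    # with one entry per model parameter (in parameter order), and
    # grads[p][i] is the jacobian for sample i with respect to parameter p.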
def test_jacobian_scores_single_scalar(self) -> None:
model = BasicLinearModel2(5, 1)
model.linear.weight = nn.Parameter(torch.arange(0, 5).float().reshape(1, 5))
a = torch.ones(5).unsqueeze(0)
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], a)
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], a)
def test_jacobian_scores_single_vector(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
a = torch.ones(5).unsqueeze(0)
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((a, a)))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((a, a)))
def test_jacobian_scores_single_scalar_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 1)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(1, 3).view(1, 2).float())
a = torch.ones(5).unsqueeze(0)
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((a, 2 * a)))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35]]))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((a, 2 * a)))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35]]))
def test_jacobian_scores_single_vector_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 2)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(0, 4).view(2, 2).float())
a = torch.ones(5).unsqueeze(0)
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((2 * a, 4 * a)))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35], [10, 35]]))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((2 * a, 4 * a)))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35], [10, 35]]))
def test_jacobian_scores_batch_scalar(self) -> None:
model = BasicLinearModel2(5, 1)
model.linear.weight = nn.Parameter(torch.arange(0, 5).float().reshape(1, 5))
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], a[0:1])
assertTensorAlmostEqual(self, grads[0][1], a[1:2])
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], a[0:1])
assertTensorAlmostEqual(self, grads[0][1], a[1:2])
def test_jacobian_scores_batch_vector(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((a[0], a[0])))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((a[1], a[1])))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((a[0], a[0])))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((a[1], a[1])))
def test_jacobian_scores_batch_scalar_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 1)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(1, 3).view(1, 2).float())
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((a[0], 2 * a[0])))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35]]))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((a[1], 2 * a[1])))
assertTensorAlmostEqual(self, grads[1][1], torch.Tensor([[20, 70]]))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((a[0], 2 * a[0])))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35]]))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((a[1], 2 * a[1])))
assertTensorAlmostEqual(self, grads[1][1], torch.Tensor([[20, 70]]))
def test_jacobian_scores_batch_vector_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 2)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(0, 4).view(2, 2).float())
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((2 * a[0], 4 * a[0])))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35], [10, 35]]))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((2 * a[1], 4 * a[1])))
assertTensorAlmostEqual(self, grads[1][1], torch.Tensor([[20, 70], [20, 70]]))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((2 * a[0], 4 * a[0])))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35], [10, 35]]))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((2 * a[1], 4 * a[1])))
assertTensorAlmostEqual(self, grads[1][1], torch.Tensor([[20, 70], [20, 70]]))
def test_jacobian_loss_single_scalar(self) -> None:
model = BasicLinearModel2(5, 1)
model.linear.weight = nn.Parameter(torch.arange(0, 5).view(1, 5).float())
a = torch.ones(5).unsqueeze(0)
label = torch.Tensor([9])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(self, grads[0][0], 2 * (10 - 9) * a)
loss_fn = nn.MSELoss(reduction="sum")
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(self, grads[0][0], 2 * (10 - 9) * a)
def test_jacobian_loss_single_vector(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
a = torch.ones(5).unsqueeze(0)
label = torch.Tensor([[9, 38]])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(
self, grads[0][0], torch.cat((2 * (10 - 9) * a, 2 * (35 - 38) * a))
)
loss_fn = nn.MSELoss(reduction="sum")
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(
self, grads[0][0], torch.cat((2 * (10 - 9) * a, 2 * (35 - 38) * a))
)
def test_jacobian_loss_batch_scalar(self) -> None:
model = BasicLinearModel2(5, 1)
model.linear.weight = nn.Parameter(torch.arange(0, 5).float().reshape(1, 5))
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9], [18]])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(self, grads[0][0], 2 * (10 - 9) * a[0:1])
assertTensorAlmostEqual(self, grads[0][1], 2 * (20 - 18) * a[1:2])
loss_fn = nn.MSELoss(reduction="sum")
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(self, grads[0][0], 2 * (10 - 9) * a[0:1])
assertTensorAlmostEqual(self, grads[0][1], 2 * (20 - 18) * a[1:2])
def test_jacobian_loss_batch_vector(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(
self, grads[0][0], torch.stack((2 * (10 - 9) * a[0], 2 * (35 - 38) * a[0]))
)
assertTensorAlmostEqual(
self, grads[0][1], torch.stack((2 * (20 - 18) * a[1], 2 * (70 - 74) * a[1]))
)
loss_fn = nn.MSELoss(reduction="sum")
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(
self, grads[0][0], torch.stack((2 * (10 - 9) * a[0], 2 * (35 - 38) * a[0]))
)
assertTensorAlmostEqual(
self, grads[0][1], torch.stack((2 * (20 - 18) * a[1], 2 * (70 - 74) * a[1]))
)
def test_jacobian_loss_single_scalar_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 1)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(1, 3).view(1, 2).float())
a = torch.ones(5).unsqueeze(0)
label = torch.Tensor([[78]])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(
self, grads[0][0], torch.cat((2 * (80 - 78) * a, 2 * 2 * (80 - 78) * a))
)
assertTensorAlmostEqual(
self, grads[1][0], 2 * (80 - 78) * torch.Tensor([[10, 35]])
)
loss_fn = nn.MSELoss(reduction="sum")
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(
self, grads[0][0], torch.cat((2 * (80 - 78) * a, 2 * 2 * (80 - 78) * a))
)
assertTensorAlmostEqual(
self, grads[1][0], 2 * (80 - 78) * torch.Tensor([[10, 35]])
)
def test_jacobian_loss_batch_vector_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 2)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(0, 4).view(2, 2).float())
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[33, 124], [69, 256]])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(
self,
grads[0][0],
torch.stack(
(
2 * (0 * (35 - 33) + 2 * (125 - 124)) * a[0],
2 * (1 * (35 - 33) + 3 * (125 - 124)) * a[0],
)
),
)
assertTensorAlmostEqual(
self,
grads[1][0],
torch.Tensor(
[
[2 * (35 - 33) * 10, 2 * (35 - 33) * 35],
[2 * (125 - 124) * 10, 2 * (125 - 124) * 35],
]
),
)
assertTensorAlmostEqual(
self,
grads[0][1],
torch.stack(
(
2 * (0 * (70 - 69) + 2 * (250 - 256)) * a[1],
2 * (1 * (70 - 69) + 3 * (250 - 256)) * a[1],
)
),
)
assertTensorAlmostEqual(
self,
grads[1][1],
torch.Tensor(
[
[2 * (70 - 69) * 10 * 2, 2 * (70 - 69) * 35 * 2],
[2 * (250 - 256) * 10 * 2, 2 * (250 - 256) * 35 * 2],
]
),
)
loss_fn = nn.MSELoss(reduction="sum")
grads_h = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(self, grads_h[0][0], grads[0][0])
assertTensorAlmostEqual(self, grads_h[1][0], grads[1][0])
assertTensorAlmostEqual(self, grads_h[0][1], grads[0][1])
assertTensorAlmostEqual(self, grads_h[1][1], grads[1][1])
def test_jacobian_loss_custom_correct(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
def my_loss(out, label):
return (out - label).pow(2)
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
grads = _compute_jacobian_wrt_params(model, (a,), label, my_loss)
assertTensorAlmostEqual(
self, grads[0][0], torch.stack((2 * (10 - 9) * a[0], 2 * (35 - 38) * a[0]))
)
assertTensorAlmostEqual(
self, grads[0][1], torch.stack((2 * (20 - 18) * a[1], 2 * (70 - 74) * a[1]))
)
def test_jacobian_loss_custom_wrong(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
def my_loss(out, label):
return torch.sum((out - label).pow(2))
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
with self.assertRaises(AssertionError):
_compute_jacobian_wrt_params(model, (a,), label, my_loss)
def test_jacobian_loss_custom_correct_sample_wise_trick(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
def my_loss(out, label):
return torch.sum((out - label).pow(2))
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, my_loss # type: ignore
)
assertTensorAlmostEqual(
self, grads[0][0], torch.stack((2 * (10 - 9) * a[0], 2 * (35 - 38) * a[0]))
)
assertTensorAlmostEqual(
self, grads[0][1], torch.stack((2 * (20 - 18) * a[1], 2 * (70 - 74) * a[1]))
)
def test_jacobian_loss_custom_wrong_sample_wise_trick(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
def my_loss(out, label):
return (out - label).pow(2)
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
with self.assertRaises(AssertionError):
_compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, my_loss # type: ignore
)
def test_jacobian_loss_wrong_reduction_sample_wise_trick(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
loss_fn = nn.MSELoss(reduction="none")
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
with self.assertRaises(AssertionError):
_compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
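# Informal worked check for test_jacobian_loss_single_scalar above: with
# weight W = [0, 1, 2, 3, 4] and input a = ones(5), the model output is
# W @ a = 0 + 1 + 2 + 3 + 4 = 10. For MSE against label 9, the loss is
# (10 - 9) ** 2, so dLoss/dW = 2 * (10 - 9) * a, exactly the gradient the
# test asserts.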
|
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from captum._utils.progress import NullProgress, progress
from tests.helpers.basic import BaseTest
class Test(BaseTest):
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_nullprogress(self, mock_stderr) -> None:
count = 0
with NullProgress(["x", "y", "z"]) as np:
for _ in np:
for _ in NullProgress([1, 2, 3]):
count += 1
self.assertEqual(count, 9)
output = mock_stderr.getvalue()
self.assertEqual(output, "")
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_nested_progress_tqdm(self, mock_stderr) -> None:
try:
import tqdm # noqa: F401
except ImportError:
raise unittest.SkipTest("Skipping tqdm test, tqdm not available.")
parent_data = ["x", "y", "z"]
test_data = [1, 2, 3]
with progress(parent_data, desc="parent progress") as parent:
for item in parent:
for _ in progress(test_data, desc=f"test progress {item}"):
pass
output = mock_stderr.getvalue()
self.assertIn("parent progress:", output)
for item in parent_data:
self.assertIn(f"test progress {item}:", output)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_nested_simple_progress(self, mock_stderr) -> None:
parent_data = ["x", "y", "z"]
test_data = [1, 2, 3]
with progress(
parent_data, desc="parent progress", use_tqdm=False, mininterval=0.0
) as parent:
for item in parent:
for _ in progress(
test_data, desc=f"test progress {item}", use_tqdm=False
):
pass
output = mock_stderr.getvalue()
        self.assertEqual(
            output.count("parent progress:"),
            5,
            "5 'parent' progress bar updates expected",
        )
for item in parent_data:
self.assertIn(f"test progress {item}:", output)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_progress_tqdm(self, mock_stderr) -> None:
try:
import tqdm # noqa: F401
except ImportError:
raise unittest.SkipTest("Skipping tqdm test, tqdm not available.")
test_data = [1, 3, 5]
progressed = progress(test_data, desc="test progress")
assert list(progressed) == test_data
assert "test progress: " in mock_stderr.getvalue()
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_progress(self, mock_stderr) -> None:
test_data = [1, 3, 5]
desc = "test progress"
progressed = progress(test_data, desc=desc, use_tqdm=False)
assert list(progressed) == test_data
assert mock_stderr.getvalue().startswith(f"\r{desc}: 0% 0/3")
assert mock_stderr.getvalue().endswith(f"\r{desc}: 100% 3/3\n")
# progress iterable without len but explicitly specify total
def gen():
for n in test_data:
yield n
mock_stderr.seek(0)
mock_stderr.truncate(0)
progressed = progress(gen(), desc=desc, total=len(test_data), use_tqdm=False)
assert list(progressed) == test_data
assert mock_stderr.getvalue().startswith(f"\r{desc}: 0% 0/3")
assert mock_stderr.getvalue().endswith(f"\r{desc}: 100% 3/3\n")
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_progress_without_total(self, mock_stderr) -> None:
test_data = [1, 3, 5]
desc = "test progress"
def gen():
for n in test_data:
yield n
progressed = progress(gen(), desc=desc, use_tqdm=False)
assert list(progressed) == test_data
assert mock_stderr.getvalue().startswith(f"\r{desc}: ")
assert mock_stderr.getvalue().endswith(f"\r{desc}: ...\n")
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_progress_update_manually(self, mock_stderr) -> None:
desc = "test progress"
p = progress(total=5, desc=desc, use_tqdm=False)
p.update(0)
p.update(2)
p.update(2)
p.update(1)
p.close()
assert mock_stderr.getvalue().startswith(f"\r{desc}: 0% 0/5")
assert mock_stderr.getvalue().endswith(f"\r{desc}: 100% 5/5\n")
|
import glob
import tempfile
from datetime import datetime
from typing import cast, List
import torch
from captum._utils.av import AV
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicLinearReLULinear
from torch.utils.data import DataLoader, Dataset
DEFAULT_IDENTIFIER = "default_identifier"
class RangeDataset(Dataset):
def __init__(self, low, high, num_features) -> None:
self.samples = (
torch.arange(start=low, end=high, dtype=torch.float)
.repeat(num_features, 1)
.transpose(1, 0)
)
def __len__(self) -> int:
return len(self.samples)
def __getitem__(self, idx):
return self.samples[idx]
class Test(BaseTest):
def test_exists_without_version(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_0 = torch.randn(64, 16)
self.assertFalse(AV.exists(tmpdir, "dummy", "layer1.0.conv1"))
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
self.assertTrue(
AV.exists(
tmpdir,
"dummy",
DEFAULT_IDENTIFIER,
"layer1.0.conv1",
)
)
def test_exists_with_version(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
idf1 = str(int(datetime.now().microsecond))
idf2 = "idf2"
av_0 = torch.randn(64, 16)
self.assertFalse(AV.exists(tmpdir, "dummy", "layer1.0.conv1", idf1))
self.assertFalse(AV.exists(tmpdir, "dummy", "layer1.0.conv1", idf2))
AV.save(tmpdir, "dummy", idf1, "layer1.0.conv1", av_0, "0")
self.assertTrue(AV.exists(tmpdir, "dummy", idf1, "layer1.0.conv1"))
self.assertFalse(AV.exists(tmpdir, "dummy", idf2, "layer1.0.conv1"))
AV.save(tmpdir, "dummy", idf2, "layer1.0.conv1", av_0, "0")
self.assertTrue(AV.exists(tmpdir, "dummy", idf2, "layer1.0.conv1"))
def test_av_save_two_layers(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_0 = torch.randn(64, 16)
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
self.assertTrue(
AV.exists(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1")
)
self.assertFalse(
AV.exists(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv2")
)
# experimenting with adding to another layer
av_1 = torch.randn(64, 16)
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv2", av_1, "0")
self.assertTrue(
AV.exists(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv2")
)
def test_av_save_multi_layer(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_0 = torch.randn(64, 16)
av_1 = torch.randn(64, 16)
av_2 = torch.randn(64, 16)
model_path = AV._assemble_model_dir(tmpdir, "dummy")
# save first layer
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
self.assertEqual(len(glob.glob(model_path + "*")), 1)
# add two new layers at once
AV.save(
tmpdir,
"dummy",
DEFAULT_IDENTIFIER,
["layer1.0.conv2", "layer1.1.conv1"],
[av_1, av_2],
"0",
)
self.assertEqual(len(glob.glob(model_path + "/*/*/*")), 3)
# overwrite the first saved layer
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
self.assertEqual(len(glob.glob(model_path + "/*/*/*")), 3)
# save a new version of the first layer
idf1 = str(int(datetime.now().microsecond))
self.assertFalse(AV.exists(tmpdir, "dummy", idf1, "layer1.0.conv1"))
AV.save(tmpdir, "dummy", idf1, "layer1.0.conv1", av_0, "0")
self.assertTrue(AV.exists(tmpdir, "dummy", idf1, "layer1.0.conv1"))
self.assertEqual(len(glob.glob(model_path + "/*/*/*")), 4)
def test_av_save_multiple_batches_per_layer(self) -> None:
def save_and_assert_batch(layer_path, total_num_batches, batch, n_batch_name):
# save n-th batch and verify the number of saved batches
AV.save(
tmpdir,
model_id,
DEFAULT_IDENTIFIER,
"layer1.0.conv1",
batch,
n_batch_name,
)
self.assertEqual(
len(glob.glob("/".join([layer_path, "*.pt"]))),
total_num_batches,
)
self.assertTrue(
AV.exists(
tmpdir, model_id, DEFAULT_IDENTIFIER, "layer1.0.conv1", n_batch_name
)
)
with tempfile.TemporaryDirectory() as tmpdir:
b0 = torch.randn(64, 16)
b1 = torch.randn(64, 16)
b2 = torch.randn(64, 16)
model_id = "dummy"
model_path = AV._assemble_model_dir(tmpdir, model_id)
layer_path = AV._assemble_file_path(
model_path, DEFAULT_IDENTIFIER, "layer1.0.conv1"
)
# save first batch and verify the number of saved batches
save_and_assert_batch(layer_path, 1, b0, "0")
# save second batch and verify the number of saved batches
save_and_assert_batch(layer_path, 2, b1, "1")
# save third batch and verify the number of saved batches
save_and_assert_batch(layer_path, 3, b2, "2")
def test_av_load_multiple_batches_per_layer(self) -> None:
def save_load_and_assert_batch(
layer_path, total_num_batches, batch, n_batch_name
):
# save n-th batch and verify the number of saved batches
AV.save(
tmpdir,
model_id,
DEFAULT_IDENTIFIER,
"layer1.0.conv1",
batch,
n_batch_name,
)
loaded_dataset = AV.load(
tmpdir, model_id, DEFAULT_IDENTIFIER, "layer1.0.conv1", n_batch_name
)
assertTensorAlmostEqual(self, next(iter(loaded_dataset)), batch, 0.0)
loaded_dataset_for_layer = AV.load(
tmpdir, model_id, DEFAULT_IDENTIFIER, "layer1.0.conv1"
)
self.assertEqual(
loaded_dataset_for_layer.__len__(),
total_num_batches,
)
with tempfile.TemporaryDirectory() as tmpdir:
b0 = torch.randn(64, 16)
b1 = torch.randn(64, 16)
b2 = torch.randn(64, 16)
model_id = "dummy"
model_path = AV._assemble_model_dir(tmpdir, model_id)
layer_path = AV._assemble_file_path(
model_path, DEFAULT_IDENTIFIER, "layer1.0.conv1"
)
# save first batch and verify the number of saved batches
save_load_and_assert_batch(layer_path, 1, b0, "0")
# save second batch and verify the number of saved batches
save_load_and_assert_batch(layer_path, 2, b1, "1")
# save third batch and verify the number of saved batches
save_load_and_assert_batch(layer_path, 3, b2, "2")
def test_av_load_non_saved_layer(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
model_id = "dummy"
with self.assertRaises(RuntimeError) as context:
AV.load(tmpdir, model_id)
self.assertTrue(
(
f"Activation vectors for model {model_id} "
f"was not found at path {tmpdir}"
)
== str(context.exception)
)
def test_av_load_one_batch(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_0 = torch.randn(64, 16)
av_1 = torch.randn(36, 16)
avs = [av_0, av_1]
# add av_0 to the list of activations
model_id = "dummy"
with self.assertRaises(RuntimeError) as context:
AV.load(tmpdir, model_id)
self.assertTrue(
(
f"Activation vectors for model {model_id} "
f"was not found at path {tmpdir}"
)
== str(context.exception)
)
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
model_id = "dummy"
dataset = AV.load(tmpdir, model_id, identifier=DEFAULT_IDENTIFIER)
for i, av in enumerate(DataLoader(cast(Dataset, dataset))):
assertTensorAlmostEqual(self, av, avs[i].unsqueeze(0))
# add av_1 to the list of activations
dataloader_2 = DataLoader(
cast(
Dataset,
AV.load(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv2"),
)
)
self.assertEqual(len(dataloader_2), 0)
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv2", av_1, "0")
dataset = AV.load(tmpdir, "dummy", identifier=DEFAULT_IDENTIFIER)
dataloader = DataLoader(cast(Dataset, dataset))
self.assertEqual(len(dataloader), 2)
for i, av in enumerate(dataloader):
assertTensorAlmostEqual(self, av, avs[i].unsqueeze(0))
def test_av_load_all_identifiers_one_layer(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_0 = torch.randn(64, 16)
av_1 = torch.randn(36, 16)
av_2 = torch.randn(16, 16)
av_3 = torch.randn(4, 16)
avs = [av_1, av_2, av_3]
idf1, idf2, idf3 = "idf1", "idf2", "idf3"
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
dataloader = DataLoader(
cast(Dataset, AV.load(tmpdir, "dummy", identifier=DEFAULT_IDENTIFIER))
)
self.assertEqual(len(dataloader), 1)
# add activations for another layer
AV.save(tmpdir, "dummy", idf1, "layer1.0.conv2", av_1, "0")
AV.save(tmpdir, "dummy", idf2, "layer1.0.conv2", av_2, "0")
AV.save(tmpdir, "dummy", idf3, "layer1.0.conv2", av_3, "0")
dataloader_layer = DataLoader(
cast(
Dataset,
AV.load(
tmpdir,
"dummy",
layer="layer1.0.conv2",
),
)
)
self.assertEqual(len(dataloader_layer), 3)
for i, av in enumerate(dataloader_layer):
assertTensorAlmostEqual(self, av, avs[i].unsqueeze(0))
dataloader = DataLoader(cast(Dataset, AV.load(tmpdir, "dummy")))
self.assertEqual(len(dataloader), 4)
def test_av_load_all_layers_one_identifier(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_01 = torch.randn(36, 16)
av_02 = torch.randn(16, 16)
av_03 = torch.randn(4, 16)
avs_0 = [av_01, av_02, av_03]
av_11 = torch.randn(36, 16)
av_12 = torch.randn(16, 16)
av_13 = torch.randn(4, 16)
avs_1 = [av_11, av_12, av_13]
idf1, idf2 = "idf1", "idf2"
AV.save(
tmpdir,
"dummy",
idf1,
["layer1.0.conv1", "layer1.0.conv2", "layer1.1.conv1"],
avs_0,
"0",
)
dataloader = DataLoader(cast(Dataset, AV.load(tmpdir, "dummy")))
self.assertEqual(len(dataloader), 3)
AV.save(
tmpdir,
"dummy",
idf2,
["layer1.0.conv1", "layer1.0.conv2", "layer1.1.conv1"],
avs_1,
"0",
)
dataloader = DataLoader(cast(Dataset, AV.load(tmpdir, "dummy")))
self.assertEqual(len(dataloader), 6)
# check activations for idf1
dataloader_layer = DataLoader(
cast(Dataset, AV.load(tmpdir, "dummy", identifier=idf1))
)
self.assertEqual(len(dataloader_layer), 3)
for i, av in enumerate(dataloader_layer):
assertTensorAlmostEqual(self, av, avs_0[i].unsqueeze(0))
# check activations for idf2
dataloader_layer = DataLoader(
cast(Dataset, AV.load(tmpdir, "dummy", identifier=idf2))
)
self.assertEqual(len(dataloader_layer), 3)
for i, av in enumerate(dataloader_layer):
assertTensorAlmostEqual(self, av, avs_1[i].unsqueeze(0))
def test_av_sort_files(self) -> None:
files = ["resnet50-cifar-3000", "resnet50-cifar-1000", "resnet50-cifar-2000"]
exp_files = [
"resnet50-cifar-1000",
"resnet50-cifar-2000",
"resnet50-cifar-3000",
]
files = AV.sort_files(files)
self.assertEqual(files, exp_files)
files = ["resnet50-cifar-0900", "resnet50-cifar-0000", "resnet50-cifar-1000"]
exp_files = [
"resnet50-cifar-0000",
"resnet50-cifar-0900",
"resnet50-cifar-1000",
]
files = AV.sort_files(files)
self.assertEqual(files, exp_files)
files = ["resnet50-cifar-100", "resnet50-cifar-90", "resnet50-cifar-3000"]
exp_files = [
"resnet50-cifar-90",
"resnet50-cifar-100",
"resnet50-cifar-3000",
]
files = AV.sort_files(files)
self.assertEqual(files, exp_files)
files = [
"av/pretrained-net-0/fc1-src10-710935.pt",
"av/pretrained-net-0/fc1-src11-755317.pt",
"av/pretrained-net-0/fc3-src2-655646.pt",
"av/pretrained-net-0/fc1-src9-952381.pt",
"av/pretrained-net-0/conv2-src7-811286.pt",
"av/pretrained-net-0/fc1-src10-176141.pt",
"av/pretrained-net-0/conv11-src9-384927.pt",
]
exp_files = [
"av/pretrained-net-0/conv2-src7-811286.pt",
"av/pretrained-net-0/conv11-src9-384927.pt",
"av/pretrained-net-0/fc1-src9-952381.pt",
"av/pretrained-net-0/fc1-src10-176141.pt",
"av/pretrained-net-0/fc1-src10-710935.pt",
"av/pretrained-net-0/fc1-src11-755317.pt",
"av/pretrained-net-0/fc3-src2-655646.pt",
]
files = AV.sort_files(files)
self.assertEqual(files, exp_files)
def test_generate_activation(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
mymodel = BasicLinearReLULinear(num_features)
mydata = RangeDataset(low, high, num_features)
layers: List[str] = [
value[0] for value in mymodel.named_modules() if value[0]
]
# First AV generation on last 2 layers
inputs = torch.stack((mydata[1], mydata[8], mydata[14]))
AV._compute_and_save_activations(
tmpdir, mymodel, "model_id_1", layers[1:], inputs, "test", "0"
)
av_test = AV._construct_file_search(tmpdir, "model_id_1", identifier="test")
av_test = glob.glob(av_test)
self.assertEqual(len(av_test), len(layers[1:]))
# Second AV generation on first 2 layers.
# Second layer overlaps with existing activations, should be loaded.
inputs = torch.stack((mydata[0], mydata[7], mydata[13]))
AV._compute_and_save_activations(
tmpdir, mymodel, "model_id_1", layers[:2], inputs, "test", "0"
)
av_test = AV._construct_file_search(tmpdir, "model_id_1", identifier="test")
av_test = glob.glob(av_test)
self.assertEqual(len(av_test), len(layers))
def test_generate_dataset_activations(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
batch_size = high // 2
mymodel = BasicLinearReLULinear(num_features)
mydata = RangeDataset(low, high, num_features)
layers: List[str] = [
value[0] for value in mymodel.named_modules() if value[0]
]
# First AV generation on last 2 layers
layer_AVDatasets = AV.generate_dataset_activations(
tmpdir,
mymodel,
"model_id1",
layers[1:],
DataLoader(mydata, batch_size, shuffle=False),
"src",
return_activations=True,
)
av_src = AV._construct_file_search(
tmpdir, model_id="model_id1", identifier="src"
)
av_src = glob.glob(av_src)
self.assertEqual(len(av_src), high / batch_size * len(layers[1:]))
self.assertTrue(isinstance(layer_AVDatasets, list))
layer_AVDatasets = cast(list, layer_AVDatasets)
self.assertEqual(len(layer_AVDatasets), len(layers[1:]))
for layer_AVDataset in layer_AVDatasets:
self.assertEqual(len(layer_AVDataset), high / batch_size)
# Second AV generation on first 2 layers.
# Second layer overlaps with existing activations, should be loaded.
layer_AVDatasets = AV.generate_dataset_activations(
tmpdir,
mymodel,
"model_id1",
layers[:2],
DataLoader(mydata, batch_size, shuffle=False),
"src",
return_activations=True,
)
av_src = AV._construct_file_search(
tmpdir, model_id="model_id1", identifier="src"
)
av_src = glob.glob(av_src)
self.assertEqual(len(av_src), high / batch_size * len(layers))
self.assertTrue(isinstance(layer_AVDatasets, list))
layer_AVDatasets = cast(list, layer_AVDatasets)
self.assertEqual(len(layer_AVDatasets), len(layers[:2]))
for layer_AVDataset in layer_AVDatasets:
self.assertEqual(len(layer_AVDataset), high / batch_size)
# check that if return_activations is False, None is returned
self.assertIsNone(
AV.generate_dataset_activations(
tmpdir,
mymodel,
"model_id1",
layers[:2],
DataLoader(mydata, batch_size, shuffle=False),
"src",
return_activations=False,
)
)
def test_equal_activation(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
mymodel = BasicLinearReLULinear(num_features)
mydata = RangeDataset(low, high, num_features)
layers: List[str] = [
value[0] for value in mymodel.named_modules() if value[0]
]
# Generate activations for a single layer from one input
test_input = mydata[1].unsqueeze(0)
model_id = "id_1"
identifier = "test"
num_id = "0"
AV._compute_and_save_activations(
tmpdir, mymodel, model_id, layers[2], test_input, identifier, num_id
)
act_dataset = AV.load(tmpdir, model_id, identifier, layers[2], num_id)
_layer_act = [act.squeeze(0) for act in DataLoader(act_dataset)]
act = torch.cat(_layer_act)
out = mymodel(test_input)
assertTensorAlmostEqual(self, out, act)
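# --- Illustrative usage sketch (not executed by the test suite) ---
# The save/load round trip the tests above verify, condensed into one
# function. Argument order mirrors the AV.save/AV.exists/AV.load calls
# in these tests.
def _example_av_round_trip() -> None:
    with tempfile.TemporaryDirectory() as tmpdir:
        batch = torch.randn(8, 4)
        # Persist one batch of activations for a named layer.
        AV.save(tmpdir, "demo_model", DEFAULT_IDENTIFIER, "layer1", batch, "0")
        assert AV.exists(tmpdir, "demo_model", DEFAULT_IDENTIFIER, "layer1")
        # Load everything back for the identifier and iterate batch by batch.
        dataset = AV.load(tmpdir, "demo_model", identifier=DEFAULT_IDENTIFIER)
        for loaded in DataLoader(cast(Dataset, dataset)):
            assert loaded.squeeze(0).shape == batch.shape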
|
#!/usr/bin/env python3
import torch
from captum._utils.models.linear_model.model import (
SGDLasso,
SGDLinearRegression,
SGDRidge,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
def _evaluate(test_data, classifier):
classifier.eval()
l1_loss = 0.0
l2_loss = 0.0
n = 0
l2_losses = []
with torch.no_grad():
for data in test_data:
if len(data) == 2:
x, y = data
w = None
else:
x, y, w = data
out = classifier(x)
y = y.view(x.shape[0], -1)
assert y.shape == out.shape
if w is None:
l1_loss += (out - y).abs().sum(0).to(dtype=torch.float64)
l2_loss += ((out - y) ** 2).sum(0).to(dtype=torch.float64)
l2_losses.append(((out - y) ** 2).to(dtype=torch.float64))
else:
l1_loss += (
(w.view(-1, 1) * (out - y)).abs().sum(0).to(dtype=torch.float64)
)
l2_loss += (
(w.view(-1, 1) * ((out - y) ** 2)).sum(0).to(dtype=torch.float64)
)
l2_losses.append(
(w.view(-1, 1) * ((out - y) ** 2)).to(dtype=torch.float64)
)
n += x.shape[0]
l2_losses = torch.cat(l2_losses, dim=0)
assert n > 0
# sanity check: the per-element mean matches the accumulated average
assert ((l2_losses.mean(0) - l2_loss / n).abs() <= 0.1).all()
classifier.train()
return {"l1": l1_loss / n, "l2": l2_loss / n}
class TestLinearModel(BaseTest):
MAX_POINTS: int = 3
def train_and_compare(
self,
model_type,
xs,
ys,
expected_loss,
expected_reg=0.0,
expected_hyperplane=None,
norm_hyperplane=True,
weights=None,
delta=0.1,
init_scheme="zeros",
objective="lasso",
bias=True,
):
assert objective in ["lasso", "ridge", "ols"]
if weights is None:
train_dataset = torch.utils.data.TensorDataset(xs, ys)
else:
train_dataset = torch.utils.data.TensorDataset(xs, ys, weights)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=len(train_dataset), num_workers=0
)
model = model_type(bias=bias)
model.fit(
train_loader,
init_scheme=init_scheme,
max_epoch=150,
initial_lr=0.1,
patience=5,
)
self.assertTrue(model.bias() is not None if bias else model.bias() is None)
l2_loss = _evaluate(train_loader, model)["l2"]
if objective == "lasso":
reg = model.representation().norm(p=1).view_as(l2_loss)
elif objective == "ridge":
reg = model.representation().norm(p=2).view_as(l2_loss)
else:
assert objective == "ols"
reg = torch.zeros_like(l2_loss)
if not isinstance(expected_loss, torch.Tensor):
expected_loss = torch.tensor([expected_loss], dtype=l2_loss.dtype).view(1)
if not isinstance(expected_reg, torch.Tensor):
expected_reg = torch.tensor([expected_reg], dtype=reg.dtype)
assertTensorAlmostEqual(self, l2_loss, expected_loss, delta=delta)
assertTensorAlmostEqual(self, reg, expected_reg, delta=delta)
if expected_hyperplane is not None:
h = model.representation()
if norm_hyperplane:
h /= h.norm(p=2)
assertTensorAlmostEqual(self, h, expected_hyperplane, delta=delta)
def test_simple_linear_regression(self) -> None:
xs = torch.randn(TestLinearModel.MAX_POINTS, 1)
ys = 3 * xs + 1
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=0,
expected_reg=0,
objective="ols",
)
self.train_and_compare(
SGDLasso,
xs,
ys,
expected_loss=3,
expected_reg=0,
objective="lasso",
delta=0.2,
)
self.train_and_compare(
SGDRidge,
xs,
ys,
expected_loss=3,
expected_reg=0,
objective="ridge",
delta=0.2,
)
def test_simple_multi_output(self) -> None:
xs = torch.randn(TestLinearModel.MAX_POINTS, 1)
y1 = 3 * xs + 1
y2 = -5 * xs
ys = torch.stack((y1, y2), dim=1).squeeze()
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=torch.DoubleTensor([0, 0]),
expected_reg=torch.DoubleTensor([0, 0]),
objective="ols",
)
def test_simple_linear_classification(self) -> None:
xs = torch.tensor([[0.5, 0.5], [-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5]])
ys = torch.tensor([1.0, -1.0, 1.0, -1.0])
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=0,
expected_reg=0,
objective="ols",
)
self.train_and_compare(
SGDLasso, xs, ys, expected_loss=1, expected_reg=0.0, objective="lasso"
)
self.train_and_compare(
SGDRidge, xs, ys, expected_loss=1, expected_reg=0.0, objective="ridge"
)
ys = torch.tensor([1.0, 0.0, 1.0, 0.0])
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=0,
expected_reg=0,
objective="ols",
)
self.train_and_compare(
SGDLasso, xs, ys, expected_loss=0.25, expected_reg=0, objective="lasso"
)
self.train_and_compare(
SGDRidge, xs, ys, expected_loss=0.25, expected_reg=0, objective="ridge"
)
def test_simple_xor_problem(self) -> None:
r"""
^
o | x
---|--->
x | o
"""
xs = torch.tensor([[0.5, 0.5], [-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5]])
ys = torch.tensor([1.0, 1.0, -1.0, -1.0])
expected_hyperplane = torch.Tensor([[0, 0]])
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=1,
expected_reg=0,
objective="ols",
expected_hyperplane=expected_hyperplane,
norm_hyperplane=False,
bias=False,
)
self.train_and_compare(
SGDLasso,
xs,
ys,
expected_loss=1,
expected_reg=0,
objective="lasso",
expected_hyperplane=expected_hyperplane,
norm_hyperplane=False,
bias=False,
)
self.train_and_compare(
SGDRidge,
xs,
ys,
expected_loss=1,
expected_reg=0,
objective="ridge",
expected_hyperplane=expected_hyperplane,
norm_hyperplane=False,
bias=False,
)
def test_weighted_problem(self) -> None:
r"""
^
0 | x
---|--->
0 | o
"""
xs = torch.tensor([[0.5, 0.5], [-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5]])
ys = torch.tensor([1.0, 1.0, -1.0, -1.0])
weights = torch.tensor([1.0, 0.0, 1.0, 0.0])
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=0,
expected_reg=0,
expected_hyperplane=torch.Tensor([[0.0, 1.0]]),
weights=weights,
norm_hyperplane=True,
init_scheme="zeros",
objective="ols",
bias=False,
)
self.train_and_compare(
SGDLasso,
xs,
ys,
expected_loss=0.5,
expected_reg=0,
expected_hyperplane=torch.Tensor([[0.0, 0.0]]),
weights=weights,
norm_hyperplane=False,
init_scheme="zeros",
objective="lasso",
bias=False,
)
self.train_and_compare(
SGDRidge,
xs,
ys,
expected_loss=0.5,
expected_reg=0,
expected_hyperplane=torch.Tensor([[0.0, 0.0]]),
weights=weights,
norm_hyperplane=False,
init_scheme="zeros",
objective="ridge",
bias=False,
)
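# --- Illustrative usage sketch (not executed by the test suite) ---
# A minimal fit of SGDLinearRegression on y = 3x + 1, mirroring
# train_and_compare above. The fit() keyword names (init_scheme,
# max_epoch, initial_lr, patience) follow the calls in these tests.
def _example_sgd_linear_regression() -> None:
    xs = torch.randn(32, 1)
    ys = 3 * xs + 1
    loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(xs, ys), batch_size=32
    )
    model = SGDLinearRegression(bias=True)
    model.fit(loader, init_scheme="zeros", max_epoch=150, initial_lr=0.1, patience=5)
    # representation() is the learned weight matrix; bias() the intercept.
    weight, intercept = model.representation(), model.bias()
    assert weight.numel() == 1 and intercept is not None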
|
#!/usr/bin/env python3
import torch
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
class HelpersTest(BaseTest):
def test_assert_tensor_almost_equal(self) -> None:
with self.assertRaises(AssertionError) as cm:
assertTensorAlmostEqual(self, [[1.0]], [[1.0]])
self.assertEqual(
cm.exception.args,
("Actual parameter given for comparison must be a tensor.",),
)
with self.assertRaises(AssertionError) as cm:
assertTensorAlmostEqual(self, torch.tensor([[]]), torch.tensor([[1.0]]))
self.assertEqual(
cm.exception.args,
(
"Expected tensor with shape: torch.Size([1, 1]). Actual shape torch.Size([1, 0]).", # noqa: E501
),
)
assertTensorAlmostEqual(self, torch.tensor([[1.0]]), [[1.0]])
with self.assertRaises(AssertionError) as cm:
assertTensorAlmostEqual(self, torch.tensor([[1.0]]), [1.0])
self.assertEqual(
cm.exception.args,
(
"Expected tensor with shape: torch.Size([1]). Actual shape torch.Size([1, 1]).", # noqa: E501
),
)
assertTensorAlmostEqual(
self, torch.tensor([[1.0, 1.0]]), [[1.0, 0.0]], delta=1.0, mode="max"
)
with self.assertRaises(AssertionError) as cm:
assertTensorAlmostEqual(
self, torch.tensor([[1.0, 1.0]]), [[1.0, 0.0]], mode="max"
)
self.assertEqual(
cm.exception.args,
(
"Values at index 0, tensor([1., 1.]) and tensor([1., 0.]), differ more than by 0.0001", # noqa: E501
),
)
assertTensorAlmostEqual(
self, torch.tensor([[1.0, 1.0]]), [[1.0, 0.0]], delta=1.0
)
with self.assertRaises(AssertionError):
assertTensorAlmostEqual(self, torch.tensor([[1.0, 1.0]]), [[1.0, 0.0]])
|
#!/usr/bin/env python3
import unittest
from typing import Callable, Tuple
import torch
from captum._utils.gradient import apply_gradient_requirements
from captum._utils.sample_gradient import (
_reset_sample_grads,
SampleGradientWrapper,
SUPPORTED_MODULES,
)
from packaging import version
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet_One_Conv,
BasicModel_ConvNetWithPaddingDilation,
BasicModel_MultiLayer,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_sample_grads_linear_sum(self) -> None:
model = BasicModel_MultiLayer(multi_input_module=True)
inp = (torch.randn(6, 3), torch.randn(6, 3))
self._compare_sample_grads_per_sample(model, inp, lambda x: torch.sum(x), "sum")
def test_sample_grads_linear_mean(self) -> None:
model = BasicModel_MultiLayer(multi_input_module=True)
inp = (20 * torch.randn(6, 3),)
self._compare_sample_grads_per_sample(model, inp, lambda x: torch.mean(x))
def test_sample_grads_conv_sum(self) -> None:
model = BasicModel_ConvNet_One_Conv()
inp = (123 * torch.randn(6, 1, 4, 4),)
self._compare_sample_grads_per_sample(model, inp, lambda x: torch.sum(x), "sum")
def test_sample_grads_conv_mean_multi_inp(self) -> None:
model = BasicModel_ConvNet_One_Conv()
inp = (20 * torch.randn(6, 1, 4, 4), 9 * torch.randn(6, 1, 4, 4))
self._compare_sample_grads_per_sample(model, inp, lambda x: torch.mean(x))
def test_sample_grads_modified_conv_mean(self) -> None:
if version.parse(torch.__version__) < version.parse("1.8.0"):
raise unittest.SkipTest(
"Skipping sample gradient test with 3D linear module"
"since torch version < 1.8"
)
model = BasicModel_ConvNetWithPaddingDilation()
inp = (20 * torch.randn(6, 1, 5, 5),)
self._compare_sample_grads_per_sample(
model, inp, lambda x: torch.mean(x), "mean"
)
def test_sample_grads_modified_conv_sum(self) -> None:
if version.parse(torch.__version__) < version.parse("1.8.0"):
raise unittest.SkipTest(
"Skipping sample gradient test with 3D linear module"
"since torch version < 1.8"
)
model = BasicModel_ConvNetWithPaddingDilation()
inp = (20 * torch.randn(6, 1, 5, 5),)
self._compare_sample_grads_per_sample(model, inp, lambda x: torch.sum(x), "sum")
def _compare_sample_grads_per_sample(
self,
model: Module,
inputs: Tuple[Tensor, ...],
loss_fn: Callable,
loss_type: str = "mean",
):
wrapper = SampleGradientWrapper(model)
wrapper.add_hooks()
apply_gradient_requirements(inputs)
out = model(*inputs)
wrapper.compute_param_sample_gradients(loss_fn(out), loss_type)
batch_size = inputs[0].shape[0]
for i in range(batch_size):
model.zero_grad()
single_inp = tuple(inp[i : i + 1] for inp in inputs)
out = model(*single_inp)
loss_fn(out).backward()
for layer in model.modules():
if isinstance(layer, tuple(SUPPORTED_MODULES.keys())):
assertTensorAlmostEqual(
self,
layer.weight.grad,
layer.weight.sample_grad[i], # type: ignore
mode="max",
)
assertTensorAlmostEqual(
self,
layer.bias.grad,
layer.bias.sample_grad[i], # type: ignore
mode="max",
)
def test_sample_grads_layer_modules(self):
"""
tests that if `layer_modules` argument is specified for `SampleGradientWrapper`
that only per-sample gradients for the specified layers are calculated
"""
model = BasicModel_ConvNet_One_Conv()
inp = (20 * torch.randn(6, 1, 4, 4), 9 * torch.randn(6, 1, 4, 4))
# possible candidates for `layer_modules`, which are the modules whose
# parameters we want to compute sample grads for
layer_moduless = [[model.conv1], [model.fc1], [model.conv1, model.fc1]]
# hard coded all modules we want to check
all_modules = [model.conv1, model.fc1]
for layer_modules in layer_moduless:
# we will call the wrapper multiple times, so should reset each time
for module in all_modules:
_reset_sample_grads(module)
# compute sample grads
wrapper = SampleGradientWrapper(model, layer_modules)
wrapper.add_hooks()
apply_gradient_requirements(inp)
out = model(*inp)
wrapper.compute_param_sample_gradients(torch.sum(out), "sum")
for module in all_modules:
if module in layer_modules:
# If we calculated the sample grads for the layer, none of its
# parameters' `sample_grad` attributes would be an int: even
# though they were all set to 0 at the beginning of the loop,
# computing sample grads overwrites that 0 with a tensor.
# So, check that we did calculate sample grads for the desired
# layers via the above approach.
for parameter in module.parameters():
assert not isinstance(parameter.sample_grad, int)
else:
# For the layers we do not want sample grads for, their
# `sample_grad` should still be 0, since they should not have been
# over-written.
for parameter in module.parameters():
assert parameter.sample_grad == 0
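# --- Illustrative usage sketch (not executed by the test suite) ---
# The core wrapper flow the tests above verify: hook the model, run a
# forward pass, back-propagate a summed loss, then read the per-sample
# gradients stored on each supported module's parameters.
def _example_sample_gradients() -> None:
    model = BasicModel_MultiLayer(multi_input_module=True)
    inp = (torch.randn(4, 3), torch.randn(4, 3))
    wrapper = SampleGradientWrapper(model)
    wrapper.add_hooks()
    apply_gradient_requirements(inp)
    wrapper.compute_param_sample_gradients(torch.sum(model(*inp)), "sum")
    for layer in model.modules():
        if isinstance(layer, tuple(SUPPORTED_MODULES.keys())):
            # One gradient per example in the batch of 4.
            assert layer.weight.sample_grad.shape[0] == 4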
|
import argparse
import random
from typing import Optional
import captum._utils.models.linear_model.model as pytorch_model_module
import numpy as np
import sklearn.datasets as datasets
import torch
from tests.utils.test_linear_model import _evaluate
from torch.utils.data import DataLoader, TensorDataset
def sklearn_dataset_to_loaders(
data, train_prop=0.7, batch_size=64, num_workers=4, shuffle=False, one_hot=False
):
xs, ys = data
if one_hot and ys.dtype != float:
oh = np.zeros((ys.size, ys.max() + 1))
oh[np.arange(ys.size), ys] = 1
ys = oh
dataset = TensorDataset(torch.FloatTensor(xs), torch.FloatTensor(ys))
lens = [int(train_prop * len(xs))]
lens += [len(xs) - lens[0]]
train_dset, val_dset = torch.utils.data.random_split(dataset, lens)
train_loader = DataLoader(
train_dset,
batch_size=min(batch_size, lens[0]),
shuffle=shuffle,
num_workers=num_workers,
)
val_loader = DataLoader(
val_dset,
batch_size=min(batch_size, lens[1]),
num_workers=num_workers,
shuffle=False,
)
return train_loader, val_loader, xs.shape[1], xs.shape[0]
def compare_to_sk_learn(
max_epoch: int,
train_loader: DataLoader,
val_loader: DataLoader,
train_prop: float,
sklearn_model_type: str,
pytorch_model_type: str,
norm_type: Optional[str],
objective: str,
alpha: float,
init_scheme: str = "zeros",
):
if "LinearRegression" not in sklearn_model_type:
sklearn_classifier = getattr(pytorch_model_module, sklearn_model_type)(
alpha=alpha
)
else:
sklearn_classifier = getattr(pytorch_model_module, sklearn_model_type)()
pytorch_classifier = getattr(pytorch_model_module, pytorch_model_type)(
norm_type=norm_type,  # use the function parameter, not the global args
)
sklearn_stats = sklearn_classifier.fit(
train_data=train_loader,
norm_input=args.norm_sklearn,
)
pytorch_stats = pytorch_classifier.fit(
train_data=train_loader,
max_epoch=max_epoch,
init_scheme=init_scheme,
alpha=alpha,
)
sklearn_stats.update(_evaluate(val_loader, sklearn_classifier))
pytorch_stats.update(_evaluate(val_loader, pytorch_classifier))
train_stats_pytorch = _evaluate(train_loader, pytorch_classifier)
train_stats_sklearn = _evaluate(train_loader, sklearn_classifier)
o_pytorch = {"l2": train_stats_pytorch["l2"]}
o_sklearn = {"l2": train_stats_sklearn["l2"]}
pytorch_h = pytorch_classifier.representation()
sklearn_h = sklearn_classifier.representation()
if objective == "ridge":
o_pytorch["l2_reg"] = alpha * pytorch_h.norm(p=2, dim=-1)
o_sklearn["l2_reg"] = alpha * sklearn_h.norm(p=2, dim=-1)
elif objective == "lasso":
o_pytorch["l1_reg"] = alpha * pytorch_h.norm(p=1, dim=-1)
o_sklearn["l1_reg"] = alpha * sklearn_h.norm(p=1, dim=-1)
rel_diff = (sum(o_sklearn.values()) - sum(o_pytorch.values())) / abs(
sum(o_sklearn.values())
)
return (
{
"objective_rel_diff": rel_diff.tolist(),
"objective_pytorch": o_pytorch,
"objective_sklearn": o_sklearn,
},
sklearn_stats,
pytorch_stats,
)
def main(args):
if args.seed is not None:
torch.manual_seed(args.seed)
random.seed(args.seed)
assert args.norm_type in [None, "layer_norm", "batch_norm"]
print(
"dataset,num_samples,dimensionality,objective_diff,objective_pytorch,"
+ "objective_sklearn,pytorch_time,sklearn_time,pytorch_l2_val,sklearn_l2_val"
)
for dataset in args.datasets:
dataset_fn = getattr(datasets, dataset)
data = dataset_fn(return_X_y=True)
(
train_loader,
val_loader,
in_features,
num_samples,
) = sklearn_dataset_to_loaders(
data,
batch_size=args.batch_size,
num_workers=args.workers,
shuffle=args.shuffle,
one_hot=args.one_hot,
)
similarity, sklearn_stats, pytorch_stats = compare_to_sk_learn(
alpha=args.alpha,
max_epoch=args.max_epoch,
train_loader=train_loader,
val_loader=val_loader,
train_prop=args.training_prop,
pytorch_model_type=args.pytorch_model_type,
sklearn_model_type=args.sklearn_model_type,
norm_type=args.norm_type,
init_scheme=args.init_scheme,
objective=args.objective,
)
print(
f"{dataset},{num_samples},{in_features},{similarity['objective_rel_diff']},"
+ f"{similarity['objective_pytorch']},{similarity['objective_sklearn']},"
+ f"{pytorch_stats['train_time']},{sklearn_stats['train_time']},"
+ f"{pytorch_stats['l2']},{sklearn_stats['l2']}"
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="train & test linear model with SGD + compare to sklearn"
)
parser.add_argument(
"--norm_type",
type=str,
default=None,
)
parser.add_argument(
"--datasets",
type=str,
nargs="+",
default=[
"load_boston",
"load_breast_cancer",
"load_diabetes",
"fetch_california_housing",
],
)
parser.add_argument("--initial_lr", type=float, default=0.01)
parser.add_argument("--alpha", type=float, default=1.0)
parser.add_argument("--max_epoch", type=int, default=100)
parser.add_argument("--seed", type=int, default=None)
parser.add_argument("--shuffle", default=False, action="store_true")
parser.add_argument("--one_hot", default=False, action="store_true")
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument("--training_prop", type=float, default=0.7)
parser.add_argument("--workers", type=int, default=1)
parser.add_argument("--sklearn_model_type", type=str, default="Lasso")
parser.add_argument("--pytorch_model_type", type=str, default="SGDLasso")
parser.add_argument("--init_scheme", type=str, default="xavier")
parser.add_argument("--norm_sklearn", default=False, action="store_true")
parser.add_argument("--objective", type=str, default="lasso")
args = parser.parse_args()
main(args)
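# Example invocation (illustrative; the script filename is hypothetical,
# but every flag is defined by the parser above):
#
#   python compare_linear_models.py \
#       --datasets load_diabetes fetch_california_housing \
#       --sklearn_model_type Lasso --pytorch_model_type SGDLasso \
#       --objective lasso --alpha 0.1 --max_epoch 200 --seed 1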
|
#!/usr/bin/env python3
from typing import cast, Tuple, Union
import numpy as np
import torch
from captum._utils.typing import Tensor
from captum.attr._core.gradient_shap import GradientShap
from captum.attr._core.integrated_gradients import IntegratedGradients
from numpy import ndarray
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicLinearModel, BasicModel2
from tests.helpers.classification_models import SoftmaxModel
class Test(BaseTest):
# This test reproduces some of the test cases from the original implementation
# https://github.com/slundberg/shap/
# explainers/test_gradient.py
def test_basic_multi_input(self) -> None:
batch_size = 10
x1 = torch.ones(batch_size, 3)
x2 = torch.ones(batch_size, 4)
inputs = (x1, x2)
batch_size_baselines = 20
baselines = (
torch.zeros(batch_size_baselines, 3),
torch.zeros(batch_size_baselines, 4),
)
model = BasicLinearModel()
model.eval()
model.zero_grad()
np.random.seed(0)
torch.manual_seed(0)
gradient_shap = GradientShap(model)
n_samples = 50
attributions, delta = cast(
Tuple[Tuple[Tensor, ...], Tensor],
gradient_shap.attribute(
inputs, baselines, n_samples=n_samples, return_convergence_delta=True
),
)
attributions_without_delta = gradient_shap.attribute((x1, x2), baselines)
_assert_attribution_delta(self, inputs, attributions, n_samples, delta)
# Compare with integrated gradients
ig = IntegratedGradients(model)
baselines = (torch.zeros(batch_size, 3), torch.zeros(batch_size, 4))
attributions_ig = ig.attribute(inputs, baselines=baselines)
self._assert_shap_ig_comparison(attributions, attributions_ig)
# compare attributions retrieved with and without
# `return_convergence_delta` flag
for attribution, attribution_without_delta in zip(
attributions, attributions_without_delta
):
assertTensorAlmostEqual(self, attribution, attribution_without_delta)
def test_basic_multi_input_wo_multiplying_by_inputs(self) -> None:
batch_size = 10
x1 = torch.ones(batch_size, 3)
x2 = torch.ones(batch_size, 4)
inputs = (x1, x2)
batch_size_baselines = 20
baselines = (
torch.ones(batch_size_baselines, 3) + 2e-5,
torch.ones(batch_size_baselines, 4) + 2e-5,
)
model = BasicLinearModel()
model.eval()
model.zero_grad()
np.random.seed(0)
torch.manual_seed(0)
gradient_shap = GradientShap(model)
gradient_shap_wo_multiplying_by_inputs = GradientShap(
model, multiply_by_inputs=False
)
n_samples = 50
attributions = cast(
Tuple[Tuple[Tensor, ...], Tensor],
gradient_shap.attribute(
inputs,
baselines,
n_samples=n_samples,
stdevs=0.0,
),
)
attributions_wo_multiplying_by_inputs = cast(
Tuple[Tuple[Tensor, ...], Tensor],
gradient_shap_wo_multiplying_by_inputs.attribute(
inputs,
baselines,
n_samples=n_samples,
stdevs=0.0,
),
)
assertTensorAlmostEqual(
self,
attributions_wo_multiplying_by_inputs[0] * (x1 - baselines[0][0:1]),
attributions[0],
)
assertTensorAlmostEqual(
self,
attributions_wo_multiplying_by_inputs[1] * (x2 - baselines[1][0:1]),
attributions[1],
)
def test_classification_baselines_as_function(self) -> None:
num_in = 40
inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
def generate_baselines() -> Tensor:
return torch.arange(0.0, num_in * 4.0).reshape(4, num_in)
def generate_baselines_with_inputs(inputs: Tensor) -> Tensor:
inp_shape = cast(Tuple[int, ...], inputs.shape)
return torch.arange(0.0, inp_shape[1] * 2.0).reshape(2, inp_shape[1])
def generate_baselines_returns_array() -> ndarray:
return np.arange(0.0, num_in * 4.0).reshape(4, num_in)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
model.eval()
model.zero_grad()
gradient_shap = GradientShap(model)
n_samples = 10
attributions, delta = gradient_shap.attribute(
inputs,
baselines=generate_baselines,
target=torch.tensor(1),
n_samples=n_samples,
stdevs=0.009,
return_convergence_delta=True,
)
_assert_attribution_delta(self, (inputs,), (attributions,), n_samples, delta)
attributions, delta = gradient_shap.attribute(
inputs,
baselines=generate_baselines_with_inputs,
target=torch.tensor(1),
n_samples=n_samples,
stdevs=0.00001,
return_convergence_delta=True,
)
_assert_attribution_delta(self, (inputs,), (attributions,), n_samples, delta)
with self.assertRaises(AssertionError):
attributions, delta = gradient_shap.attribute( # type: ignore
inputs,
# Intentionally passing wrong type.
baselines=generate_baselines_returns_array,
target=torch.tensor(1),
n_samples=n_samples,
stdevs=0.00001,
return_convergence_delta=True,
)
def test_classification(self) -> None:
num_in = 40
inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
baselines = torch.arange(0.0, num_in * 4.0).reshape(4, num_in)
target = torch.tensor(1)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
model.eval()
model.zero_grad()
gradient_shap = GradientShap(model)
n_samples = 10
attributions, delta = gradient_shap.attribute(
inputs,
baselines=baselines,
target=target,
n_samples=n_samples,
stdevs=0.009,
return_convergence_delta=True,
)
_assert_attribution_delta(self, (inputs,), (attributions,), n_samples, delta)
# try to call `compute_convergence_delta` externally
with self.assertRaises(AssertionError):
gradient_shap.compute_convergence_delta(
attributions, inputs, baselines, target=target
)
# now, let's expand target and choose random baselines from `baselines` tensor
rand_indices = np.random.choice(baselines.shape[0], inputs.shape[0]).tolist()
chosen_baselines = baselines[rand_indices]
target_extended = torch.tensor([1, 1])
external_delta = gradient_shap.compute_convergence_delta(
attributions, chosen_baselines, inputs, target=target_extended
)
_assert_delta(self, external_delta)
# Compare with integrated gradients
ig = IntegratedGradients(model)
baselines = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
attributions_ig = ig.attribute(inputs, baselines=baselines, target=target)
self._assert_shap_ig_comparison((attributions,), (attributions_ig,))
def test_basic_relu_multi_input(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[3.0]])
input2 = torch.tensor([[1.0]], requires_grad=True)
baseline1 = torch.tensor([[0.0]])
baseline2 = torch.tensor([[0.0]])
inputs = (input1, input2)
baselines = (baseline1, baseline2)
gs = GradientShap(model)
n_samples = 20000
attributions, delta = cast(
Tuple[Tuple[Tensor, ...], Tensor],
gs.attribute(
inputs,
baselines=baselines,
n_samples=n_samples,
return_convergence_delta=True,
),
)
_assert_attribution_delta(
self, inputs, attributions, n_samples, delta, delta_thresh=0.008
)
ig = IntegratedGradients(model)
attributions_ig = ig.attribute(inputs, baselines=baselines)
self._assert_shap_ig_comparison(attributions, attributions_ig)
def _assert_shap_ig_comparison(
self, attributions1: Tuple[Tensor, ...], attributions2: Tuple[Tensor, ...]
) -> None:
for attribution1, attribution2 in zip(attributions1, attributions2):
for attr_row1, attr_row2 in zip(attribution1, attribution2):
assertTensorAlmostEqual(self, attr_row1, attr_row2, 0.05, "max")
def _assert_attribution_delta(
test: BaseTest,
inputs: Union[Tensor, Tuple[Tensor, ...]],
attributions: Union[Tensor, Tuple[Tensor, ...]],
n_samples: int,
delta: Tensor,
delta_thresh: Union[float, Tensor] = 0.0006,
is_layer: bool = False,
) -> None:
if not is_layer:
for input, attribution in zip(inputs, attributions):
test.assertEqual(attribution.shape, input.shape)
if isinstance(inputs, tuple):
bsz = inputs[0].shape[0]
else:
bsz = inputs.shape[0]
test.assertEqual([bsz * n_samples], list(delta.shape))
delta = torch.mean(delta.reshape(bsz, -1), dim=1)
_assert_delta(test, delta, delta_thresh)
def _assert_delta(
test: BaseTest, delta: Tensor, delta_thresh: Union[Tensor, float] = 0.0006
) -> None:
delta_condition = (delta.abs() < delta_thresh).all()
test.assertTrue(
delta_condition,
"Sum of SHAP values {} does"
" not match the difference of endpoints.".format(delta),
)
|
#!/usr/bin/env python3
from typing import Any, Callable, cast, Dict, Optional, Tuple, Type
import torch
from captum._utils.common import _format_additional_forward_args
from captum.attr._core.feature_permutation import FeaturePermutation
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.lime import Lime
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._utils.attribution import Attribution, InternalAttribution
from tests.attr.helpers.gen_test_utils import (
gen_test_name,
get_target_layer,
parse_test_config,
should_create_generated_test,
)
from tests.attr.helpers.test_config import config
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest, deep_copy_args
from tests.helpers.basic_models import BasicModel_MultiLayer
from torch import Tensor
from torch.nn import Module
"""
Tests in this file are dynamically generated based on the config
defined in tests/attr/helpers/test_config.py. To add new test cases,
read the documentation in test_config.py and add cases based on the
schema described there.
"""
class TargetsMeta(type):
"""
Target tests created in TargetsMeta apply to any test case with targets being a
list or tensor.
Attribution of each example is computed independently with the appropriate target
and compared to the corresponding result of attributing to a batch with a tensor
/ list of targets.
"""
def __new__(cls, name: str, bases: Tuple, attrs: Dict):
for test_config in config:
(
algorithms,
model,
args,
layer,
noise_tunnel,
baseline_distr,
) = parse_test_config(test_config)
target_delta = (
test_config["target_delta"] if "target_delta" in test_config else 0.0001
)
if "target" not in args or not isinstance(args["target"], (list, Tensor)):
continue
for algorithm in algorithms:
# FeaturePermutation requires a batch of inputs,
# so these generated single-target tests are skipped for it
if issubclass(
algorithm, FeaturePermutation
) or not should_create_generated_test(algorithm):
continue
test_method = cls.make_single_target_test(
algorithm,
model,
layer,
args,
target_delta,
noise_tunnel,
baseline_distr,
)
test_name = gen_test_name(
"test_target",
cast(str, test_config["name"]),
algorithm,
noise_tunnel,
)
if test_name in attrs:
raise AssertionError(
"Trying to overwrite existing test with name: %r" % test_name
)
attrs[test_name] = test_method
return super(TargetsMeta, cls).__new__(cls, name, bases, attrs)
# Arguments are deep copied to ensure tests are independent and are not affected
# by any modifications within a previous test.
@classmethod
@deep_copy_args
def make_single_target_test(
cls,
algorithm: Type[Attribution],
model: Module,
layer: Optional[str],
args: Dict[str, Any],
target_delta: float,
noise_tunnel: bool,
baseline_distr: bool,
) -> Callable:
"""
This method creates a single target test for the given algorithm and parameters.
"""
target_layer = get_target_layer(model, layer) if layer is not None else None
# Obtains initial arguments to replace with each example
# individually.
original_inputs = args["inputs"]
original_targets = args["target"]
original_additional_forward_args = (
_format_additional_forward_args(args["additional_forward_args"])
if "additional_forward_args" in args
else None
)
num_examples = (
len(original_inputs)
if isinstance(original_inputs, Tensor)
else len(original_inputs[0])
)
replace_baselines = "baselines" in args and not baseline_distr
if replace_baselines:
original_baselines = args["baselines"]
def target_test_assert(self) -> None:
attr_method: Attribution
if target_layer:
internal_algorithm = cast(Type[InternalAttribution], algorithm)
attr_method = internal_algorithm(model, target_layer)
else:
attr_method = algorithm(model)
if noise_tunnel:
attr_method = NoiseTunnel(attr_method)
attributions_orig = attr_method.attribute(**args)
self.setUp()
for i in range(num_examples):
args["target"] = (
original_targets[i]
if len(original_targets) == num_examples
else original_targets
)
args["inputs"] = (
original_inputs[i : i + 1]
if isinstance(original_inputs, Tensor)
else tuple(
original_inp[i : i + 1] for original_inp in original_inputs
)
)
if original_additional_forward_args is not None:
args["additional_forward_args"] = tuple(
single_add_arg[i : i + 1]
if isinstance(single_add_arg, Tensor)
else single_add_arg
for single_add_arg in original_additional_forward_args
)
if replace_baselines:
if isinstance(original_inputs, Tensor):
args["baselines"] = original_baselines[i : i + 1]
elif isinstance(original_baselines, tuple):
args["baselines"] = tuple(
single_baseline[i : i + 1]
if isinstance(single_baseline, Tensor)
else single_baseline
for single_baseline in original_baselines
)
# Lime methods compute attributions for a batch sequentially,
# so the random seed must not be reset between examples;
# setUp() is therefore skipped for Lime inside this loop.
if not issubclass(algorithm, Lime):
self.setUp()
single_attr = attr_method.attribute(**args)
current_orig_attributions = (
attributions_orig[i : i + 1]
if isinstance(attributions_orig, Tensor)
else tuple(
single_attrib[i : i + 1] for single_attrib in attributions_orig
)
)
assertTensorTuplesAlmostEqual(
self,
current_orig_attributions,
single_attr,
delta=target_delta,
mode="max",
)
if (
not issubclass(algorithm, Lime)
and len(original_targets) == num_examples
):
# If original_targets contained multiple elements, then
# we also compare with setting targets to a list with
# a single element.
args["target"] = original_targets[i : i + 1]
self.setUp()
single_attr_target_list = attr_method.attribute(**args)
assertTensorTuplesAlmostEqual(
self,
current_orig_attributions,
single_attr_target_list,
delta=target_delta,
mode="max",
)
return target_test_assert
class TestTargets(BaseTest, metaclass=TargetsMeta):
def test_simple_target_missing_error(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.zeros((1, 3))
with self.assertRaises(AssertionError):
attr = IntegratedGradients(net)
attr.attribute(inp)
def test_multi_target_error(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.zeros((1, 3))
with self.assertRaises(AssertionError):
attr = IntegratedGradients(net)
attr.attribute(inp, additional_forward_args=(None, True), target=(1, 0))
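# --- Illustrative property sketch (not one of the generated tests) ---
# The invariant TargetsMeta encodes, written out for a single method:
# attributing a batch with a list of targets matches attributing each
# example separately with its own target.
def _example_target_equivalence() -> None:
    net = BasicModel_MultiLayer()
    inp = torch.randn(3, 3)
    ig = IntegratedGradients(net)
    batched = ig.attribute(inp, target=[0, 1, 0])
    for i, t in enumerate([0, 1, 0]):
        single = ig.attribute(inp[i : i + 1], target=t)
        assert torch.allclose(batched[i : i + 1], single, atol=1e-4)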
|
#!/usr/bin/env python3
from typing import Any, cast
import torch
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.input_x_gradient import InputXGradient
from captum.attr._core.noise_tunnel import NoiseTunnel
from tests.attr.test_saliency import _get_basic_config, _get_multiargs_basic_config
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.classification_models import SoftmaxModel
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_input_x_gradient_test_basic_vanilla(self) -> None:
self._input_x_gradient_base_assert(*_get_basic_config())
def test_input_x_gradient_test_basic_smoothgrad(self) -> None:
self._input_x_gradient_base_assert(*_get_basic_config(), nt_type="smoothgrad")
def test_input_x_gradient_test_basic_vargrad(self) -> None:
self._input_x_gradient_base_assert(*_get_basic_config(), nt_type="vargrad")
def test_saliency_test_basic_multi_variable_vanilla(self) -> None:
self._input_x_gradient_base_assert(*_get_multiargs_basic_config())
def test_saliency_test_basic_multi_variable_smoothgrad(self) -> None:
self._input_x_gradient_base_assert(
*_get_multiargs_basic_config(), nt_type="smoothgrad"
)
def test_saliency_test_basic_multi_vargrad(self) -> None:
self._input_x_gradient_base_assert(
*_get_multiargs_basic_config(), nt_type="vargrad"
)
def test_input_x_gradient_classification_vanilla(self) -> None:
self._input_x_gradient_classification_assert()
def test_input_x_gradient_classification_smoothgrad(self) -> None:
self._input_x_gradient_classification_assert(nt_type="smoothgrad")
def test_input_x_gradient_classification_vargrad(self) -> None:
self._input_x_gradient_classification_assert(nt_type="vargrad")
def _input_x_gradient_base_assert(
self,
model: Module,
inputs: TensorOrTupleOfTensorsGeneric,
expected_grads: TensorOrTupleOfTensorsGeneric,
additional_forward_args: Any = None,
nt_type: str = "vanilla",
) -> None:
input_x_grad = InputXGradient(model)
self.assertTrue(input_x_grad.multiplies_by_inputs)
attributions: TensorOrTupleOfTensorsGeneric
if nt_type == "vanilla":
attributions = input_x_grad.attribute(
inputs,
additional_forward_args=additional_forward_args,
)
else:
nt = NoiseTunnel(input_x_grad)
attributions = nt.attribute(
inputs,
nt_type=nt_type,
nt_samples=10,
stdevs=0.0002,
additional_forward_args=additional_forward_args,
)
if isinstance(attributions, tuple):
for input, attribution, expected_grad in zip(
inputs, attributions, expected_grads
):
if nt_type == "vanilla":
self._assert_attribution(expected_grad, input, attribution)
self.assertEqual(input.shape, attribution.shape)
elif isinstance(attributions, Tensor):
if nt_type == "vanilla":
self._assert_attribution(expected_grads, inputs, attributions)
self.assertEqual(
cast(Tensor, inputs).shape, cast(Tensor, attributions).shape
)
def _assert_attribution(self, expected_grad, input, attribution):
assertTensorAlmostEqual(
self,
attribution,
(expected_grad * input),
delta=0.05,
mode="max",
)
def _input_x_gradient_classification_assert(self, nt_type: str = "vanilla") -> None:
num_in = 5
input = torch.tensor([[0.0, 1.0, 2.0, 3.0, 4.0]], requires_grad=True)
target = torch.tensor(5)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
input_x_grad = InputXGradient(model.forward)
if nt_type == "vanilla":
attributions = input_x_grad.attribute(input, target)
output = model(input)[:, target]
output.backward()
expected = input.grad * input
assertTensorAlmostEqual(self, attributions, expected, 0.00001, "max")
else:
nt = NoiseTunnel(input_x_grad)
attributions = nt.attribute(
input, nt_type=nt_type, nt_samples=10, stdevs=1.0, target=target
)
self.assertEqual(attributions.shape, input.shape)
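# --- Illustrative usage sketch (not executed by the test suite) ---
# InputXGradient reduces to grad * input, which is exactly what the
# classification assert above checks; condensed here:
def _example_input_x_gradient() -> None:
    model = SoftmaxModel(5, 20, 10)
    inp = torch.tensor([[0.0, 1.0, 2.0, 3.0, 4.0]], requires_grad=True)
    attributions = InputXGradient(model).attribute(inp, target=5)
    model(inp)[:, 5].backward()
    assert torch.allclose(attributions, inp.grad * inp, atol=1e-4)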
|
#!/usr/bin/env python3
import torch
from captum.attr._core.noise_tunnel import SUPPORTED_NOISE_TUNNEL_TYPES
from captum.attr._utils.common import _validate_input, _validate_noise_tunnel_type
from tests.helpers.basic import BaseTest
class Test(BaseTest):
def test_validate_input(self) -> None:
with self.assertRaises(AssertionError):
_validate_input((torch.tensor([-1.0, 1.0]),), (torch.tensor([-2.0]),))
_validate_input(
(torch.tensor([-1.0, 1.0]),), (torch.tensor([-1.0, 1.0]),), n_steps=-1
)
_validate_input(
(torch.tensor([-1.0, 1.0]),),
(torch.tensor([-1.0, 1.0]),),
method="abcde",
)
_validate_input((torch.tensor([-1.0]),), (torch.tensor([-2.0]),))
_validate_input(
(torch.tensor([-1.0]),), (torch.tensor([-2.0]),), method="gausslegendre"
)
def test_validate_nt_type(self) -> None:
with self.assertRaises(AssertionError):
_validate_noise_tunnel_type("abc", SUPPORTED_NOISE_TUNNEL_TYPES)
_validate_noise_tunnel_type("smoothgrad", SUPPORTED_NOISE_TUNNEL_TYPES)
_validate_noise_tunnel_type("smoothgrad_sq", SUPPORTED_NOISE_TUNNEL_TYPES)
_validate_noise_tunnel_type("vargrad", SUPPORTED_NOISE_TUNNEL_TYPES)
|
#!/usr/bin/env python3
import unittest
from typing import Any
import torch
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.guided_grad_cam import GuidedGradCam
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel_ConvNet_One_Conv
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
ex = [
[
[
[0.0, 0.0, 4.0, 4.0],
[0.0, 0.0, 12.0, 8.0],
[28.0, 84.0, 97.5, 65.0],
[28.0, 56.0, 65.0, 32.5],
]
]
]
self._guided_grad_cam_test_assert(net, net.relu1, inp, ex)
def test_simple_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex = [
[
[
[14.5, 29.0, 38.0, 19.0],
[29.0, 58.0, 76.0, 38.0],
[65.0, 130.0, 148.0, 74.0],
[32.5, 65.0, 74.0, 37.0],
]
]
]
self._guided_grad_cam_test_assert(net, net.conv1, (inp, inp2), (ex, ex))
def test_simple_multi_input_relu_input(self) -> None:
net = BasicModel_ConvNet_One_Conv(inplace=True)
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex = [
[
[
[14.5, 29.0, 38.0, 19.0],
[29.0, 58.0, 76.0, 38.0],
[65.0, 130.0, 148.0, 74.0],
[32.5, 65.0, 74.0, 37.0],
]
]
]
self._guided_grad_cam_test_assert(
net, net.relu1, (inp, inp2), (ex, ex), attribute_to_layer_input=True
)
def test_simple_multi_input_conv_inplace(self) -> None:
net = BasicModel_ConvNet_One_Conv(inplace=True)
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex = [
[
[
[14.5, 29.0, 38.0, 19.0],
[29.0, 58.0, 76.0, 38.0],
[65.0, 130.0, 148.0, 74.0],
[32.5, 65.0, 74.0, 37.0],
]
]
]
self._guided_grad_cam_test_assert(net, net.conv1, (inp, inp2), (ex, ex))
def test_improper_dims_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones(1)
ex = [
[
[
[14.5, 29.0, 38.0, 19.0],
[29.0, 58.0, 76.0, 38.0],
[65.0, 130.0, 148.0, 74.0],
[32.5, 65.0, 74.0, 37.0],
]
]
]
self._guided_grad_cam_test_assert(net, net.conv1, (inp, inp2), (ex, []))
def test_improper_method_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones(1)
self._guided_grad_cam_test_assert(
net, net.conv1, (inp, inp2), ([], []), interpolate_mode="made_up_nonlinear"
)
def _guided_grad_cam_test_assert(
self,
model: Module,
target_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
expected,
additional_input: Any = None,
interpolate_mode: str = "nearest",
attribute_to_layer_input: bool = False,
) -> None:
guided_gc = GuidedGradCam(model, target_layer)
self.assertFalse(guided_gc.multiplies_by_inputs)
attributions = guided_gc.attribute(
test_input,
target=0,
additional_forward_args=additional_input,
interpolate_mode=interpolate_mode,
attribute_to_layer_input=attribute_to_layer_input,
)
if isinstance(test_input, tuple):
for i in range(len(test_input)):
assertTensorAlmostEqual(
self,
attributions[i],
expected[i],
delta=0.01,
)
else:
assertTensorAlmostEqual(
self,
attributions,
expected,
delta=0.01,
)
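# --- Illustrative usage sketch (not executed by the test suite) ---
# The GuidedGradCam call the asserts above wrap: pick a conv/relu layer,
# attribute with respect to a target, and receive attributions
# interpolated back to the input resolution.
def _example_guided_grad_cam() -> None:
    net = BasicModel_ConvNet_One_Conv()
    inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
    attributions = GuidedGradCam(net, net.relu1).attribute(inp, target=0)
    assert attributions.shape == inp.shape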
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
from enum import Enum
from typing import Any, Callable, cast, Dict, Optional, Tuple, Type
import torch
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._models.base import _set_deep_layer_value
from captum.attr._utils.attribution import Attribution, InternalAttribution
from tests.attr.helpers.gen_test_utils import (
gen_test_name,
get_target_layer,
parse_test_config,
should_create_generated_test,
)
from tests.attr.helpers.test_config import config
from tests.helpers.basic import BaseTest, deep_copy_args
from torch.nn import Module
"""
Tests in this file are dynamically generated based on the config
defined in tests/attr/helpers/test_config.py. To add new test cases,
read the documentation in test_config.py and add cases based on the
schema described there.
"""
class HookRemovalMode(Enum):
"""
Defines modes for hook removal tests:
`normal` - Verifies no hooks remain after running an attribution method
normally
`incorrect_target_or_neuron` - Verifies no hooks remain after an incorrect
target and neuron_selector are provided, which causes an assertion error
in the algorithm.
`invalid_module` - Verifies no hooks remain after an invalid module
is executed, which causes an assertion error in model execution.
"""
normal = 1
incorrect_target_or_neuron = 2
invalid_module = 3
class ErrorModule(Module):
def __init__(
self,
) -> None:
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, *args, **kwargs):
raise AssertionError("Raising error on execution")
class HookRemovalMeta(type):
"""
Attribution is computed either normally or with the changes based on the
mode, which cause an error. Once attribution is calculated, test verifies
that no forward, backward or forward pre hooks remain on any modules.
"""
def __new__(cls, name: str, bases: Tuple, attrs: Dict):
created_tests: Dict[Tuple[Type[Attribution], HookRemovalMode], bool] = {}
for test_config in config:
(
algorithms,
model,
args,
layer,
noise_tunnel,
_,
) = parse_test_config(test_config)
for algorithm in algorithms:
if not should_create_generated_test(algorithm):
continue
for mode in HookRemovalMode:
if mode is HookRemovalMode.invalid_module and layer is None:
continue
# Only one test per algorithm and mode is necessary
if (algorithm, mode) in created_tests:
continue
test_method = cls.make_single_hook_removal_test(
algorithm,
model,
layer,
args,
noise_tunnel,
mode,
)
test_name = gen_test_name(
"test_hook_removal_" + mode.name,
cast(str, test_config["name"]),
algorithm,
noise_tunnel,
)
if test_name in attrs:
raise AssertionError(
"Trying to overwrite existing test with name: %r"
% test_name
)
attrs[test_name] = test_method
created_tests[(algorithm, mode)] = True
return super(HookRemovalMeta, cls).__new__(cls, name, bases, attrs)
# Arguments are deep copied to ensure tests are independent and are not affected
# by any modifications within a previous test.
@classmethod
@deep_copy_args
def make_single_hook_removal_test(
cls,
algorithm: Type[Attribution],
model: Module,
layer: Optional[str],
args: Dict[str, Any],
noise_tunnel: bool,
mode: HookRemovalMode,
) -> Callable:
"""
This method creates a single hook removal test for the given
algorithm and parameters.
"""
def hook_removal_test_assert(self) -> None:
attr_method: Attribution
expect_error = False
if layer is not None:
if mode is HookRemovalMode.invalid_module:
expect_error = True
if isinstance(layer, list):
_set_deep_layer_value(model, layer[0], ErrorModule())
else:
_set_deep_layer_value(model, layer, ErrorModule())
target_layer = get_target_layer(model, layer)
internal_algorithm = cast(Type[InternalAttribution], algorithm)
attr_method = internal_algorithm(model, target_layer)
else:
attr_method = algorithm(model)
if noise_tunnel:
attr_method = NoiseTunnel(attr_method)
if mode is HookRemovalMode.incorrect_target_or_neuron:
# Overwriting target and neuron index arguments to
# incorrect values.
if "target" in args:
args["target"] = (9999,) * 20
expect_error = True
if "neuron_selector" in args:
args["neuron_selector"] = (9999,) * 20
expect_error = True
if expect_error:
with self.assertRaises(AssertionError):
attr_method.attribute(**args)
else:
attr_method.attribute(**args)
def check_leftover_hooks(module):
self.assertEqual(len(module._forward_hooks), 0)
self.assertEqual(len(module._backward_hooks), 0)
self.assertEqual(len(module._forward_pre_hooks), 0)
model.apply(check_leftover_hooks)
return hook_removal_test_assert
class TestHookRemoval(BaseTest, metaclass=HookRemovalMeta):
pass
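# --- Illustrative check sketch (not one of the generated tests) ---
# The leftover-hook invariant the meta-tests enforce, applied manually
# to a single attribution run.
def _example_no_leftover_hooks() -> None:
    from captum.attr import IntegratedGradients
    from tests.helpers.basic_models import BasicModel_MultiLayer

    model = BasicModel_MultiLayer()
    IntegratedGradients(model).attribute(torch.zeros(1, 3), target=0)
    for module in model.modules():
        assert len(module._forward_hooks) == 0
        assert len(module._backward_hooks) == 0
        assert len(module._forward_pre_hooks) == 0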
|
#!/usr/bin/env python3
import functools
import inspect
from typing import Callable, Dict, Tuple
import torch
from captum._utils.gradient import _forward_layer_eval
from captum.attr import (
DeepLift,
DeepLiftShap,
FeatureAblation,
GradientShap,
InputXGradient,
IntegratedGradients,
LayerDeepLift,
LayerDeepLiftShap,
LayerFeatureAblation,
LayerGradientShap,
LayerGradientXActivation,
LayerIntegratedGradients,
)
from captum.attr._utils.input_layer_wrapper import ModelInputWrapper
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel,
BasicModel_MultiLayer_TrueMultiInput,
MixedKwargsAndArgsModule,
)
layer_methods_to_test_with_equiv = [
# layer_method, equiv_method, whether or not to use multiple layers
(LayerIntegratedGradients, IntegratedGradients, [True, False]),
(LayerGradientXActivation, InputXGradient, [True, False]),
(LayerFeatureAblation, FeatureAblation, [False]),
(LayerDeepLift, DeepLift, [False]),
(LayerDeepLiftShap, DeepLiftShap, [False]),
(LayerGradientShap, GradientShap, [False]),
# TODO: add other algorithms here
]
class InputLayerMeta(type):
def __new__(cls, name: str, bases: Tuple, attrs: Dict):
for (
layer_method,
equiv_method,
multi_layers,
) in layer_methods_to_test_with_equiv:
for multi_layer in multi_layers:
test_name = (
f"test_{layer_method.__name__}"
+ f"_{equiv_method.__name__}_{multi_layer}"
)
                # Bind the loop variables as default arguments; otherwise
                # every generated test would close over the final loop
                # iteration.
                attrs[test_name] = (
                    lambda self, lm=layer_method, em=equiv_method, ml=multi_layer: (
                        self.layer_method_with_input_layer_patches(lm, em, ml)
                    )
                )
return super(InputLayerMeta, cls).__new__(cls, name, bases, attrs)
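# Editorial sketch (assumption: not original code): why binding the loop
# variables as lambda defaults above matters. Lambdas close over variables,
# not values, so without defaults every generated test would run with the
# final (layer_method, equiv_method, multi_layer) triple.
def _late_binding_demo() -> None:
    late = [lambda: i for i in range(3)]  # late binding: all return 2
    early = [lambda i=i: i for i in range(3)]  # value captured at definition
    assert [f() for f in late] == [2, 2, 2]
    assert [f() for f in early] == [0, 1, 2]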
class TestInputLayerWrapper(BaseTest, metaclass=InputLayerMeta):
def test_forward_layer_eval_on_mixed_args_kwargs_module(self) -> None:
x = torch.randn(10, 5)
y = torch.randn(10, 5)
model = MixedKwargsAndArgsModule()
self.forward_eval_layer_with_inputs_helper(model, {"x": x})
self.forward_eval_layer_with_inputs_helper(model, {"x": x, "y": y})
def layer_method_with_input_layer_patches(
self,
layer_method_class: Callable,
equiv_method_class: Callable,
multi_layer: bool,
) -> None:
model = BasicModel_MultiLayer_TrueMultiInput() if multi_layer else BasicModel()
input_names = ["x1", "x2", "x3", "x4"] if multi_layer else ["input"]
model = ModelInputWrapper(model)
layers = [model.input_maps[inp] for inp in input_names]
layer_method = layer_method_class(
model, layer=layers if multi_layer else layers[0]
)
equivalent_method = equiv_method_class(model)
inputs = tuple(torch.rand(5, 3) for _ in input_names)
baseline = tuple(torch.zeros(5, 3) for _ in input_names)
args = inspect.getfullargspec(equivalent_method.attribute.__wrapped__).args
args_to_use = [inputs]
if "baselines" in args:
args_to_use += [baseline]
a1 = layer_method.attribute(*args_to_use, target=0)
a2 = layer_method.attribute(
*args_to_use, target=0, attribute_to_layer_input=True
)
real_attributions = equivalent_method.attribute(*args_to_use, target=0)
if not isinstance(a1, tuple):
a1 = (a1,)
a2 = (a2,)
if not isinstance(real_attributions, tuple):
real_attributions = (real_attributions,)
assertTensorTuplesAlmostEqual(self, a1, a2)
assertTensorTuplesAlmostEqual(self, a1, real_attributions)
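    # Editorial note: ModelInputWrapper exposes each named forward argument as
    # an identity sub-module (via `input_maps`), so layer-attribution methods
    # pointed at those sub-modules should match plain input attributions,
    # which is exactly what the assertions above verify.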
def forward_eval_layer_with_inputs_helper(self, model, inputs_to_test):
        # Hard-coded for simplicity:
        #   0 = pass the input positionally, 1 = pass it as a keyword
        #   => no 0s after the first 1 (left to right), since positional
        #      arguments cannot follow keyword arguments
        #
        # Used to test handling of mixed *args / **kwargs.
use_args_or_kwargs = [
[[0], [1]],
[
[0, 0],
[0, 1],
[1, 1],
],
]
model = ModelInputWrapper(model)
def forward_func(*args, args_or_kwargs=None):
# convert to args or kwargs to test *args and **kwargs wrapping behavior
new_args = []
new_kwargs = {}
for args_or_kwarg, name, inp in zip(
args_or_kwargs, inputs_to_test.keys(), args
):
if args_or_kwarg:
new_kwargs[name] = inp
else:
new_args.append(inp)
return model(*new_args, **new_kwargs)
for args_or_kwargs in use_args_or_kwargs[len(inputs_to_test) - 1]:
with self.subTest(args_or_kwargs=args_or_kwargs):
inputs = _forward_layer_eval(
functools.partial(forward_func, args_or_kwargs=args_or_kwargs),
inputs=tuple(inputs_to_test.values()),
layer=[model.input_maps[name] for name in inputs_to_test.keys()],
)
inputs_with_attrib_to_inp = _forward_layer_eval(
functools.partial(forward_func, args_or_kwargs=args_or_kwargs),
inputs=tuple(inputs_to_test.values()),
layer=[model.input_maps[name] for name in inputs_to_test.keys()],
attribute_to_layer_input=True,
)
for i1, i2, i3 in zip(
inputs, inputs_with_attrib_to_inp, inputs_to_test.values()
):
self.assertTrue((i1[0] == i2[0]).all())
self.assertTrue((i1[0] == i3).all())
|
#!/usr/bin/env python3
from typing import Union
import torch
from captum._utils.typing import TargetType
from captum.attr._core.deep_lift import DeepLift, DeepLiftShap
from captum.attr._core.integrated_gradients import IntegratedGradients
from tests.helpers.basic import assertAttributionComparision, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_ConvNet_MaxPool1d,
BasicModel_ConvNet_MaxPool3d,
)
from tests.helpers.classification_models import (
SigmoidDeepLiftModel,
SoftmaxDeepLiftModel,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_sigmoid_classification(self) -> None:
num_in = 20
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
baseline = 0 * input
target = torch.tensor(0)
# TODO add test cases for multiple different layers
model = SigmoidDeepLiftModel(num_in, 5, 1)
dl = DeepLift(model)
model.zero_grad()
attributions, delta = dl.attribute(
input, baseline, target=target, return_convergence_delta=True
)
self._assert_attributions(model, attributions, input, baseline, delta, target)
# compare with integrated gradients
ig = IntegratedGradients(model)
attributions_ig = ig.attribute(input, baseline, target=target)
assertAttributionComparision(self, (attributions,), (attributions_ig,))
def test_softmax_classification_zero_baseline(self) -> None:
num_in = 20
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
baselines = 0.0
model = SoftmaxDeepLiftModel(num_in, 20, 10)
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baselines, torch.tensor(2))
def test_softmax_classification_batch_zero_baseline(self) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 3.0, requires_grad=True).reshape(3, num_in)
baselines = 0
model = SoftmaxDeepLiftModel(num_in, 20, 10)
dl = DeepLift(model)
self.softmax_classification(
model, dl, input, baselines, torch.tensor([2, 2, 2])
)
def test_softmax_classification_batch_multi_target(self) -> None:
num_in = 40
inputs = torch.arange(0.0, num_in * 3.0, requires_grad=True).reshape(3, num_in)
baselines = torch.arange(1.0, num_in + 1).reshape(1, num_in)
model = SoftmaxDeepLiftModel(num_in, 20, 10)
dl = DeepLift(model)
self.softmax_classification(
model, dl, inputs, baselines, torch.tensor([2, 2, 2])
)
def test_softmax_classification_multi_baseline(self) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
baselines = torch.randn(5, 40)
model = SoftmaxDeepLiftModel(num_in, 20, 10)
dl = DeepLiftShap(model)
self.softmax_classification(model, dl, input, baselines, torch.tensor(2))
def test_softmax_classification_batch_multi_baseline(self) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 2.0, requires_grad=True).reshape(2, num_in)
baselines = torch.randn(5, 40)
model = SoftmaxDeepLiftModel(num_in, 20, 10)
dl = DeepLiftShap(model)
self.softmax_classification(model, dl, input, baselines, torch.tensor(2))
def test_convnet_with_maxpool3d(self) -> None:
input = 100 * torch.randn(2, 1, 10, 10, 10, requires_grad=True)
baseline = 20 * torch.randn(2, 1, 10, 10, 10)
model = BasicModel_ConvNet_MaxPool3d()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def test_convnet_with_maxpool3d_large_baselines(self) -> None:
input = 100 * torch.randn(2, 1, 10, 10, 10, requires_grad=True)
baseline = 600 * torch.randn(2, 1, 10, 10, 10)
model = BasicModel_ConvNet_MaxPool3d()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def test_convnet_with_maxpool2d(self) -> None:
input = 100 * torch.randn(2, 1, 10, 10, requires_grad=True)
baseline = 20 * torch.randn(2, 1, 10, 10)
model = BasicModel_ConvNet()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def test_convnet_with_maxpool2d_large_baselines(self) -> None:
input = 100 * torch.randn(2, 1, 10, 10, requires_grad=True)
baseline = 500 * torch.randn(2, 1, 10, 10)
model = BasicModel_ConvNet()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def test_convnet_with_maxpool1d(self) -> None:
input = 100 * torch.randn(2, 1, 10, requires_grad=True)
baseline = 20 * torch.randn(2, 1, 10)
model = BasicModel_ConvNet_MaxPool1d()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def test_convnet_with_maxpool1d_large_baselines(self) -> None:
input = 100 * torch.randn(2, 1, 10, requires_grad=True)
baseline = 500 * torch.randn(2, 1, 10)
model = BasicModel_ConvNet_MaxPool1d()
dl = DeepLift(model)
self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
def softmax_classification(
self,
model: Module,
attr_method: Union[DeepLift, DeepLiftShap],
input: Tensor,
baselines,
target: TargetType,
) -> None:
# TODO add test cases for multiple different layers
model.zero_grad()
attributions, delta = attr_method.attribute(
input, baselines=baselines, target=target, return_convergence_delta=True
)
self._assert_attributions(model, attributions, input, baselines, delta, target)
target2 = torch.tensor(1)
attributions, delta = attr_method.attribute(
input, baselines=baselines, target=target2, return_convergence_delta=True
)
self._assert_attributions(model, attributions, input, baselines, delta, target2)
def _assert_attributions(
self,
model: Module,
attributions: Tensor,
inputs: Tensor,
baselines: Union[Tensor, int, float],
delta: Tensor,
target: TargetType = None,
) -> None:
self.assertEqual(inputs.shape, attributions.shape)
delta_condition = (delta.abs() < 0.003).all()
self.assertTrue(
delta_condition,
"The sum of attribution values {} is not "
"nearly equal to the difference between the endpoint for "
"some samples".format(delta),
)
# compare with integrated gradients
if isinstance(baselines, (int, float)) or inputs.shape == baselines.shape:
ig = IntegratedGradients(model)
attributions_ig = ig.attribute(inputs, baselines=baselines, target=target)
assertAttributionComparision(self, attributions, attributions_ig)
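# Editorial sketch (assumption: tensor baselines and 2D model output; not
# original code): the convergence delta asserted above is the completeness
# gap, i.e. how far sum(attributions) is from f(input) - f(baseline) per
# sample.
def _example_completeness_gap(model, inputs, baselines, attributions, target):
    out_diff = model(inputs)[:, target] - model(baselines)[:, target]
    attr_sum = attributions.reshape(attributions.shape[0], -1).sum(dim=1)
    # DeepLift reports this per-sample residual as `delta`
    return attr_sum - out_diff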
|
#!/usr/bin/env python3
from typing import List, Tuple
import torch
from captum.attr._core.feature_permutation import _permute_feature, FeaturePermutation
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModelWithSparseInputs
from torch import Tensor
class Test(BaseTest):
def _check_features_are_permuted(
self, inp: Tensor, perm_inp: Tensor, mask: Tensor
) -> None:
permuted_features = mask.expand_as(inp[0])
unpermuted_features = permuted_features.bitwise_not()
self.assertTrue(inp.dtype == perm_inp.dtype)
self.assertTrue(inp.shape == perm_inp.shape)
self.assertTrue(
(inp[:, permuted_features] != perm_inp[:, permuted_features]).any()
)
self.assertTrue(
(inp[:, unpermuted_features] == perm_inp[:, unpermuted_features]).all()
)
def _check_perm_fn_with_mask(self, inp: Tensor, mask: Tensor) -> None:
perm_inp = _permute_feature(inp, mask)
self._check_features_are_permuted(inp, perm_inp, mask)
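    # Editorial sketch (not the real implementation): conceptually,
    # _permute_feature shuffles the masked positions across the batch:
    #     perm = torch.randperm(inp.size(0))
    #     out = inp.clone()
    #     m = mask.expand_as(inp[0])
    #     out[:, m] = inp[perm][:, m]
    # so masked features move between examples while unmasked ones stay put.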
def test_perm_fn_single_feature(self) -> None:
batch_size = 2
sizes_to_test: List[Tuple[int, ...]] = [(10,), (4, 5), (3, 4, 5)]
for inp_size in sizes_to_test:
inp = torch.randn((batch_size,) + inp_size)
flat_mask = torch.zeros_like(inp[0]).flatten().bool()
num_features = inp.numel() // batch_size
for i in range(num_features):
flat_mask[i] = 1
self._check_perm_fn_with_mask(inp, flat_mask.view_as(inp[0]))
flat_mask[i] = 0
def test_perm_fn_broadcastable_masks(self) -> None:
batch_size = 5
inp_size = (3, 20, 30)
inp = torch.randn((batch_size,) + inp_size)
        # To be broadcastable, dimensions must match from right to left,
        # with each dimension equal to either 1 or the corresponding
        # input dim.
        #
        # Leading dimensions may also be omitted entirely; omitted
        # dimensions are implied to be 1.
        #
        # They are written out explicitly here for clarity.
mask_sizes: List[Tuple[int, ...]] = [
# dims = 1
(1, 20, 30),
(3, 1, 30),
(3, 20, 1),
(1, 1, 30),
(1, 20, 1),
# missing
            (1,),  # single element broadcast across all features
(30,),
(20, 30),
(3, 20, 30),
]
for mask_size in mask_sizes:
mask = torch.randint(0, 2, mask_size).bool()
self.assertTrue(mask.shape == mask_size)
self._check_perm_fn_with_mask(inp, mask)
def test_single_input(self) -> None:
batch_size = 2
input_size = (6,)
constant_value = 10000
def forward_func(x: Tensor) -> Tensor:
return x.sum(dim=-1)
feature_importance = FeaturePermutation(forward_func=forward_func)
inp = torch.randn((batch_size,) + input_size)
inp[:, 0] = constant_value
zeros = torch.zeros_like(inp[:, 0])
attribs = feature_importance.attribute(inp)
        self.assertTrue(attribs.size() == (batch_size,) + input_size)
assertTensorAlmostEqual(self, attribs[:, 0], zeros, delta=0.05, mode="max")
self.assertTrue((attribs[:, 1 : input_size[0]].abs() > 0).all())
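    # Editorial note: feature 0 is constant across the batch, so permuting it
    # cannot change the forward output and its attribution is ~0, while every
    # other feature differs across examples and receives a nonzero score.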
def test_multi_input(self) -> None:
batch_size = 20
inp1_size = (5, 2)
inp2_size = (5, 3)
labels = torch.randn(batch_size)
def forward_func(*x: Tensor) -> Tensor:
y = torch.zeros(x[0].shape[0:2])
for xx in x:
y += xx[:, :, 0] * xx[:, :, 1]
y = y.sum(dim=-1)
return torch.mean((y - labels) ** 2)
feature_importance = FeaturePermutation(forward_func=forward_func)
inp = (
torch.randn((batch_size,) + inp1_size),
torch.randn((batch_size,) + inp2_size),
)
feature_mask = (
torch.arange(inp[0][0].numel()).view_as(inp[0][0]).unsqueeze(0),
torch.arange(inp[1][0].numel()).view_as(inp[1][0]).unsqueeze(0),
)
inp[1][:, :, 1] = 4
attribs = feature_importance.attribute(inp, feature_mask=feature_mask)
self.assertTrue(isinstance(attribs, tuple))
self.assertTrue(len(attribs) == 2)
self.assertTrue(attribs[0].squeeze(0).size() == inp1_size)
self.assertTrue(attribs[1].squeeze(0).size() == inp2_size)
self.assertTrue((attribs[1][:, :, 1] == 0).all())
self.assertTrue((attribs[1][:, :, 2] == 0).all())
self.assertTrue((attribs[0] != 0).all())
self.assertTrue((attribs[1][:, :, 0] != 0).all())
def test_mulitple_perturbations_per_eval(self) -> None:
perturbations_per_eval = 4
batch_size = 2
input_size = (4,)
inp = torch.randn((batch_size,) + input_size)
def forward_func(x):
return 1 - x
target = 1
feature_importance = FeaturePermutation(forward_func=forward_func)
attribs = feature_importance.attribute(
inp, perturbations_per_eval=perturbations_per_eval, target=target
)
self.assertTrue(attribs.size() == (batch_size,) + input_size)
for i in range(inp.size(1)):
if i == target:
continue
assertTensorAlmostEqual(
self, attribs[:, i], torch.zeros_like(attribs[:, i])
)
y = forward_func(inp)
actual_diff = torch.stack([(y[0] - y[1])[target], (y[1] - y[0])[target]])
assertTensorAlmostEqual(self, attribs[:, target], actual_diff)
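    # Editorial note: with a batch of two examples, the only non-identity
    # permutation swaps them, so the attribution of the permuted feature is
    # (y[0] - y[1])[target] for the first example and its negation for the
    # second, which is the `actual_diff` computed above.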
def test_broadcastable_masks(self) -> None:
# integration test to ensure that
# permutation function works with custom masks
def forward_func(x: Tensor) -> Tensor:
return x.view(x.shape[0], -1).sum(dim=-1)
batch_size = 2
inp = torch.randn((batch_size,) + (3, 4, 4))
feature_importance = FeaturePermutation(forward_func=forward_func)
masks = [
torch.tensor([0]),
torch.tensor([[0, 1, 2, 3]]),
torch.tensor([[[0, 1, 2, 3], [3, 3, 4, 5], [6, 6, 4, 6], [7, 8, 9, 10]]]),
]
for mask in masks:
attribs = feature_importance.attribute(inp, feature_mask=mask)
self.assertTrue(attribs is not None)
self.assertTrue(attribs.shape == inp.shape)
fm = mask.expand_as(inp[0])
            features = set(mask.flatten().tolist())
for feature in features:
m = (fm == feature).bool()
attribs_for_feature = attribs[:, m]
assertTensorAlmostEqual(
self,
attribs_for_feature[0],
-attribs_for_feature[1],
delta=0.05,
mode="max",
)
def test_empty_sparse_features(self) -> None:
model = BasicModelWithSparseInputs()
inp1 = torch.tensor([[1.0, -2.0, 3.0], [2.0, -1.0, 3.0]])
inp2 = torch.tensor([])
# test empty sparse tensor
feature_importance = FeaturePermutation(model)
attr1, attr2 = feature_importance.attribute((inp1, inp2))
self.assertEqual(attr1.shape, (1, 3))
self.assertEqual(attr2.shape, (1,))
def test_sparse_features(self) -> None:
model = BasicModelWithSparseInputs()
inp1 = torch.tensor([[1.0, -2.0, 3.0], [2.0, -1.0, 3.0]])
# Length of sparse index list may not match # of examples
inp2 = torch.tensor([1, 7, 2, 4, 5, 3, 6])
feature_importance = FeaturePermutation(model)
        n_runs = 50
        total_attr1, total_attr2 = feature_importance.attribute((inp1, inp2))
        for _ in range(n_runs - 1):
            attr1, attr2 = feature_importance.attribute((inp1, inp2))
            total_attr1 += attr1
            total_attr2 += attr2
        total_attr1 /= n_runs
        total_attr2 /= n_runs
self.assertEqual(total_attr2.shape, (1,))
assertTensorAlmostEqual(self, total_attr1, torch.zeros_like(total_attr1))
assertTensorAlmostEqual(self, total_attr2, [-6.0], delta=0.2)
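    # Editorial note: permutations are random, so a single run is noisy;
    # averaging over many runs above lets the dense attributions concentrate
    # near zero and the sparse-feature attribution near its expected -6.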
|
#!/usr/bin/env python3
import torch
from captum.attr import ClassSummarizer, CommonStats
from tests.helpers.basic import BaseTest
class Test(BaseTest):
def class_test(self, data, classes, x_sizes):
summarizer = ClassSummarizer(stats=CommonStats())
for x, y in data:
summarizer.update(x, y)
summ = summarizer.summary
self.assertIsNotNone(summ)
self.assertIsInstance(summ, list)
for s, size in zip(summ, x_sizes):
self.assertIsInstance(s, dict)
for key in s:
self.assertEqual(s[key].size(), size)
self.assertIsNotNone(summarizer.class_summaries)
all_classes = torch.zeros(len(classes))
class_summaries = summarizer.class_summaries
all_keys = set(class_summaries.keys())
for i, clazz in enumerate(classes):
self.assertTrue(clazz in class_summaries)
all_keys.remove(clazz)
all_classes[i] = 1
summ = class_summaries[clazz]
self.assertIsNotNone(summ)
self.assertIsInstance(summ, list)
for s, size in zip(summ, x_sizes):
self.assertIsInstance(s, dict)
for key in s:
self.assertEqual(s[key].size(), size)
self.assertEqual(len(all_keys), 0)
self.assertEqual(all_classes.sum(), len(classes))
def test_classes(self):
sizes_to_test = [
# ((1,),),
((3, 2, 10, 3), (1,)),
# ((20,),),
]
list_of_classes = [
list(range(100)),
["%d" % i for i in range(100)],
list(range(300, 400)),
]
for batch_size in [None, 1, 4]:
for sizes, classes in zip(sizes_to_test, list_of_classes):
def create_batch_labels(batch_idx):
if batch_size is None:
# batch_size = 1
return classes[batch_idx]
return classes[
batch_idx * batch_size : (batch_idx + 1) * batch_size
]
bs = 1 if batch_size is None else batch_size
num_batches = len(classes) // bs
sizes_plus_batch = tuple((bs,) + si for si in sizes)
data = [
(
tuple(torch.randn(si) for si in sizes_plus_batch),
create_batch_labels(batch_idx),
)
for batch_idx in range(num_batches)
]
with self.subTest(
batch_size=batch_size, sizes=sizes_plus_batch, classes=classes
):
self.class_test(data, classes, sizes)
def test_no_class(self) -> None:
size = (30, 20)
summarizer = ClassSummarizer(stats=CommonStats())
for _ in range(10):
x = torch.randn(size)
summarizer.update(x)
summ = summarizer.summary
self.assertIsNotNone(summ)
self.assertIsInstance(summ, dict)
for key in summ:
self.assertTrue(summ[key].size() == size)
self.assertIsNotNone(summarizer.class_summaries)
self.assertIsInstance(summarizer.class_summaries, dict)
self.assertEqual(len(summarizer.class_summaries), 0)
def test_single_label(self) -> None:
size = (4, 3, 2, 1)
data = torch.randn((100,) + size)
single_labels = [1, "apple"]
for label in single_labels:
summarizer = ClassSummarizer(stats=CommonStats())
summarizer.update(data, label)
summ1 = summarizer.summary
summ2 = summarizer.class_summaries
self.assertIsNotNone(summ1)
self.assertIsNotNone(summ2)
self.assertIsInstance(summ1, list)
self.assertTrue(len(summ1) == 1)
self.assertIsInstance(summ2, dict)
self.assertTrue(label in summ2)
self.assertTrue(len(summ1) == len(summ2[label]))
for key in summ1[0].keys():
self.assertTrue((summ1[0][key] == summ2[label][0][key]).all())
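# Editorial usage sketch (not original code): outside the tests,
# ClassSummarizer keeps one running summary per label plus a combined one.
def _example_class_summarizer():
    summarizer = ClassSummarizer(stats=CommonStats())
    summarizer.update(torch.randn(8, 4), ["a"] * 8)
    summarizer.update(torch.randn(8, 4), ["b"] * 8)
    # per-class stats, e.g. summarizer.class_summaries["a"][0]["mean"]
    return summarizer.class_summaries, summarizer.summary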
|
#!/usr/bin/env python3
import torch
from captum.attr import CommonStats, Summarizer
from tests.helpers.basic import BaseTest
class Test(BaseTest):
def test_single_input(self) -> None:
size = (2, 3)
summarizer = Summarizer(stats=CommonStats())
for _ in range(10):
attrs = torch.randn(size)
summarizer.update(attrs)
summ = summarizer.summary
self.assertIsNotNone(summ)
self.assertTrue(isinstance(summ, dict))
for k in summ:
self.assertTrue(summ[k].size() == size)
def test_multi_input(self) -> None:
size1 = (10, 5, 5)
size2 = (3, 5)
summarizer = Summarizer(stats=CommonStats())
for _ in range(10):
a1 = torch.randn(size1)
a2 = torch.randn(size2)
summarizer.update((a1, a2))
summ = summarizer.summary
self.assertIsNotNone(summ)
self.assertTrue(len(summ) == 2)
self.assertTrue(isinstance(summ[0], dict))
self.assertTrue(isinstance(summ[1], dict))
for k in summ[0]:
self.assertTrue(summ[0][k].size() == size1)
self.assertTrue(summ[1][k].size() == size2)
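# Editorial note: each summary maps stat names (e.g. "mean") to tensors shaped
# like the corresponding updated input, so the running mean of a stream of
# attribution tensors is simply summ["mean"] after the loop above.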
|
#!/usr/bin/env python3
import copy
import os
from enum import Enum
from typing import Any, Callable, cast, Dict, Optional, Tuple, Type
import torch
import torch.distributed as dist
from captum.attr._core.guided_grad_cam import GuidedGradCam
from captum.attr._core.layer.layer_deep_lift import LayerDeepLift, LayerDeepLiftShap
from captum.attr._core.layer.layer_lrp import LayerLRP
from captum.attr._core.neuron.neuron_deep_lift import NeuronDeepLift, NeuronDeepLiftShap
from captum.attr._core.neuron.neuron_guided_backprop_deconvnet import (
NeuronDeconvolution,
NeuronGuidedBackprop,
)
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._utils.attribution import Attribution, InternalAttribution
from tests.attr.helpers.gen_test_utils import (
gen_test_name,
get_target_layer,
parse_test_config,
should_create_generated_test,
)
from tests.attr.helpers.test_config import config
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest, deep_copy_args
from torch import Tensor
from torch.nn import Module
"""
Tests in this file are dynamically generated based on the config
defined in tests/attr/helpers/test_config.py. To add new test cases,
read the documentation in test_config.py and add cases based on the
schema described there.
"""
# Distributed Data Parallel env setup
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29500"
dist.init_process_group(backend="gloo", rank=0, world_size=1)
class DataParallelCompareMode(Enum):
"""
Defines modes for DataParallel tests:
`cpu_cuda` - Compares results when running attribution method on CPU vs GPU / CUDA
`data_parallel_default` - Compares results when running attribution method on GPU
with DataParallel
`data_parallel_alt_dev_ids` - Compares results when running attribution method on
GPU with DataParallel, but with an alternate device ID ordering (not default)
"""
cpu_cuda = 1
data_parallel_default = 2
data_parallel_alt_dev_ids = 3
dist_data_parallel = 4
class DataParallelMeta(type):
def __new__(cls, name: str, bases: Tuple, attrs: Dict):
for test_config in config:
(
algorithms,
model,
args,
layer,
noise_tunnel,
baseline_distr,
) = parse_test_config(test_config)
dp_delta = test_config["dp_delta"] if "dp_delta" in test_config else 0.0001
for algorithm in algorithms:
if not should_create_generated_test(algorithm):
continue
for mode in DataParallelCompareMode:
# Creates test case corresponding to each algorithm and
# DataParallelCompareMode
test_method = cls.make_single_dp_test(
algorithm,
model,
layer,
args,
dp_delta,
noise_tunnel,
baseline_distr,
mode,
)
test_name = gen_test_name(
"test_dp_" + mode.name,
cast(str, test_config["name"]),
algorithm,
noise_tunnel,
)
if test_name in attrs:
raise AssertionError(
"Trying to overwrite existing test with name: %r"
% test_name
)
attrs[test_name] = test_method
return super(DataParallelMeta, cls).__new__(cls, name, bases, attrs)
# Arguments are deep copied to ensure tests are independent and are not affected
# by any modifications within a previous test.
@classmethod
@deep_copy_args
def make_single_dp_test(
cls,
algorithm: Type[Attribution],
model: Module,
target_layer: Optional[str],
args: Dict[str, Any],
dp_delta: float,
noise_tunnel: bool,
baseline_distr: bool,
mode: DataParallelCompareMode,
) -> Callable:
"""
This method creates a single Data Parallel / GPU test for the given
algorithm and parameters.
"""
def data_parallel_test_assert(self) -> None:
# Construct cuda_args, moving all tensor inputs in args to CUDA device
cuda_args = {}
for key in args:
if isinstance(args[key], Tensor):
cuda_args[key] = args[key].cuda()
elif isinstance(args[key], tuple):
cuda_args[key] = tuple(
elem.cuda() if isinstance(elem, Tensor) else elem
for elem in args[key]
)
else:
cuda_args[key] = args[key]
alt_device_ids = None
cuda_model = copy.deepcopy(model).cuda()
# Initialize models based on DataParallelCompareMode
if mode is DataParallelCompareMode.cpu_cuda:
model_1, model_2 = model, cuda_model
args_1, args_2 = args, cuda_args
elif mode is DataParallelCompareMode.data_parallel_default:
model_1, model_2 = (
cuda_model,
torch.nn.parallel.DataParallel(cuda_model),
)
args_1, args_2 = cuda_args, cuda_args
elif mode is DataParallelCompareMode.data_parallel_alt_dev_ids:
alt_device_ids = [0] + [
x for x in range(torch.cuda.device_count() - 1, 0, -1)
]
model_1, model_2 = (
cuda_model,
torch.nn.parallel.DataParallel(
cuda_model, device_ids=alt_device_ids
),
)
args_1, args_2 = cuda_args, cuda_args
elif mode is DataParallelCompareMode.dist_data_parallel:
model_1, model_2 = (
cuda_model,
torch.nn.parallel.DistributedDataParallel(
cuda_model, device_ids=[0], output_device=0
),
)
args_1, args_2 = cuda_args, cuda_args
else:
raise AssertionError("DataParallel compare mode type is not valid.")
attr_method_1: Attribution
attr_method_2: Attribution
if target_layer:
internal_algorithm = cast(Type[InternalAttribution], algorithm)
attr_method_1 = internal_algorithm(
model_1, get_target_layer(model_1, target_layer)
)
# cuda_model is used to obtain target_layer since DataParallel
# adds additional wrapper.
# model_2 is always either the CUDA model itself or DataParallel
if alt_device_ids is None:
attr_method_2 = internal_algorithm(
model_2, get_target_layer(cuda_model, target_layer)
)
else:
# LayerDeepLift and LayerDeepLiftShap do not take device ids
# as a parameter, since they must always have the DataParallel
# model object directly.
# Some neuron methods and GuidedGradCAM also require the
# model and cannot take a forward function.
if issubclass(
internal_algorithm,
(
LayerDeepLift,
LayerDeepLiftShap,
LayerLRP,
NeuronDeepLift,
NeuronDeepLiftShap,
NeuronDeconvolution,
NeuronGuidedBackprop,
GuidedGradCam,
),
):
attr_method_2 = internal_algorithm(
model_2,
get_target_layer(cuda_model, target_layer), # type: ignore
)
else:
attr_method_2 = internal_algorithm(
model_2.forward,
get_target_layer(cuda_model, target_layer),
device_ids=alt_device_ids,
)
else:
attr_method_1 = algorithm(model_1)
attr_method_2 = algorithm(model_2)
if noise_tunnel:
attr_method_1 = NoiseTunnel(attr_method_1)
attr_method_2 = NoiseTunnel(attr_method_2)
if attr_method_1.has_convergence_delta():
attributions_1, delta_1 = attr_method_1.attribute(
return_convergence_delta=True, **args_1
)
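                # setUp() is invoked between the two runs to reset test state
                # (BaseTest re-seeds RNGs), so stochastic methods such as
                # NoiseTunnel or GradientShap draw comparable samples for
                # both models.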
self.setUp()
attributions_2, delta_2 = attr_method_2.attribute(
return_convergence_delta=True, **args_2
)
if isinstance(attributions_1, list):
for i in range(len(attributions_1)):
assertTensorTuplesAlmostEqual(
self,
attributions_1[i],
attributions_2[i],
mode="max",
delta=dp_delta,
)
else:
assertTensorTuplesAlmostEqual(
self, attributions_1, attributions_2, mode="max", delta=dp_delta
)
assertTensorTuplesAlmostEqual(
self, delta_1, delta_2, mode="max", delta=dp_delta
)
else:
attributions_1 = attr_method_1.attribute(**args_1)
self.setUp()
attributions_2 = attr_method_2.attribute(**args_2)
if isinstance(attributions_1, list):
for i in range(len(attributions_1)):
assertTensorTuplesAlmostEqual(
self,
attributions_1[i],
attributions_2[i],
mode="max",
delta=dp_delta,
)
else:
assertTensorTuplesAlmostEqual(
self, attributions_1, attributions_2, mode="max", delta=dp_delta
)
return data_parallel_test_assert
if torch.cuda.is_available() and torch.cuda.device_count() != 0:
class DataParallelTest(BaseTest, metaclass=DataParallelMeta):
@classmethod
def tearDownClass(cls):
if torch.distributed.is_initialized():
dist.destroy_process_group()
|
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from functools import partial
from typing import Any, Callable, Generator, List, Optional, Tuple, Union
import torch
from captum._utils.models.linear_model import SGDLasso, SkLearnLasso
from captum._utils.models.model import Model
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.lime import get_exp_kernel_similarity_function, Lime, LimeBase
from captum.attr._utils.batching import _batch_example_iterator
from captum.attr._utils.common import (
_construct_default_feature_mask,
_format_input_baseline,
_format_tensor_into_tuples,
)
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicLinearModel,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
BasicModelBoolInput,
)
from torch import Tensor
def alt_perturb_func(
original_inp: TensorOrTupleOfTensorsGeneric, **kwargs
) -> TensorOrTupleOfTensorsGeneric:
if isinstance(original_inp, Tensor):
device = original_inp.device
else:
device = original_inp[0].device
feature_mask = kwargs["feature_mask"]
probs = torch.ones(1, kwargs["num_interp_features"]) * 0.5
curr_sample = torch.bernoulli(probs).to(device=device)
binary_mask: TensorOrTupleOfTensorsGeneric
if isinstance(original_inp, Tensor):
binary_mask = curr_sample[0][feature_mask]
return binary_mask * original_inp + (1 - binary_mask) * kwargs["baselines"]
else:
binary_mask = tuple(
curr_sample[0][feature_mask[j]] for j in range(len(feature_mask))
)
return tuple(
binary_mask[j] * original_inp[j]
+ (1 - binary_mask[j]) * kwargs["baselines"][j]
for j in range(len(feature_mask))
)
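# Editorial note: alt_perturb_func draws a Bernoulli(0.5) vector over the
# interpretable features and mixes input with baseline elementwise,
# x' = m * x + (1 - m) * baseline, with m expanded through `feature_mask`.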
def alt_perturb_generator(
original_inp: TensorOrTupleOfTensorsGeneric, **kwargs
) -> Generator[TensorOrTupleOfTensorsGeneric, None, None]:
while True:
yield alt_perturb_func(original_inp, **kwargs)
def alt_to_interp_rep(
curr_sample: TensorOrTupleOfTensorsGeneric,
original_input: TensorOrTupleOfTensorsGeneric,
**kwargs: Any,
) -> Tensor:
binary_vector = torch.zeros(1, kwargs["num_interp_features"])
feature_mask = kwargs["feature_mask"]
for i in range(kwargs["num_interp_features"]):
curr_total = 1
if isinstance(curr_sample, Tensor):
if (
torch.sum(
torch.abs(
(feature_mask == i).float() * (curr_sample - original_input)
)
)
> 0.001
):
curr_total = 0
else:
sum_diff = sum(
torch.sum(torch.abs((mask == i).float() * (sample - inp)))
for inp, sample, mask in zip(original_input, curr_sample, feature_mask)
)
if sum_diff > 0.001:
curr_total = 0
binary_vector[0][i] = curr_total
return binary_vector
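# Editorial note: the interpretable representation built above is a binary
# vector whose coordinate i is 1 iff feature group i was left unperturbed in
# `curr_sample` (up to a small numeric tolerance).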
class Test(BaseTest):
def setUp(self) -> None:
super().setUp()
        try:
            import sklearn  # noqa: F401
            assert sklearn.__version__ >= "0.23.0", (
                "Must have sklearn version 0.23.0 or higher to use "
                "sample_weight in Lasso regression."
            )
        except (ImportError, AssertionError):
            raise unittest.SkipTest("Skipping Lime tests, sklearn not available.")
def test_simple_lime(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._lime_test_assert(
net,
inp,
[[73.3716, 193.3349, 113.3349]],
perturbations_per_eval=(1, 2, 3),
n_samples=500,
expected_coefs_only=[[73.3716, 193.3349, 113.3349]],
test_generator=True,
)
def test_simple_lime_sgd_model(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
interpretable_model = SGDLasso()
interpretable_model.fit = partial( # type: ignore
interpretable_model.fit, initial_lr=0.1, max_epoch=500
)
self._lime_test_assert(
net,
inp,
[[73.3716, 193.3349, 113.3349]],
n_samples=1000,
expected_coefs_only=[[73.3716, 193.3349, 113.3349]],
interpretable_model=interpretable_model,
)
def test_simple_lime_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._lime_test_assert(
net,
inp,
[[271.0, 271.0, 111.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
n_samples=500,
expected_coefs_only=[[271.0, 111.0]],
)
def test_simple_lime_with_baselines(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]])
self._lime_test_assert(
net,
inp,
[[244.0, 244.0, 100.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=4,
perturbations_per_eval=(1, 2, 3),
expected_coefs_only=[[244.0, 100.0]],
test_generator=True,
)
def test_simple_lime_boolean(self) -> None:
net = BasicModelBoolInput()
inp = torch.tensor([[True, False, True]])
self._lime_test_assert(
net,
inp,
[[31.42, 31.42, 30.90]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
test_generator=True,
)
def test_simple_lime_boolean_with_baselines(self) -> None:
net = BasicModelBoolInput()
inp = torch.tensor([[True, False, True]])
self._lime_test_assert(
net,
inp,
[[-36.0, -36.0, 0.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=True,
perturbations_per_eval=(1, 2, 3),
test_generator=True,
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_lime_with_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._lime_test_assert(
net,
inp,
[[73.3716, 193.3349, 113.3349]],
perturbations_per_eval=(bsz,),
n_samples=500,
test_generator=True,
show_progress=True,
)
output = mock_stderr.getvalue()
# to test if progress calculation aligns with the actual iteration
# all perturbations_per_eval should reach progress of 100%
assert (
"Lime attribution: 100%" in output
), f"Error progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
def test_simple_batch_lime(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0], [10.0, 14.0, 4.0]], requires_grad=True)
self._lime_test_assert(
net,
inp,
[[73.4450, 193.5979, 113.4363], [32.11, 48.00, 11.00]],
perturbations_per_eval=(1, 2, 3),
n_samples=800,
expected_coefs_only=[[73.4450, 193.5979, 113.4363], [32.11, 48.00, 11.00]],
)
def test_simple_batch_lime_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0], [10.0, 14.0, 4.0]], requires_grad=True)
self._lime_test_assert(
net,
inp,
[[271.0, 271.0, 111.0], [32.11, 48.00, 11.00]],
feature_mask=torch.tensor([[0, 0, 1], [0, 1, 2]]),
perturbations_per_eval=(1, 2, 3),
n_samples=600,
expected_coefs_only=[[271.0, 111.0, 0.0], [32.11, 48.00, 11.00]],
test_generator=True,
)
def test_multi_input_lime_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0]])
expected = (
[[87, 0, 0]],
[[75, 0, 195]],
[[0, 395, 35]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=2000,
expected_coefs_only=[[87, 0, 0, 75, 0, 195, 0, 395, 35]],
)
def test_multi_input_lime_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[20.0, 50.0, 30.0]])
inp2 = torch.tensor([[0.0, 100.0, 0.0]])
inp3 = torch.tensor([[2.0, 10.0, 3.0]])
mask1 = torch.tensor([[0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 0, 0]])
expected = (
[[251.0, 591.0, 251.0]],
[[251.0, 591.0, 0.0]],
[[251.0, 251.0, 251.0]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
n_samples=500,
expected_coefs_only=[[251.0, 591.0, 0.0]],
)
expected_with_baseline = (
[[180, 576.0, 180]],
[[180, 576.0, -8.0]],
[[180, 180, 180]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
n_samples=500,
expected_coefs_only=[[180, 576.0, -8.0]],
test_generator=True,
)
def test_multi_input_lime_with_empty_input(self) -> None:
net = BasicLinearModel()
inp1 = torch.tensor([[23.0, 0.0, 0.0, 23.0, 0.0, 0.0, 23.0]])
inp2 = torch.tensor([[]]) # empty input
mask1 = torch.tensor([[0, 1, 2, 3, 4, 5, 6]])
mask2 = torch.tensor([[]], dtype=torch.long) # empty mask
expected: Tuple[List[List[float]], ...] = (
[[-4.0, 0, 0, 0, 0, 0, -4.0]],
[[]],
)
# no mask
self._lime_test_assert(
net,
(inp1, inp2),
expected,
n_samples=2000,
expected_coefs_only=[[-4.0, 0, 0, 0, 0, 0, -4.0]],
)
# with mask
self._lime_test_assert(
net,
(inp1, inp2),
expected,
n_samples=2000,
expected_coefs_only=[[-4.0, 0, 0, 0, 0, 0, -4.0]],
feature_mask=(mask1, mask2),
)
def test_multi_input_batch_lime_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]])
expected = (
[[87.8777, 0.0000, 0.0000], [75.8461, 195.6842, 115.3390]],
[[74.7283, 0.0000, 195.1708], [0.0000, 395.3823, 0.0000]],
[[0.0000, 395.5216, 35.5530], [0.0000, 35.1349, 0.0000]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=1000,
expected_coefs_only=[
[87.8777, 0.0, 0.0, 74.7283, 0.0, 195.1708, 0.0, 395.5216, 35.5530],
[
75.8461,
195.6842,
115.3390,
0.0000,
395.3823,
0.0000,
0.0000,
35.1349,
0.0000,
],
],
delta=1.2,
)
def test_multi_input_batch_lime(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[1086.2802, 1086.2802, 1086.2802], [250.8907, 590.9789, 250.8907]],
[[73.2166, 1086.2802, 152.6888], [250.8907, 590.9789, 0.0000]],
[[73.2166, 1086.2802, 152.6888], [250.8907, 250.8907, 250.8907]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
expected_with_baseline = (
[[1036.4233, 1036.4233, 1036.4233], [180.3035, 575.8969, 180.3035]],
[[48.2441, 1036.4233, 128.3161], [180.3035, 575.8969, -8.3229]],
[[48.2441, 1036.4233, 128.3161], [180.3035, 180.3035, 180.3035]],
)
self._lime_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
expected_coefs_only=[
[48.2441, 1036.4233, 128.3161],
[180.3035, 575.8969, -8.3229],
],
n_samples=500,
test_generator=True,
)
# Remaining tests are for cases where forward function returns a scalar
# as either a float, integer, 0d tensor or 1d tensor.
def test_single_lime_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_lime_assert(lambda inp: torch.sum(net(inp)).item())
def test_single_lime_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_lime_assert(lambda inp: torch.sum(net(inp)))
def test_single_lime_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_lime_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_single_lime_scalar_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_lime_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def _single_input_scalar_lime_assert(self, func: Callable) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._lime_test_assert(
func,
inp,
[[75.0, 75.0, 17.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
expected_coefs_only=[[75.0, 17.0]],
n_samples=700,
)
def test_multi_inp_lime_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_lime_assert(lambda *inp: torch.sum(net(*inp)))
def test_multi_inp_lime_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_lime_assert(
lambda *inp: torch.sum(net(*inp)).reshape(1)
)
def test_multi_inp_lime_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_lime_assert(
lambda *inp: int(torch.sum(net(*inp)).item())
)
def test_multi_inp_lime_scalar_float(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_lime_assert(lambda *inp: torch.sum(net(*inp)).item())
def _multi_input_scalar_lime_assert(self, func: Callable) -> None:
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]])
mask1 = torch.tensor([[1, 1, 1]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2]])
expected = (
[[3850.6666, 3850.6666, 3850.6666]] * 2,
[[305.5, 3850.6666, 410.1]] * 2,
[[305.5, 3850.6666, 410.1]] * 2,
)
self._lime_test_assert(
func,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
perturbations_per_eval=(1,),
target=None,
n_samples=1500,
expected_coefs_only=[[305.5, 3850.6666, 410.1]],
delta=1.5,
batch_attr=True,
test_generator=True,
)
def _lime_test_assert(
self,
model: Callable,
test_input: TensorOrTupleOfTensorsGeneric,
expected_attr,
expected_coefs_only=None,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: Union[None, int] = 0,
n_samples: int = 100,
delta: float = 1.0,
batch_attr: bool = False,
test_generator: bool = False,
show_progress: bool = False,
interpretable_model: Optional[Model] = None,
) -> None:
for batch_size in perturbations_per_eval:
lime = Lime(
model,
similarity_func=get_exp_kernel_similarity_function("cosine", 10.0),
interpretable_model=interpretable_model
if interpretable_model
else SkLearnLasso(alpha=1.0),
)
attributions = lime.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
show_progress=show_progress,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_attr, delta=delta, mode="max"
)
if expected_coefs_only is not None:
# Test with return_input_shape = False
attributions = lime.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
return_input_shape=False,
show_progress=show_progress,
)
assertTensorAlmostEqual(
self, attributions, expected_coefs_only, delta=delta, mode="max"
)
lime_alt = LimeBase(
model,
interpretable_model
if interpretable_model
else SkLearnLasso(alpha=1.0),
get_exp_kernel_similarity_function("euclidean", 1000.0),
alt_perturb_generator if test_generator else alt_perturb_func,
False,
None,
alt_to_interp_rep,
)
# Test with equivalent sampling in original input space
formatted_inputs, baselines = _format_input_baseline(
test_input, baselines
)
if feature_mask is None:
(
formatted_feature_mask,
num_interp_features,
) = _construct_default_feature_mask(formatted_inputs)
else:
formatted_feature_mask = _format_tensor_into_tuples(feature_mask)
num_interp_features = int(
max(
torch.max(single_mask).item()
for single_mask in feature_mask
if single_mask.numel()
)
+ 1
)
if batch_attr:
attributions = lime_alt.attribute(
test_input,
target=target,
feature_mask=formatted_feature_mask
if isinstance(test_input, tuple)
else formatted_feature_mask[0],
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
num_interp_features=num_interp_features,
show_progress=show_progress,
)
assertTensorAlmostEqual(
self, attributions, expected_coefs_only, delta=delta, mode="max"
)
return
bsz = formatted_inputs[0].shape[0]
for (
curr_inps,
curr_target,
curr_additional_args,
curr_baselines,
curr_feature_mask,
expected_coef_single,
) in _batch_example_iterator(
bsz,
test_input,
target,
additional_input,
baselines if isinstance(test_input, tuple) else baselines[0],
formatted_feature_mask
if isinstance(test_input, tuple)
else formatted_feature_mask[0],
expected_coefs_only,
):
attributions = lime_alt.attribute(
curr_inps,
target=curr_target,
feature_mask=curr_feature_mask,
additional_forward_args=curr_additional_args,
baselines=curr_baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
num_interp_features=num_interp_features,
show_progress=show_progress,
)
assertTensorAlmostEqual(
self,
attributions,
expected_coef_single,
delta=delta,
mode="max",
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from enum import Enum
from typing import Any, Callable, cast, Dict, Tuple, Type
import torch
from captum._utils.common import (
_format_additional_forward_args,
_format_tensor_into_tuples,
)
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._core.feature_permutation import FeaturePermutation
from captum.attr._core.gradient_shap import GradientShap
from captum.attr._core.input_x_gradient import InputXGradient
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.kernel_shap import KernelShap
from captum.attr._core.lime import Lime
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._core.occlusion import Occlusion
from captum.attr._core.saliency import Saliency
from captum.attr._core.shapley_value import ShapleyValueSampling
from captum.attr._utils.attribution import Attribution
from tests.attr.helpers.gen_test_utils import (
gen_test_name,
parse_test_config,
should_create_generated_test,
)
from tests.attr.helpers.test_config import config
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest, deep_copy_args
from torch import Tensor
from torch.nn import Module
JIT_SUPPORTED = [
IntegratedGradients,
FeatureAblation,
FeaturePermutation,
GradientShap,
InputXGradient,
Occlusion,
Saliency,
ShapleyValueSampling,
Lime,
KernelShap,
]
"""
Tests in this file are dynamically generated based on the config
defined in tests/attr/helpers/test_config.py. To add new test cases,
read the documentation in test_config.py and add cases based on the
schema described there.
"""
class JITCompareMode(Enum):
"""
Defines modes for JIT tests:
`cpu_jit_trace` - Compares results of running the test case with a standard model
on CPU with the result of JIT tracing the model and computing attributions
`cpu_jit_script` - Compares results of running the test case with a standard model
on CPU with the result of JIT scripting the model and computing attributions
`data_parallel_jit_trace` - Compares results of running the test case with a
standard model on CPU with the result of JIT tracing the model wrapped in
DataParallel and computing attributions
`data_parallel_jit_script` - Compares results of running the test case with a
standard model on CPU with the result of JIT scripting the model wrapped
in DataParallel and computing attributions
"""
cpu_jit_trace = 1
cpu_jit_script = 2
data_parallel_jit_trace = 3
    data_parallel_jit_script = 4
class JITMeta(type):
def __new__(cls, name: str, bases: Tuple, attrs: Dict):
for test_config in config:
(
algorithms,
model,
args,
layer,
noise_tunnel,
baseline_distr,
) = parse_test_config(test_config)
for algorithm in algorithms:
if not should_create_generated_test(algorithm):
continue
if algorithm in JIT_SUPPORTED:
for mode in JITCompareMode:
# Creates test case corresponding to each algorithm and
# JITCompareMode
test_method = cls.make_single_jit_test(
algorithm, model, args, noise_tunnel, baseline_distr, mode
)
test_name = gen_test_name(
"test_jit_" + mode.name,
cast(str, test_config["name"]),
algorithm,
noise_tunnel,
)
if test_name in attrs:
raise AssertionError(
"Trying to overwrite existing test with name: %r"
% test_name
)
attrs[test_name] = test_method
return super(JITMeta, cls).__new__(cls, name, bases, attrs)
# Arguments are deep copied to ensure tests are independent and are not affected
# by any modifications within a previous test.
@classmethod
@deep_copy_args
def make_single_jit_test(
cls,
algorithm: Type[Attribution],
model: Module,
args: Dict[str, Any],
noise_tunnel: bool,
baseline_distr: bool,
mode: JITCompareMode,
) -> Callable:
"""
This method creates a single JIT test for the given algorithm and parameters.
"""
def jit_test_assert(self) -> None:
model_1 = model
attr_args = args
            if mode in (
                JITCompareMode.data_parallel_jit_trace,
                JITCompareMode.data_parallel_jit_script,
            ):
if not torch.cuda.is_available() or torch.cuda.device_count() == 0:
raise unittest.SkipTest(
"Skipping GPU test since CUDA not available."
)
# Construct cuda_args, moving all tensor inputs in args to CUDA device
cuda_args = {}
for key in args:
if isinstance(args[key], Tensor):
cuda_args[key] = args[key].cuda()
elif isinstance(args[key], tuple):
cuda_args[key] = tuple(
elem.cuda() if isinstance(elem, Tensor) else elem
for elem in args[key]
)
else:
cuda_args[key] = args[key]
attr_args = cuda_args
model_1 = model_1.cuda()
# Initialize models based on JITCompareMode
            if mode in (
                JITCompareMode.cpu_jit_script,
                JITCompareMode.data_parallel_jit_script,
            ):
model_2 = torch.jit.script(model_1) # type: ignore
            elif mode in (
                JITCompareMode.cpu_jit_trace,
                JITCompareMode.data_parallel_jit_trace,
            ):
all_inps = _format_tensor_into_tuples(args["inputs"]) + (
_format_additional_forward_args(args["additional_forward_args"])
if "additional_forward_args" in args
and args["additional_forward_args"] is not None
else ()
)
model_2 = torch.jit.trace(model_1, all_inps) # type: ignore
else:
raise AssertionError("JIT compare mode type is not valid.")
attr_method_1 = algorithm(model_1)
attr_method_2 = algorithm(model_2)
if noise_tunnel:
attr_method_1 = NoiseTunnel(attr_method_1)
attr_method_2 = NoiseTunnel(attr_method_2)
if attr_method_1.has_convergence_delta():
attributions_1, delta_1 = attr_method_1.attribute(
return_convergence_delta=True, **attr_args
)
self.setUp()
attributions_2, delta_2 = attr_method_2.attribute(
return_convergence_delta=True, **attr_args
)
assertTensorTuplesAlmostEqual(
self, attributions_1, attributions_2, mode="max"
)
assertTensorTuplesAlmostEqual(self, delta_1, delta_2, mode="max")
else:
attributions_1 = attr_method_1.attribute(**attr_args)
self.setUp()
attributions_2 = attr_method_2.attribute(**attr_args)
assertTensorTuplesAlmostEqual(
self, attributions_1, attributions_2, mode="max"
)
return jit_test_assert
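# Editorial sketch (assumption: not original code): the two JIT paths the
# generated tests exercise, shown on a toy module.
def _example_trace_vs_script():
    m = torch.nn.Linear(3, 2)
    traced = torch.jit.trace(m, torch.randn(1, 3))  # records one execution
    scripted = torch.jit.script(m)  # compiles from the module source
    x = torch.randn(4, 3)
    assert torch.allclose(traced(x), scripted(x))
    return traced, scripted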
if torch.cuda.is_available() and torch.cuda.device_count() != 0:
class JITTest(BaseTest, metaclass=JITMeta):
pass
|
#!/usr/bin/env python3
from inspect import signature
from typing import Callable, List, Optional, Tuple, Union
import torch
from captum.attr._core.deep_lift import DeepLift, DeepLiftShap
from captum.attr._core.integrated_gradients import IntegratedGradients
from tests.helpers.basic import (
assertAttributionComparision,
assertTensorAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModelWithReusedModules,
Conv1dSeqModel,
LinearMaxPoolLinearModel,
ReLUDeepLiftModel,
ReLULinearModel,
TanhDeepLiftModel,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_relu_deeplift(self) -> None:
x1 = torch.tensor([1.0], requires_grad=True)
x2 = torch.tensor([2.0], requires_grad=True)
b1 = torch.tensor([0.0], requires_grad=True)
b2 = torch.tensor([0.0], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
self._deeplift_assert(model, DeepLift(model), inputs, baselines)
def test_relu_deeplift_exact_match(self) -> None:
x1 = torch.tensor([1.0], requires_grad=True)
x2 = torch.tensor([2.0], requires_grad=True)
b1 = torch.tensor([0.0], requires_grad=True)
b2 = torch.tensor([0.0], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
dl = DeepLift(model)
attributions, delta = dl.attribute(
inputs, baselines, return_convergence_delta=True
)
self.assertEqual(attributions[0][0], 2.0)
self.assertEqual(attributions[1][0], 1.0)
self.assertEqual(delta[0], 0.0)
    def test_relu_deeplift_exact_match_wo_multiplying_by_inputs(self) -> None:
x1 = torch.tensor([1.0])
x2 = torch.tensor([2.0])
inputs = (x1, x2)
model = ReLUDeepLiftModel()
dl = DeepLift(model, multiply_by_inputs=False)
attributions = dl.attribute(inputs)
self.assertEqual(attributions[0][0], 2.0)
self.assertEqual(attributions[1][0], 0.5)
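    # Editorial note: with multiply_by_inputs=False, DeepLift returns the
    # multipliers delta_out / delta_in themselves rather than
    # multiplier * (input - baseline); hence 0.5 here versus
    # 1.0 = 0.5 * (2.0 - 0.0) in the exact-match test above.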
def test_tanh_deeplift(self) -> None:
x1 = torch.tensor([-1.0], requires_grad=True)
x2 = torch.tensor([-2.0], requires_grad=True)
b1 = torch.tensor([0.0], requires_grad=True)
b2 = torch.tensor([0.0], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
model = TanhDeepLiftModel()
self._deeplift_assert(model, DeepLift(model), inputs, baselines)
def test_relu_deeplift_batch(self) -> None:
x1 = torch.tensor([[1.0], [1.0], [1.0], [1.0]], requires_grad=True)
x2 = torch.tensor([[2.0], [2.0], [2.0], [2.0]], requires_grad=True)
b1 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)
b2 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
self._deeplift_assert(model, DeepLift(model), inputs, baselines)
def test_relu_linear_deeplift(self) -> None:
model = ReLULinearModel(inplace=False)
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
inputs = (x1, x2)
baselines = (0, 0.0001)
# expected = [[[0.0, 0.0]], [[6.0, 2.0]]]
self._deeplift_assert(model, DeepLift(model), inputs, baselines)
def test_relu_linear_deeplift_compare_inplace(self) -> None:
model1 = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0], [2.0, 3.0, 4.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0], [2.3, 5.0, 4.0]], requires_grad=True)
inputs = (x1, x2)
attributions1 = DeepLift(model1).attribute(inputs)
model2 = ReLULinearModel()
attributions2 = DeepLift(model2).attribute(inputs)
assertTensorAlmostEqual(self, attributions1[0], attributions2[0])
assertTensorAlmostEqual(self, attributions1[1], attributions2[1])
def test_relu_linear_deepliftshap_compare_inplace(self) -> None:
model1 = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0], [2.0, 3.0, 4.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0], [2.3, 5.0, 4.0]], requires_grad=True)
inputs = (x1, x2)
b1 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
b2 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
baselines = (b1, b2)
attributions1 = DeepLiftShap(model1).attribute(inputs, baselines)
model2 = ReLULinearModel()
attributions2 = DeepLiftShap(model2).attribute(inputs, baselines)
assertTensorAlmostEqual(self, attributions1[0], attributions2[0])
assertTensorAlmostEqual(self, attributions1[1], attributions2[1])
def test_relu_linear_deeplift_batch(self) -> None:
model = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0], [2.0, 3.0, 4.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0], [2.3, 5.0, 4.0]], requires_grad=True)
inputs = (x1, x2)
baselines = (torch.zeros(1, 3), torch.rand(1, 3) * 0.001)
# expected = [[[0.0, 0.0]], [[6.0, 2.0]]]
self._deeplift_assert(model, DeepLift(model), inputs, baselines)
def test_relu_deeplift_with_hypothetical_contrib_func(self) -> None:
model = Conv1dSeqModel()
rand_seq_data = torch.abs(torch.randn(2, 4, 1000))
rand_seq_ref = torch.abs(torch.randn(2, 4, 1000))
dls = DeepLift(model)
attr = dls.attribute(
rand_seq_data,
rand_seq_ref,
custom_attribution_func=_hypothetical_contrib_func,
target=(1, 0),
)
self.assertEqual(attr.shape, rand_seq_data.shape)
def test_relu_deepliftshap_batch_4D_input(self) -> None:
x1 = torch.ones(4, 1, 1, 1)
x2 = torch.tensor([[[[2.0]]]] * 4)
b1 = torch.zeros(4, 1, 1, 1)
b2 = torch.zeros(4, 1, 1, 1)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
self._deeplift_assert(model, DeepLiftShap(model), inputs, baselines)
    def test_relu_deepliftshap_batch_4D_input_wo_multiplying_by_inputs(self) -> None:
x1 = torch.ones(4, 1, 1, 1)
x2 = torch.tensor([[[[2.0]]]] * 4)
b1 = torch.zeros(4, 1, 1, 1)
b2 = torch.zeros(4, 1, 1, 1)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
attr = DeepLiftShap(model, multiply_by_inputs=False).attribute(
inputs, baselines
)
assertTensorAlmostEqual(self, attr[0], 2 * torch.ones(4, 1, 1, 1))
assertTensorAlmostEqual(self, attr[1], 0.5 * torch.ones(4, 1, 1, 1))
def test_relu_deepliftshap_multi_ref(self) -> None:
x1 = torch.tensor([[1.0]], requires_grad=True)
x2 = torch.tensor([[2.0]], requires_grad=True)
b1 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)
b2 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
model = ReLUDeepLiftModel()
self._deeplift_assert(model, DeepLiftShap(model), inputs, baselines)
def test_relu_deepliftshap_baselines_as_func(self) -> None:
model = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0]])
x2 = torch.tensor([[3.0, 3.0, 1.0]])
def gen_baselines() -> Tuple[Tensor, ...]:
b1 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
b2 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
return (b1, b2)
def gen_baselines_scalar() -> Tuple[float, ...]:
return (0.0, 0.0001)
def gen_baselines_with_inputs(inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:
b1 = torch.cat([inputs[0], inputs[0] - 10])
b2 = torch.cat([inputs[1], inputs[1] - 10])
return (b1, b2)
def gen_baselines_returns_array() -> Tuple[List[List[float]], ...]:
b1 = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
b2 = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
return (b1, b2)
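        # DeepLiftShap expects baselines as tensors (or a callable returning
        # tensors) with a leading distribution dimension; the scalar and
        # nested-list generators above are therefore expected to fail the
        # assertions checked below.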
inputs = (x1, x2)
dl_shap = DeepLiftShap(model)
self._deeplift_assert(model, dl_shap, inputs, gen_baselines)
self._deeplift_assert(model, dl_shap, inputs, gen_baselines_with_inputs)
with self.assertRaises(AssertionError):
self._deeplift_assert(
model, DeepLiftShap(model), inputs, gen_baselines_returns_array
)
with self.assertRaises(AssertionError):
self._deeplift_assert(model, dl_shap, inputs, gen_baselines_scalar)
baselines = gen_baselines()
attributions = dl_shap.attribute(inputs, baselines)
attributions_with_func = dl_shap.attribute(inputs, gen_baselines)
assertTensorAlmostEqual(self, attributions[0], attributions_with_func[0])
assertTensorAlmostEqual(self, attributions[1], attributions_with_func[1])
def test_relu_deepliftshap_with_custom_attr_func(self) -> None:
def custom_attr_func(
multipliers: Tuple[Tensor, ...],
inputs: Tuple[Tensor, ...],
baselines: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
return tuple(multiplier * 0.0 for multiplier in multipliers)
model = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0]])
x2 = torch.tensor([[3.0, 3.0, 1.0]])
b1 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
b2 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
inputs = (x1, x2)
baselines = (b1, b2)
dls = DeepLiftShap(model)
attr_w_func = dls.attribute(
inputs, baselines, custom_attribution_func=custom_attr_func
)
assertTensorAlmostEqual(self, attr_w_func[0], [[0.0, 0.0, 0.0]], 0.0)
assertTensorAlmostEqual(self, attr_w_func[1], [[0.0, 0.0, 0.0]], 0.0)
def test_relu_deepliftshap_with_hypothetical_contrib_func(self) -> None:
model = Conv1dSeqModel()
rand_seq_data = torch.abs(torch.randn(2, 4, 1000))
rand_seq_ref = torch.abs(torch.randn(3, 4, 1000))
dls = DeepLiftShap(model)
attr = dls.attribute(
rand_seq_data,
rand_seq_ref,
custom_attribution_func=_hypothetical_contrib_func,
target=(0, 0),
)
self.assertEqual(attr.shape, rand_seq_data.shape)
def test_reusable_modules(self) -> None:
model = BasicModelWithReusedModules()
input = torch.rand(1, 3)
dl = DeepLift(model)
with self.assertRaises(RuntimeError):
dl.attribute(input, target=0)
def test_lin_maxpool_lin_classification(self) -> None:
inputs = torch.ones(2, 4)
baselines = torch.tensor([[1, 2, 3, 9], [4, 8, 6, 7]]).float()
model = LinearMaxPoolLinearModel()
dl = DeepLift(model)
attrs, delta = dl.attribute(
inputs, baselines, target=0, return_convergence_delta=True
)
expected = torch.Tensor([[0.0, 0.0, 0.0, -8.0], [0.0, -7.0, 0.0, 0.0]])
expected_delta = torch.Tensor([0.0, 0.0])
assertTensorAlmostEqual(self, attrs, expected, 0.0001)
assertTensorAlmostEqual(self, delta, expected_delta, 0.0001)
def _deeplift_assert(
self,
model: Module,
attr_method: Union[DeepLift, DeepLiftShap],
inputs: Tuple[Tensor, ...],
baselines,
        custom_attr_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> None:
input_bsz = len(inputs[0])
if callable(baselines):
baseline_parameters = signature(baselines).parameters
if len(baseline_parameters) > 0:
baselines = baselines(inputs)
else:
baselines = baselines()
baseline_bsz = (
len(baselines[0]) if isinstance(baselines[0], torch.Tensor) else 1
)
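        # DeepLiftShap attributes every input example against every baseline
        # example, so its convergence delta has input_bsz * baseline_bsz
        # entries; plain DeepLift yields one entry per input example.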
# Run attribution multiple times to make sure that it is
# working as expected
for _ in range(5):
model.zero_grad()
attributions, delta = attr_method.attribute(
inputs,
baselines,
return_convergence_delta=True,
custom_attribution_func=custom_attr_func,
)
attributions_without_delta = attr_method.attribute(
inputs, baselines, custom_attribution_func=custom_attr_func
)
for attribution, attribution_without_delta in zip(
attributions, attributions_without_delta
):
self.assertTrue(
torch.all(torch.eq(attribution, attribution_without_delta))
)
if isinstance(attr_method, DeepLiftShap):
self.assertEqual([input_bsz * baseline_bsz], list(delta.shape))
else:
self.assertEqual([input_bsz], list(delta.shape))
delta_external = attr_method.compute_convergence_delta(
attributions, baselines, inputs
)
assertTensorAlmostEqual(
self, delta, delta_external, delta=0.0, mode="max"
)
delta_condition = (delta.abs() < 0.00001).all()
self.assertTrue(
delta_condition,
"The sum of attribution values {} is not "
"nearly equal to the difference between the endpoint for "
"some samples".format(delta),
)
for input, attribution in zip(inputs, attributions):
self.assertEqual(input.shape, attribution.shape)
if (
isinstance(baselines[0], (int, float))
or inputs[0].shape == baselines[0].shape
):
# Compare with Integrated Gradients
ig = IntegratedGradients(model)
attributions_ig = ig.attribute(inputs, baselines)
assertAttributionComparision(self, attributions, attributions_ig)
def _hypothetical_contrib_func(
multipliers: Tuple[Tensor, ...],
inputs: Tuple[Tensor, ...],
baselines: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
r"""
Implements hypothetical input contributions based on the logic described here:
https://github.com/kundajelab/deeplift/pull/36/files
    It uses a dummy model for test purposes.
"""
    # We assume that multipliers, inputs and baselines have the following shape:
# tuple((bsz x len x channel), )
assert len(multipliers[0].shape) == 3, multipliers[0].shape
assert len(inputs[0].shape) == 3, inputs[0].shape
assert len(baselines[0].shape) == 3, baselines[0].shape
assert len(multipliers) == len(inputs) and len(inputs) == len(baselines), (
"multipliers, inputs and baselines must have the same shape but"
"multipliers: {}, inputs: {}, baselines: {}".format(
len(multipliers), len(inputs), len(baselines)
)
)
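    # For each channel i, build a one-hot "hypothetical" input that is active
    # only in channel i and project its difference from the baseline onto the
    # multipliers to obtain that channel's contribution.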
attributions = []
for k in range(len(multipliers)):
sub_attributions = torch.zeros_like(inputs[k])
for i in range(inputs[k].shape[-1]):
hypothetical_input = torch.zeros_like(inputs[k])
hypothetical_input[:, :, i] = 1.0
hypothetical_input_ref_diff = hypothetical_input - baselines[k]
sub_attributions[:, :, i] = torch.sum(
hypothetical_input_ref_diff * multipliers[k], dim=-1
)
attributions.append(sub_attributions)
return tuple(attributions)
|
#!/usr/bin/env python3
from __future__ import print_function
import unittest
from typing import Any, Tuple, Union
import torch
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.guided_backprop_deconvnet import Deconvolution
from captum.attr._core.neuron.neuron_guided_backprop_deconvnet import (
NeuronDeconvolution,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel_ConvNet_One_Conv
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_conv_deconv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[2.0, 3.0, 3.0, 1.0],
[3.0, 5.0, 5.0, 2.0],
[3.0, 5.0, 5.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
exp = torch.tensor(exp).view(1, 1, 4, 4)
self._deconv_test_assert(net, (inp,), (exp,))
def test_simple_input_conv_neuron_deconv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[2.0, 3.0, 3.0, 1.0],
[3.0, 5.0, 5.0, 2.0],
[3.0, 5.0, 5.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
exp = torch.tensor(exp).view(1, 1, 4, 4)
self._neuron_deconv_test_assert(net, net.fc1, (0,), (inp,), (exp,))
def test_simple_input_conv_neuron_deconv_agg_neurons(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[2.0, 3.0, 3.0, 1.0],
[3.0, 5.0, 5.0, 2.0],
[3.0, 5.0, 5.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
exp = torch.tensor(exp).view(1, 1, 4, 4)
self._neuron_deconv_test_assert(net, net.fc1, (slice(0, 1, 1),), (inp,), (exp,))
def test_simple_multi_input_conv_deconv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex_attr = [
[2.0, 3.0, 3.0, 1.0],
[3.0, 5.0, 5.0, 2.0],
[3.0, 5.0, 5.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
ex_attr = torch.tensor(ex_attr).view(1, 1, 4, 4)
self._deconv_test_assert(net, (inp, inp2), (ex_attr, ex_attr))
def test_simple_multi_input_conv_neuron_deconv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex_attr = [
[2.0, 3.0, 3.0, 1.0],
[3.0, 5.0, 5.0, 2.0],
[3.0, 5.0, 5.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
ex_attr = torch.tensor(ex_attr).view(1, 1, 4, 4)
self._neuron_deconv_test_assert(
net, net.fc1, (3,), (inp, inp2), (ex_attr, ex_attr)
)
def test_deconv_matching(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 100.0 * torch.randn(1, 1, 4, 4)
self._deconv_matching_assert(net, net.relu2, inp)
def _deconv_test_assert(
self,
model: Module,
test_input: TensorOrTupleOfTensorsGeneric,
expected: Tuple[torch.Tensor, ...],
additional_input: Any = None,
) -> None:
deconv = Deconvolution(model)
attributions = deconv.attribute(
test_input, target=0, additional_forward_args=additional_input
)
for i in range(len(test_input)):
assertTensorAlmostEqual(self, attributions[i], expected[i], delta=0.01)
def _neuron_deconv_test_assert(
self,
model: Module,
layer: Module,
neuron_selector: Union[int, Tuple[Union[int, slice], ...]],
test_input: TensorOrTupleOfTensorsGeneric,
expected: Tuple[torch.Tensor, ...],
additional_input: Any = None,
) -> None:
deconv = NeuronDeconvolution(model, layer)
attributions = deconv.attribute(
test_input,
neuron_selector=neuron_selector,
additional_forward_args=additional_input,
)
for i in range(len(test_input)):
assertTensorAlmostEqual(self, attributions[i], expected[i], delta=0.01)
def _deconv_matching_assert(
self,
model: Module,
output_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
) -> None:
out = model(test_input)
attrib = Deconvolution(model)
self.assertFalse(attrib.multiplies_by_inputs)
neuron_attrib = NeuronDeconvolution(model, output_layer)
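        # Attributing directly to output neuron i (target=i) should agree with
        # selecting the same neuron of the output layer via NeuronDeconvolution.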
for i in range(out.shape[1]):
deconv_vals = attrib.attribute(test_input, target=i)
neuron_deconv_vals = neuron_attrib.attribute(test_input, (i,))
assertTensorAlmostEqual(self, deconv_vals, neuron_deconv_vals, delta=0.01)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from typing import Any, Callable, Tuple, Union
import torch
from captum._utils.typing import (
BaselineType,
TargetType,
TensorLikeList,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._core.occlusion import Occlusion
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel3,
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
class Test(BaseTest):
def test_improper_window_shape(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
occ = Occlusion(net)
# Check error when too few sliding window dimensions
with self.assertRaises(AssertionError):
_ = occ.attribute(inp, sliding_window_shapes=((1, 2),), target=0)
# Check error when too many sliding window dimensions
with self.assertRaises(AssertionError):
_ = occ.attribute(
(inp, inp), sliding_window_shapes=((1, 1, 2), (1, 1, 1, 2)), target=0
)
# Check error when too many sliding window tuples
with self.assertRaises(AssertionError):
_ = occ.attribute(
(inp, inp),
sliding_window_shapes=((1, 1, 2), (1, 1, 2), (1, 1, 2)),
target=0,
)
def test_improper_stride(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
occ = Occlusion(net)
# Check error when too few stride dimensions
with self.assertRaises(AssertionError):
_ = occ.attribute(
inp, sliding_window_shapes=(1, 2, 2), strides=(1, 2), target=0
)
# Check error when too many stride dimensions
with self.assertRaises(AssertionError):
_ = occ.attribute(
(inp, inp),
sliding_window_shapes=((1, 1, 2), (1, 2, 2)),
strides=((1, 1, 2), (2, 1, 2, 2)),
target=0,
)
# Check error when too many stride tuples
with self.assertRaises(AssertionError):
_ = occ.attribute(
(inp, inp),
sliding_window_shapes=((1, 1, 2), (1, 2, 2)),
strides=((1, 1, 2), (1, 2, 2), (1, 2, 2)),
target=0,
)
def test_too_large_stride(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
occ = Occlusion(net)
with self.assertRaises(AssertionError):
_ = occ.attribute(
inp, sliding_window_shapes=((1, 1, 2),), strides=2, target=0
)
with self.assertRaises(AssertionError):
_ = occ.attribute(
(inp, inp),
sliding_window_shapes=((1, 1, 2), (1, 4, 2)),
strides=(2, (1, 2, 3)),
target=0,
)
with self.assertRaises(AssertionError):
_ = occ.attribute(
inp, sliding_window_shapes=((2, 1, 2),), strides=2, target=0
)
def test_simple_input(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._occlusion_test_assert(
net,
inp,
[[80.0, 200.0, 120.0]],
perturbations_per_eval=(1, 2, 3),
            sliding_window_shapes=(1,),
)
def test_simple_multi_input_int_to_int(self) -> None:
net = BasicModel3()
inp1 = torch.tensor([[-10], [3]])
inp2 = torch.tensor([[-5], [1]])
self._occlusion_test_assert(
net,
(inp1, inp2),
([[0.0], [1.0]], [[0.0], [-1.0]]),
sliding_window_shapes=((1,), (1,)),
)
def test_simple_multi_input_int_to_float(self) -> None:
net = BasicModel3()
def wrapper_func(*inp):
return net(*inp).float()
inp1 = torch.tensor([[-10], [3]])
inp2 = torch.tensor([[-5], [1]])
self._occlusion_test_assert(
wrapper_func,
(inp1, inp2),
([[0.0], [1.0]], [[0.0], [-1.0]]),
sliding_window_shapes=((1,), (1,)),
)
def test_simple_multi_input(self) -> None:
net = BasicModel3()
inp1 = torch.tensor([[-10.0], [3.0]])
inp2 = torch.tensor([[-5.0], [1.0]])
self._occlusion_test_assert(
net,
(inp1, inp2),
([[0.0], [1.0]], [[0.0], [-1.0]]),
sliding_window_shapes=((1,), (1,)),
)
def test_simple_multi_input_0d(self) -> None:
net = BasicModel3()
inp1 = torch.tensor([-10.0, 3.0])
inp2 = torch.tensor([-5.0, 1.0])
self._occlusion_test_assert(
net,
(inp1, inp2),
([0.0, 1.0], [0.0, -1.0]),
sliding_window_shapes=((), ()),
target=None,
)
def test_simple_input_larger_shape(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._occlusion_test_assert(
net,
inp,
[[200.0, 220.0, 240.0]],
perturbations_per_eval=(1, 2, 3),
            sliding_window_shapes=(2,),
baselines=torch.tensor([10.0, 10.0, 10.0]),
)
def test_simple_input_shape_with_stride(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._occlusion_test_assert(
net,
inp,
[[280.0, 280.0, 120.0]],
perturbations_per_eval=(1, 2, 3),
            sliding_window_shapes=(2,),
strides=2,
)
def test_multi_sample_ablation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._occlusion_test_assert(
net,
inp,
[[8.0, 35.0, 12.0], [80.0, 200.0, 120.0]],
perturbations_per_eval=(1, 2, 3),
sliding_window_shapes=((1,),),
)
def test_multi_input_ablation_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
expected = (
[[492.0, 492.0, 492.0], [400.0, 400.0, 400.0]],
[[80.0, 200.0, 120.0], [0.0, 400.0, 0.0]],
[[400.0, 420.0, 440.0], [48.0, 50.0, 52.0]],
)
self._occlusion_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
sliding_window_shapes=((3,), (1,), (2,)),
)
self._occlusion_test_assert(
net,
(inp1, inp2),
expected[0:1],
additional_input=(inp3, 1),
perturbations_per_eval=(1, 2, 3),
sliding_window_shapes=((3,), (1,)),
)
def test_multi_input_ablation_with_baselines(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
expected = (
[[444.0, 444.0, 444.0], [328.0, 328.0, 328.0]],
[[68.0, 188.0, 108.0], [-12.0, 388.0, -12.0]],
[[368.0, 368.0, 24.0], [0.0, 0.0, -12.0]],
)
self._occlusion_test_assert(
net,
(inp1, inp2, inp3),
expected,
baselines=(
torch.tensor([[1.0, 4, 7], [3.0, 6, 9]]),
3.0,
torch.tensor([[4.0], [6]]),
),
additional_input=(1,),
sliding_window_shapes=((3,), (1,), (2,)),
strides=(2, 1, 2),
)
def test_simple_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
self._occlusion_test_assert(
net,
(inp, inp2),
(67 * torch.ones_like(inp), 13 * torch.ones_like(inp2)),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
sliding_window_shapes=((1, 4, 4), (1, 4, 4)),
)
self._occlusion_test_assert(
net,
(inp, inp2),
(
[
[
[
[17.0, 17.0, 17.0, 17.0],
[17.0, 17.0, 17.0, 17.0],
[64.0, 65.5, 65.5, 67.0],
[64.0, 65.5, 65.5, 67.0],
]
]
],
[
[
[
[3.0, 3.0, 3.0, 3.0],
[3.0, 3.0, 3.0, 3.0],
[3.0, 3.0, 3.0, 3.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
),
perturbations_per_eval=(1, 3, 7, 14),
sliding_window_shapes=((1, 2, 3), (1, 1, 2)),
strides=((1, 2, 1), (1, 1, 2)),
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_input_with_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._occlusion_test_assert(
net,
inp,
[[80.0, 200.0, 120.0]],
perturbations_per_eval=(bsz,),
                sliding_window_shapes=(1,),
show_progress=True,
)
output = mock_stderr.getvalue()
            # To verify that progress tracking aligns with the actual
            # iterations, every perturbations_per_eval setting should reach
            # 100% progress.
assert (
"Occlusion attribution: 100%" in output
), f"Error progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
def _occlusion_test_assert(
self,
model: Callable,
test_input: TensorOrTupleOfTensorsGeneric,
expected_ablation: Union[
float,
TensorLikeList,
Tuple[TensorLikeList, ...],
Tuple[Tensor, ...],
],
sliding_window_shapes: Union[Tuple[int, ...], Tuple[Tuple[int, ...], ...]],
target: TargetType = 0,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
strides: Union[None, int, Tuple[Union[int, Tuple[int, ...]], ...]] = None,
show_progress: bool = False,
) -> None:
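        # perturbations_per_eval only batches forward evaluations for speed;
        # the resulting attributions must be identical for every batch size.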
for batch_size in perturbations_per_eval:
ablation = Occlusion(model)
attributions = ablation.attribute(
test_input,
sliding_window_shapes=sliding_window_shapes,
target=target,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
strides=strides,
show_progress=show_progress,
)
if isinstance(expected_ablation, tuple):
for i in range(len(expected_ablation)):
assertTensorAlmostEqual(
self,
attributions[i],
expected_ablation[i],
)
else:
assertTensorAlmostEqual(
self,
attributions,
expected_ablation,
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
from typing import Any, cast, Tuple, Union
import torch
from captum._utils.gradient import compute_gradients
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._core.saliency import Saliency
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import BasicModel, BasicModel5_MultiArgs
from tests.helpers.classification_models import SoftmaxModel
from torch import Tensor
from torch.nn import Module
def _get_basic_config() -> Tuple[Module, Tensor, Tensor, Any]:
    input = torch.tensor([1.0, 2.0, 3.0, 0.0, -1.0, 7.0], requires_grad=True)
    # manually precomputed gradients
grads = torch.tensor([-0.0, -0.0, -0.0, 1.0, 1.0, -0.0])
return BasicModel(), input, grads, None
def _get_multiargs_basic_config() -> Tuple[
Module, Tuple[Tensor, ...], Tuple[Tensor, ...], Any
]:
model = BasicModel5_MultiArgs()
additional_forward_args = ([2, 3], 1)
inputs = (
torch.tensor([[1.5, 2.0, 34.3], [3.4, 1.2, 2.0]], requires_grad=True),
torch.tensor([[3.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True),
)
grads = compute_gradients(
model, inputs, additional_forward_args=additional_forward_args
)
return model, inputs, grads, additional_forward_args
def _get_multiargs_basic_config_large() -> Tuple[
Module, Tuple[Tensor, ...], Tuple[Tensor, ...], Any
]:
model = BasicModel5_MultiArgs()
additional_forward_args = ([2, 3], 1)
inputs = (
torch.tensor(
[[10.5, 12.0, 34.3], [43.4, 51.2, 32.0]], requires_grad=True
).repeat_interleave(3, dim=0),
torch.tensor(
[[1.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True
).repeat_interleave(3, dim=0),
)
grads = compute_gradients(
model, inputs, additional_forward_args=additional_forward_args
)
return model, inputs, grads, additional_forward_args
class Test(BaseTest):
def test_saliency_test_basic_vanilla(self) -> None:
self._saliency_base_assert(*_get_basic_config())
def test_saliency_test_basic_smoothgrad(self) -> None:
self._saliency_base_assert(*_get_basic_config(), nt_type="smoothgrad")
def test_saliency_test_basic_vargrad(self) -> None:
self._saliency_base_assert(*_get_basic_config(), nt_type="vargrad")
def test_saliency_test_basic_multi_variable_vanilla(self) -> None:
self._saliency_base_assert(*_get_multiargs_basic_config())
def test_saliency_test_basic_multi_variable_smoothgrad(self) -> None:
self._saliency_base_assert(*_get_multiargs_basic_config(), nt_type="smoothgrad")
def test_saliency_test_basic_multivar_sg_n_samples_batch_size_2(self) -> None:
attributions_batch_size = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="smoothgrad",
n_samples_batch_size=2,
)
attributions = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="smoothgrad",
)
assertTensorTuplesAlmostEqual(self, attributions_batch_size, attributions)
def test_saliency_test_basic_multivar_sg_n_samples_batch_size_3(self) -> None:
attributions_batch_size = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="smoothgrad_sq",
n_samples_batch_size=3,
)
attributions = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="smoothgrad_sq",
)
assertTensorTuplesAlmostEqual(self, attributions_batch_size, attributions)
def test_saliency_test_basic_multivar_vg_n_samples_batch_size_1(self) -> None:
attributions_batch_size = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="vargrad",
n_samples_batch_size=1,
)
attributions = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="vargrad",
)
assertTensorTuplesAlmostEqual(self, attributions_batch_size, attributions)
def test_saliency_test_basic_multivar_vg_n_samples_batch_size_6(self) -> None:
attributions_batch_size = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="vargrad",
n_samples_batch_size=6,
)
attributions = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="vargrad",
)
assertTensorTuplesAlmostEqual(self, attributions_batch_size, attributions)
def test_saliency_test_basic_multi_vargrad(self) -> None:
self._saliency_base_assert(*_get_multiargs_basic_config(), nt_type="vargrad")
def test_saliency_classification_vanilla(self) -> None:
self._saliency_classification_assert()
def test_saliency_classification_smoothgrad(self) -> None:
self._saliency_classification_assert(nt_type="smoothgrad")
def test_saliency_classification_vargrad(self) -> None:
self._saliency_classification_assert(nt_type="vargrad")
def test_saliency_grad_unchanged(self) -> None:
model, inp, grads, add_args = _get_basic_config()
inp.grad = torch.randn_like(inp)
grad = inp.grad.detach().clone()
self._saliency_base_assert(model, inp, grads, add_args)
assertTensorTuplesAlmostEqual(self, inp.grad, grad, delta=0.0)
def _saliency_base_assert(
self,
model: Module,
inputs: TensorOrTupleOfTensorsGeneric,
expected: TensorOrTupleOfTensorsGeneric,
additional_forward_args: Any = None,
nt_type: str = "vanilla",
        n_samples_batch_size: Union[None, int] = None,
) -> Union[Tensor, Tuple[Tensor, ...]]:
saliency = Saliency(model)
self.assertFalse(saliency.multiplies_by_inputs)
if nt_type == "vanilla":
attributions = saliency.attribute(
inputs, additional_forward_args=additional_forward_args
)
else:
nt = NoiseTunnel(saliency)
attributions = nt.attribute(
inputs,
nt_type=nt_type,
nt_samples=10,
nt_samples_batch_size=n_samples_batch_size,
stdevs=0.0000002,
additional_forward_args=additional_forward_args,
)
for input, attribution, expected_attr in zip(inputs, attributions, expected):
if nt_type == "vanilla":
self._assert_attribution(attribution, expected_attr)
self.assertEqual(input.shape, attribution.shape)
return attributions
def _assert_attribution(self, attribution: Tensor, expected: Tensor) -> None:
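        # Saliency returns the absolute value of the gradients by default,
        # so only the magnitudes of the expected gradients are compared.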
expected = torch.abs(expected)
if len(attribution.shape) == 0:
assert (attribution - expected).abs() < 0.001
else:
assertTensorAlmostEqual(self, expected, attribution, delta=0.5, mode="max")
def _saliency_classification_assert(self, nt_type: str = "vanilla") -> None:
num_in = 5
input = torch.tensor([[0.0, 1.0, 2.0, 3.0, 4.0]], requires_grad=True)
target = torch.tensor(5)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
saliency = Saliency(model)
if nt_type == "vanilla":
attributions = saliency.attribute(input, target)
output = model(input)[:, target]
output.backward()
expected = torch.abs(cast(Tensor, input.grad))
assertTensorAlmostEqual(self, attributions, expected)
else:
nt = NoiseTunnel(saliency)
attributions = nt.attribute(
input, nt_type=nt_type, nt_samples=10, stdevs=0.0002, target=target
)
self.assertEqual(input.shape, attributions.shape)
|
#!/usr/bin/env python3
import random
import torch
from captum.attr import Max, Mean, Min, MSE, StdDev, Sum, Summarizer, Var
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
def get_values(n=100, lo=None, hi=None, integers=False):
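    # Yields n random values in [lo, hi]: inclusive integers via randint,
    # otherwise uniform floats in the half-open interval [lo, hi).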
for _ in range(n):
if integers:
yield random.randint(lo, hi)
else:
yield random.random() * (hi - lo) + lo
class Test(BaseTest):
def test_div0(self) -> None:
summarizer = Summarizer([Var(), Mean()])
summ = summarizer.summary
self.assertIsNone(summ)
summarizer.update(torch.tensor(10))
summ = summarizer.summary
assertTensorAlmostEqual(self, summ["mean"], 10)
assertTensorAlmostEqual(self, summ["variance"], 0)
summarizer.update(torch.tensor(10))
summ = summarizer.summary
assertTensorAlmostEqual(self, summ["mean"], 10)
assertTensorAlmostEqual(self, summ["variance"], 0)
def test_var_defin(self) -> None:
"""
        Variance is the average squared distance to the mean, so it must be
        non-negative. This test ensures that is the case.
        To test it, we build a skewed distribution leaning toward one end
        (either very large or very small values).
        We also compare against torch.var, assuming its implementation is
        correct, and ensure the results approximately match.
"""
SMALL_VAL = -10000
BIG_VAL = 10000
AMOUNT_OF_SMALLS = [100, 10]
AMOUNT_OF_BIGS = [10, 100]
for sm, big in zip(AMOUNT_OF_SMALLS, AMOUNT_OF_BIGS):
summ = Summarizer([Var()])
values = []
for _ in range(sm):
values.append(SMALL_VAL)
summ.update(torch.tensor(SMALL_VAL, dtype=torch.float64))
for _ in range(big):
values.append(BIG_VAL)
summ.update(torch.tensor(BIG_VAL, dtype=torch.float64))
actual_var = torch.var(torch.tensor(values).double(), unbiased=False)
var = summ.summary["variance"]
assertTensorAlmostEqual(self, var, actual_var)
self.assertTrue((var > 0).all())
def test_multi_dim(self) -> None:
x1 = torch.tensor([1.0, 2.0, 3.0, 4.0])
x2 = torch.tensor([2.0, 1.0, 2.0, 4.0])
x3 = torch.tensor([3.0, 3.0, 1.0, 4.0])
summarizer = Summarizer([Mean(), Var()])
summarizer.update(x1)
assertTensorAlmostEqual(
self, summarizer.summary["mean"], x1, delta=0.05, mode="max"
)
assertTensorAlmostEqual(
self,
summarizer.summary["variance"],
torch.zeros_like(x1),
delta=0.05,
mode="max",
)
summarizer.update(x2)
assertTensorAlmostEqual(
self,
summarizer.summary["mean"],
torch.tensor([1.5, 1.5, 2.5, 4]),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
summarizer.summary["variance"],
torch.tensor([0.25, 0.25, 0.25, 0]),
delta=0.05,
mode="max",
)
summarizer.update(x3)
assertTensorAlmostEqual(
self,
summarizer.summary["mean"],
torch.tensor([2, 2, 2, 4]),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
summarizer.summary["variance"],
torch.tensor([2.0 / 3.0, 2.0 / 3.0, 2.0 / 3.0, 0]),
delta=0.05,
mode="max",
)
def test_stats_random_data(self):
N = 1000
BIG_VAL = 100000
_values = list(get_values(lo=-BIG_VAL, hi=BIG_VAL, n=N))
values = torch.tensor(_values, dtype=torch.float64)
stats_to_test = [
Mean(),
Var(),
Var(order=1),
StdDev(),
StdDev(order=1),
Min(),
Max(),
Sum(),
MSE(),
]
stat_names = [
"mean",
"variance",
"sample_variance",
"std_dev",
"sample_std_dev",
"min",
"max",
"sum",
"mse",
]
gt_fns = [
torch.mean,
lambda x: torch.var(x, unbiased=False),
lambda x: torch.var(x, unbiased=True),
lambda x: torch.std(x, unbiased=False),
lambda x: torch.std(x, unbiased=True),
torch.min,
torch.max,
torch.sum,
lambda x: torch.sum((x - torch.mean(x)) ** 2),
]
for stat, name, gt in zip(stats_to_test, stat_names, gt_fns):
summ = Summarizer([stat])
actual = gt(values)
for x in values:
summ.update(x)
stat_val = summ.summary[name]
            # rounding error is a serious issue (more so for MSE)
assertTensorAlmostEqual(self, stat_val, actual, delta=0.005)
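# Minimal usage sketch of the Summarizer API exercised above (for reference
# only, not executed by the tests; `data` stands in for any iterable of
# tensors):
#
#   summarizer = Summarizer([Mean(), Var()])
#   for batch in data:
#       summarizer.update(batch)
#   stats = summarizer.summary  # e.g. stats["mean"], stats["variance"]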
|
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.kernel_shap import KernelShap
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
set_all_random_seeds,
)
from tests.helpers.basic_models import (
BasicLinearModel,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
class Test(BaseTest):
def setUp(self) -> None:
super().setUp()
try:
import sklearn # noqa: F401
            # Compare numeric (major, minor) versions; lexicographic string
            # comparison would mis-order releases such as "0.9" vs "0.23.0".
            sklearn_version = tuple(
                int(part) for part in sklearn.__version__.split(".")[:2]
            )
            assert sklearn_version >= (0, 23), (
                "Must have sklearn version 0.23.0 or higher"
            )
except (ImportError, AssertionError):
raise unittest.SkipTest("Skipping KernelShap tests, sklearn not available.")
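    # KernelShap fits a weighted linear model to the perturbed samples and
    # delegates that regression to sklearn, hence the dependency checked above.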
def test_linear_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
baseline = torch.tensor([[10.0, 20.0, 10.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[40.0, 120.0, 80.0]],
n_samples=500,
baselines=baseline,
expected_coefs=[[40.0, 120.0, 80.0]],
)
def test_simple_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(1, 2, 3),
n_samples=500,
)
def test_simple_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[275.0, 275.0, 115.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
expected_coefs=[[275.0, 115.0]],
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_kernel_shap_with_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._kernel_shap_test_assert(
net,
inp,
[[76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(bsz,),
n_samples=500,
show_progress=True,
)
output = mock_stderr.getvalue()
            # To verify that progress tracking aligns with the actual
            # iterations, every perturbations_per_eval setting should reach
            # 100% progress.
assert (
"Kernel Shap attribution: 100%" in output
), f"Error progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
def test_simple_kernel_shap_with_baselines(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]])
self._kernel_shap_test_assert(
net,
inp,
[[248.0, 248.0, 104.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=4,
perturbations_per_eval=(1, 2, 3),
)
def test_simple_batch_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[7.0, 32.5, 10.5], [76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(1, 2, 3),
n_samples=20000,
)
def test_simple_batch_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[39.5, 39.5, 10.5], [275.0, 275.0, 115.0]],
feature_mask=torch.tensor([[0, 0, 1], [1, 1, 0]]),
perturbations_per_eval=(1, 2, 3),
n_samples=100,
expected_coefs=[[39.5, 10.5], [115.0, 275.0]],
)
def test_multi_input_kernel_shap_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0]])
expected = (
[[90, 0, 0]],
[[78, 0, 198]],
[[0, 398, 38]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=2000,
)
def test_multi_input_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[20.0, 50.0, 30.0]])
inp2 = torch.tensor([[0.0, 100.0, 0.0]])
inp3 = torch.tensor([[2.0, 10.0, 3.0]])
mask1 = torch.tensor([[0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 0, 0]])
expected = (
[[255.0, 595.0, 255.0]],
[[255.0, 595.0, 0.0]],
[[255.0, 255.0, 255.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
expected_with_baseline = (
[[184, 580.0, 184]],
[[184, 580.0, -12.0]],
[[184, 184, 184]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_kernel_shap_with_empty_input(self) -> None:
net = BasicLinearModel()
inp1 = torch.tensor([[23.0, 0.0, 0.0, 23.0, 0.0, 0.0, 23.0]])
inp2 = torch.tensor([[]]) # empty input
mask1 = torch.tensor([[0, 1, 2, 3, 4, 5, 6]])
mask2 = torch.tensor([[]], dtype=torch.long) # empty mask
expected: Tuple[List[List[float]], ...] = (
[[-8.0, 0, 0, -2.0, 0, 0, -8.0]],
[[]],
)
# no mask
self._kernel_shap_test_assert(
net,
(inp1, inp2),
expected,
n_samples=2000,
expected_coefs=[[-8.0, 0, 0, -2.0, 0, 0, -8.0]],
)
# with mask
self._kernel_shap_test_assert(
net,
(inp1, inp2),
expected,
n_samples=2000,
expected_coefs=[[-8.0, 0, 0, -2.0, 0, 0, -8.0]],
feature_mask=(mask1, mask2),
)
def test_multi_input_batch_kernel_shap_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]])
expected = (
[[90, 0, 0], [78.0, 198.0, 118.0]],
[[78, 0, 198], [0.0, 398.0, 0.0]],
[[0, 398, 38], [0.0, 38.0, 0.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=2500,
expected_coefs=[
[90.0, 0, 0, 78, 0, 198, 0, 398, 38],
[78.0, 198.0, 118.0, 0.0, 398.0, 0.0, 0.0, 38.0, 0.0],
],
)
def test_multi_input_batch_kernel_shap(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[1088.6666, 1088.6666, 1088.6666], [255.0, 595.0, 255.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 595.0, 0.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 255.0, 255.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
n_samples=300,
)
expected_with_baseline = (
[[1040, 1040, 1040], [184, 580.0, 184]],
[[52, 1040, 132], [184, 580.0, -12.0]],
[[52, 1040, 132], [184, 184, 184]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
# Remaining tests are for cases where forward function returns a scalar
# as either a float, integer, 0d tensor or 1d tensor.
def test_single_kernel_shap_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: torch.sum(net(inp)).item()
)
def test_single_kernel_shap_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(lambda inp: torch.sum(net(inp)))
def test_single_kernel_shap_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_single_kernel_shap_scalar_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def _single_input_scalar_kernel_shap_assert(self, func: Callable) -> None:
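        # The wrapped forward returns a single scalar for the whole batch, so
        # no target is selected (target=None) and perturbations are evaluated
        # one at a time.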
inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._kernel_shap_test_assert(
func,
inp,
[[79.0, 79.0, 21.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def test_multi_inp_kernel_shap_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(lambda *inp: torch.sum(net(*inp)))
def test_multi_inp_kernel_shap_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: torch.sum(net(*inp)).reshape(1)
)
def test_multi_inp_kernel_shap_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: int(torch.sum(net(*inp)).item())
)
def test_multi_inp_kernel_shap_scalar_float(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: torch.sum(net(*inp)).item()
)
def _multi_input_scalar_kernel_shap_assert(self, func: Callable) -> None:
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]])
mask1 = torch.tensor([[1, 1, 1]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2]])
expected = (
[[3850.6666, 3850.6666, 3850.6666]] * 2,
[[306.6666, 3850.6666, 410.6666]] * 2,
[[306.6666, 3850.6666, 410.6666]] * 2,
)
self._kernel_shap_test_assert(
func,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
perturbations_per_eval=(1,),
target=None,
n_samples=1500,
)
def _kernel_shap_test_assert(
self,
model: Callable,
test_input: TensorOrTupleOfTensorsGeneric,
expected_attr,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: Union[None, int] = 0,
n_samples: int = 100,
delta: float = 1.0,
expected_coefs: Union[None, List[float], List[List[float]]] = None,
show_progress: bool = False,
) -> None:
for batch_size in perturbations_per_eval:
kernel_shap = KernelShap(model)
attributions = kernel_shap.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
show_progress=show_progress,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_attr, delta=delta, mode="max"
)
if expected_coefs is not None:
set_all_random_seeds(1234)
# Test with return_input_shape = False
attributions = kernel_shap.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
return_input_shape=False,
show_progress=show_progress,
)
assertTensorAlmostEqual(
self, attributions, expected_coefs, delta=delta, mode="max"
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from typing import Any, cast, Tuple, Union
import torch
from captum._utils.common import _zeros
from captum._utils.typing import BaselineType, Tensor, TensorOrTupleOfTensorsGeneric
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._utils.common import _tensorize_baseline
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel,
BasicModel2,
BasicModel3,
BasicModel4_MultiArgs,
BasicModel5_MultiArgs,
BasicModel6_MultiTensor,
BasicModel_MultiLayer,
)
from torch.nn import Module
class Test(BaseTest):
def test_multivariable_vanilla(self) -> None:
self._assert_multi_variable("vanilla", "riemann_right")
    def test_multivariable_vanilla_wo_multiplying_by_inputs(self) -> None:
self._assert_multi_variable(
"vanilla", "riemann_right", multiply_by_inputs=False
)
def test_multivariable_smoothgrad(self) -> None:
self._assert_multi_variable("smoothgrad", "riemann_left")
def test_multivariable_smoothgrad_sq(self) -> None:
self._assert_multi_variable("smoothgrad_sq", "riemann_middle")
def test_multivariable_vargrad(self) -> None:
self._assert_multi_variable("vargrad", "riemann_trapezoid")
def test_multi_argument_vanilla(self) -> None:
self._assert_multi_argument("vanilla", "gausslegendre")
def test_multi_argument_smoothgrad(self) -> None:
self._assert_multi_argument("smoothgrad", "riemann_right")
def test_multi_argument_smoothgrad_sq(self) -> None:
self._assert_multi_argument("smoothgrad_sq", "riemann_left")
def test_multi_argument_vargrad(self) -> None:
self._assert_multi_argument("vargrad", "riemann_middle")
def test_univariable_vanilla(self) -> None:
self._assert_univariable("vanilla", "riemann_trapezoid")
def test_univariable_smoothgrad(self) -> None:
self._assert_univariable("smoothgrad", "gausslegendre")
def test_univariable_smoothgrad_sq(self) -> None:
self._assert_univariable("smoothgrad_sq", "riemann_right")
def test_univariable_vargrad(self) -> None:
self._assert_univariable("vargrad", "riemann_left")
def test_multi_tensor_input_vanilla(self) -> None:
self._assert_multi_tensor_input("vanilla", "riemann_middle")
def test_multi_tensor_input_smoothgrad(self) -> None:
self._assert_multi_tensor_input("smoothgrad", "riemann_trapezoid")
def test_multi_tensor_input_smoothgrad_sq(self) -> None:
self._assert_multi_tensor_input("smoothgrad_sq", "gausslegendre")
def test_multi_tensor_input_vargrad(self) -> None:
self._assert_multi_tensor_input("vargrad", "riemann_right")
def test_batched_input_vanilla(self) -> None:
self._assert_batched_tensor_input("vanilla", "riemann_left")
def test_batched_input_smoothgrad(self) -> None:
self._assert_batched_tensor_input("smoothgrad", "riemann_middle")
def test_batched_input_smoothgrad_with_batch_size_1(self) -> None:
self._assert_n_samples_batched_size("smoothgrad", "riemann_middle", 1)
    def test_batched_input_vargrad_with_batch_size_2(self) -> None:
self._assert_n_samples_batched_size("vargrad", "riemann_middle", 2)
    def test_batched_input_smoothgrad_sq_with_batch_size_3(self) -> None:
self._assert_n_samples_batched_size("smoothgrad_sq", "riemann_middle", 3)
def test_batched_input_smoothgrad_sq(self) -> None:
self._assert_batched_tensor_input("smoothgrad_sq", "riemann_trapezoid")
def test_batched_input_vargrad(self) -> None:
self._assert_batched_tensor_input("vargrad", "gausslegendre")
    def test_batched_input_smoothgrad_wo_multiplying_by_inputs(self) -> None:
model = BasicModel_MultiLayer()
inputs = torch.tensor(
[[1.5, 2.0, 1.3], [0.5, 0.1, 2.3], [1.5, 2.0, 1.3]], requires_grad=True
)
        ig_wo_multiplying_by_inputs = IntegratedGradients(
model, multiply_by_inputs=False
)
        nt_wo_multiplying_by_inputs = NoiseTunnel(ig_wo_multiplying_by_inputs)
ig = IntegratedGradients(model)
nt = NoiseTunnel(ig)
n_samples = 5
target = 0
type = "smoothgrad"
        attributions_wo_multiplying_by_inputs = nt_wo_multiplying_by_inputs.attribute(
inputs,
nt_type=type,
nt_samples=n_samples,
stdevs=0.0,
target=target,
n_steps=500,
)
attributions = nt.attribute(
inputs,
nt_type=type,
nt_samples=n_samples,
stdevs=0.0,
target=target,
n_steps=500,
)
assertTensorAlmostEqual(
            self, attributions_wo_multiplying_by_inputs * inputs, attributions
)
def test_batched_multi_input_vanilla(self) -> None:
self._assert_batched_tensor_multi_input("vanilla", "riemann_right")
def test_batched_multi_input_smoothgrad(self) -> None:
self._assert_batched_tensor_multi_input("smoothgrad", "riemann_left")
def test_batched_multi_input_smoothgrad_sq(self) -> None:
self._assert_batched_tensor_multi_input("smoothgrad_sq", "riemann_middle")
def test_batched_multi_input_vargrad(self) -> None:
self._assert_batched_tensor_multi_input("vargrad", "riemann_trapezoid")
def test_batched_multi_input_vargrad_batch_size_1(self) -> None:
self._assert_batched_tensor_multi_input("vargrad", "riemann_trapezoid", 1)
    def test_batched_multi_input_vargrad_batch_size_2(self) -> None:
self._assert_batched_tensor_multi_input("vargrad", "riemann_trapezoid", 2)
    def test_batched_multi_input_vargrad_batch_size_3(self) -> None:
self._assert_batched_tensor_multi_input("vargrad", "riemann_trapezoid", 3)
def _assert_multi_variable(
self,
type: str,
approximation_method: str = "gausslegendre",
multiply_by_inputs: bool = True,
) -> None:
model = BasicModel2()
input1 = torch.tensor([3.0])
input2 = torch.tensor([1.0], requires_grad=True)
baseline1 = torch.tensor([0.0])
baseline2 = torch.tensor([0.0])
attributions1 = self._compute_attribution_and_evaluate(
model,
(input1, input2),
(baseline1, baseline2),
type=type,
approximation_method=approximation_method,
multiply_by_inputs=multiply_by_inputs,
)
if type == "vanilla":
assertTensorAlmostEqual(
self,
attributions1[0],
[1.5] if multiply_by_inputs else [0.5],
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
attributions1[1],
                [-0.5],  # same with or without multiplying: input2 - baseline2 == 1.0
delta=0.05,
mode="max",
)
model = BasicModel3()
attributions2 = self._compute_attribution_and_evaluate(
model,
(input1, input2),
(baseline1, baseline2),
type=type,
approximation_method=approximation_method,
multiply_by_inputs=multiply_by_inputs,
)
if type == "vanilla":
assertTensorAlmostEqual(
self,
attributions2[0],
[1.5] if multiply_by_inputs else [0.5],
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
attributions2[1],
                [-0.5],  # same with or without multiplying: input2 - baseline2 == 1.0
delta=0.05,
mode="max",
)
        # Verifies implementation invariance: functionally equivalent models
        # should receive the same total attribution.
self.assertEqual(
sum(attribution for attribution in attributions1),
sum(attribution for attribution in attributions2),
)
def _assert_univariable(
self, type: str, approximation_method: str = "gausslegendre"
) -> None:
model = BasicModel()
self._compute_attribution_and_evaluate(
model,
torch.tensor([1.0], requires_grad=True),
torch.tensor([0.0]),
type=type,
approximation_method=approximation_method,
)
self._compute_attribution_and_evaluate(
model,
torch.tensor([0.0]),
torch.tensor([0.0]),
type=type,
approximation_method=approximation_method,
)
self._compute_attribution_and_evaluate(
model,
torch.tensor([-1.0], requires_grad=True),
0.00001,
type=type,
approximation_method=approximation_method,
)
def _assert_multi_argument(
self, type: str, approximation_method: str = "gausslegendre"
) -> None:
model = BasicModel4_MultiArgs()
self._compute_attribution_and_evaluate(
model,
(
torch.tensor([[1.5, 2.0, 34.3]], requires_grad=True),
torch.tensor([[3.0, 3.5, 23.2]], requires_grad=True),
),
baselines=(0.0, torch.zeros((1, 3))),
additional_forward_args=torch.arange(1.0, 4.0).reshape(1, 3),
type=type,
approximation_method=approximation_method,
)
# uses batching with an integer variable and nd-tensors as
# additional forward arguments
self._compute_attribution_and_evaluate(
model,
(
torch.tensor([[1.5, 2.0, 34.3], [3.4, 1.2, 2.0]], requires_grad=True),
torch.tensor([[3.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True),
),
baselines=(torch.zeros((2, 3)), 0.0),
additional_forward_args=(torch.arange(1.0, 7.0).reshape(2, 3), 1),
type=type,
approximation_method=approximation_method,
)
# uses batching with an integer variable and python list
# as additional forward arguments
model = BasicModel5_MultiArgs()
self._compute_attribution_and_evaluate(
model,
(
torch.tensor([[1.5, 2.0, 34.3], [3.4, 1.2, 2.0]], requires_grad=True),
torch.tensor([[3.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True),
),
baselines=(0.0, 0.00001),
additional_forward_args=([2, 3], 1),
type=type,
approximation_method=approximation_method,
)
# similar to previous case plus baseline consists of a tensor and
# a single example
self._compute_attribution_and_evaluate(
model,
(
torch.tensor([[1.5, 2.0, 34.3], [3.4, 1.2, 2.0]], requires_grad=True),
torch.tensor([[3.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True),
),
baselines=(torch.zeros((1, 3)), 0.00001),
additional_forward_args=([2, 3], 1),
type=type,
approximation_method=approximation_method,
)
def _assert_multi_tensor_input(
self, type: str, approximation_method: str = "gausslegendre"
) -> None:
model = BasicModel6_MultiTensor()
self._compute_attribution_and_evaluate(
model,
(
torch.tensor([[1.5, 2.0, 3.3]], requires_grad=True),
torch.tensor([[3.0, 3.5, 2.2]], requires_grad=True),
),
type=type,
approximation_method=approximation_method,
)
def _assert_batched_tensor_input(
self, type: str, approximation_method: str = "gausslegendre"
) -> None:
model = BasicModel_MultiLayer()
input = (
torch.tensor(
[[1.5, 2.0, 1.3], [0.5, 0.1, 2.3], [1.5, 2.0, 1.3]], requires_grad=True
),
)
self._compute_attribution_and_evaluate(
model, input, type=type, target=0, approximation_method=approximation_method
)
self._compute_attribution_batch_helper_evaluate(
model, input, target=0, approximation_method=approximation_method
)
def _assert_batched_tensor_multi_input(
self,
type: str,
approximation_method: str = "gausslegendre",
        nt_samples_batch_size: Union[None, int] = None,
) -> None:
model = BasicModel_MultiLayer()
input = (
torch.tensor(
[[1.5, 2.1, 1.9], [0.5, 0.0, 0.7], [1.5, 2.1, 1.1]], requires_grad=True
),
torch.tensor(
[[0.3, 1.9, 2.4], [0.5, 0.6, 2.1], [1.2, 2.1, 0.2]], requires_grad=True
),
)
self._compute_attribution_and_evaluate(
model,
input,
type=type,
target=0,
approximation_method=approximation_method,
nt_samples_batch_size=nt_samples_batch_size,
)
def _assert_n_samples_batched_size(
self,
type: str,
approximation_method: str = "gausslegendre",
        nt_samples_batch_size: Union[None, int] = None,
) -> None:
model = BasicModel_MultiLayer()
input = (
torch.tensor(
[[1.5, 2.0, 1.3], [0.5, 0.1, 2.3], [1.5, 2.0, 1.3]], requires_grad=True
),
)
self._compute_attribution_and_evaluate(
model,
input,
type=type,
target=0,
nt_samples_batch_size=nt_samples_batch_size,
approximation_method=approximation_method,
)
def _compute_attribution_and_evaluate(
self,
model: Module,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: Union[None, int] = None,
additional_forward_args: Any = None,
type: str = "vanilla",
approximation_method: str = "gausslegendre",
        multiply_by_inputs: bool = True,
        nt_samples_batch_size: Union[None, int] = None,
) -> Tuple[Tensor, ...]:
r"""
        type: 'vanilla', 'smoothgrad', 'smoothgrad_sq', 'vargrad'
"""
ig = IntegratedGradients(model, multiply_by_inputs=multiply_by_inputs)
self.assertEqual(ig.multiplies_by_inputs, multiply_by_inputs)
if not isinstance(inputs, tuple):
inputs = (inputs,) # type: ignore
inputs: Tuple[Tensor, ...]
if baselines is not None and not isinstance(baselines, tuple):
baselines = (baselines,)
if baselines is None:
baselines = _tensorize_baseline(inputs, _zeros(inputs))
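        # Default to all-zero baselines expanded to match each input's shape.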
if type == "vanilla":
attributions, delta = ig.attribute(
inputs,
baselines,
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=500,
target=target,
return_convergence_delta=True,
)
model.zero_grad()
attributions_without_delta, delta = ig.attribute(
inputs,
baselines,
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=500,
target=target,
return_convergence_delta=True,
)
model.zero_grad()
self.assertEqual([inputs[0].shape[0]], list(delta.shape))
delta_external = ig.compute_convergence_delta(
attributions,
baselines,
inputs,
target=target,
additional_forward_args=additional_forward_args,
)
assertTensorAlmostEqual(self, delta, delta_external, delta=0.0, mode="max")
else:
nt = NoiseTunnel(ig)
n_samples = 5
attributions, delta = nt.attribute(
inputs,
nt_type=type,
nt_samples=n_samples,
stdevs=0.00000002,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=500,
return_convergence_delta=True,
nt_samples_batch_size=nt_samples_batch_size,
)
attributions_without_delta = nt.attribute(
inputs,
nt_type=type,
nt_samples=n_samples,
stdevs=0.00000002,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=500,
nt_samples_batch_size=3,
)
self.assertEqual(nt.multiplies_by_inputs, multiply_by_inputs)
self.assertEqual([inputs[0].shape[0] * n_samples], list(delta.shape))
for input, attribution in zip(inputs, attributions):
self.assertEqual(attribution.shape, input.shape)
if multiply_by_inputs:
assertTensorAlmostEqual(self, delta, torch.zeros(delta.shape), 0.07, "max")
# compare attributions retrieved with and without
# `return_convergence_delta` flag
for attribution, attribution_without_delta in zip(
attributions, attributions_without_delta
):
assertTensorAlmostEqual(
self, attribution, attribution_without_delta, delta=0.05
)
return cast(Tuple[Tensor, ...], attributions)
def _compute_attribution_batch_helper_evaluate(
self,
model: Module,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[None, Tensor, Tuple[Tensor, ...]] = None,
target: Union[None, int] = None,
additional_forward_args: Any = None,
approximation_method: str = "gausslegendre",
) -> None:
ig = IntegratedGradients(model)
if not isinstance(inputs, tuple):
inputs = (inputs,) # type: ignore
inputs: Tuple[Tensor, ...]
if baselines is not None and not isinstance(baselines, tuple):
baselines = (baselines,)
if baselines is None:
baselines = _tensorize_baseline(inputs, _zeros(inputs))
for internal_batch_size in [None, 10, 20]:
attributions, delta = ig.attribute(
inputs,
baselines,
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=100,
target=target,
internal_batch_size=internal_batch_size,
return_convergence_delta=True,
)
total_delta = 0.0
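            # Attribute each example individually; the batched attributions
            # should match the per-example results, and the per-example deltas
            # should sum to the batched delta (checked after the loop).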
for i in range(inputs[0].shape[0]):
attributions_indiv, delta_indiv = ig.attribute(
tuple(input[i : i + 1] for input in inputs),
tuple(baseline[i : i + 1] for baseline in baselines),
additional_forward_args=additional_forward_args,
method=approximation_method,
n_steps=100,
target=target,
internal_batch_size=internal_batch_size,
return_convergence_delta=True,
)
total_delta += abs(delta_indiv).sum().item()
for j in range(len(attributions)):
assertTensorAlmostEqual(
self,
attributions[j][i : i + 1].squeeze(0),
attributions_indiv[j].squeeze(0),
delta=0.05,
mode="max",
)
self.assertAlmostEqual(abs(delta).sum().item(), total_delta, delta=0.005)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from typing import Any, Callable, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.shapley_value import ShapleyValues, ShapleyValueSampling
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
BasicModelBoolInput,
)
class Test(BaseTest):
def test_simple_shapley_sampling(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._shapley_test_assert(
net,
inp,
[[76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(1, 2, 3),
n_samples=250,
)
def test_simple_shapley_sampling_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._shapley_test_assert(
net,
inp,
[[275.0, 275.0, 115.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_shapley_sampling_boolean(self) -> None:
net = BasicModelBoolInput()
inp = torch.tensor([[True, False, True]])
self._shapley_test_assert(
net,
inp,
[[35.0, 35.0, 35.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_shapley_sampling_boolean_with_baseline(self) -> None:
net = BasicModelBoolInput()
inp = torch.tensor([[True, False, True]])
self._shapley_test_assert(
net,
inp,
[[-40.0, -40.0, 0.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=True,
perturbations_per_eval=(1, 2, 3),
)
def test_simple_shapley_sampling_with_baselines(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]])
self._shapley_test_assert(
net,
inp,
[[248.0, 248.0, 104.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=4,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_sample_shapley_sampling(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]])
self._shapley_test_assert(
net,
inp,
[[7.0, 32.5, 10.5], [76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(1, 2, 3),
n_samples=200,
)
def test_multi_sample_shapley_sampling_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
self._shapley_test_assert(
net,
inp,
[[39.5, 39.5, 10.5], [275.0, 275.0, 115.0]],
feature_mask=mask,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_shapley_sampling_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]])
expected = (
[[90, 0, 0], [78.0, 198.0, 118.0]],
[[78, 0, 198], [0.0, 398.0, 0.0]],
[[0, 398, 38], [0.0, 38.0, 0.0]],
)
self._shapley_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=200,
test_true_shapley=False,
)
def test_multi_input_shapley_sampling_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[1088.6666, 1088.6666, 1088.6666], [255.0, 595.0, 255.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 595.0, 0.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 255.0, 255.0]],
)
self._shapley_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
expected_with_baseline = (
[[1040, 1040, 1040], [184, 580.0, 184]],
[[52, 1040, 132], [184, 580.0, -12.0]],
[[52, 1040, 132], [184, 184, 184]],
)
self._shapley_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
    # The remaining tests cover cases where the forward function returns a
    # single scalar per batch, as either a float, an integer, a 0d tensor,
    # or a 1d tensor.
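    # A minimal sketch of such a wrapper (assuming `net` is any model whose
    # outputs can be summed into one number per batch):
    #   scalar_forward = lambda inp: torch.sum(net(inp)).item()
    #   ShapleyValueSampling(scalar_forward).attribute(inp, target=None)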
def test_single_shapley_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).item()
)
def test_single_shapley_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp))
)
def test_single_shapley_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_single_shapley_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_one_sample_batch_scalar_shapley_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def test_single_shapley_int_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_int_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp.float())).item()
)
def test_single_shapley_int_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_int_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp.float()))
)
def test_single_shapley_int_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_int_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp.float())).reshape(1)
)
def test_single_shapley_int_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_int_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: int(torch.sum(net(inp.float())).item())
)
def test_multi_sample_shapley_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).item()
)
def test_multi_sample_shapley_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp))
)
def test_multi_sample_shapley_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_multi_sample_shapley_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_multi_sample_batch_scalar_shapley_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def test_multi_inp_shapley_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(
lambda *inp: torch.sum(net(*inp)).item()
)
def test_multi_inp_shapley_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(lambda *inp: torch.sum(net(*inp)))
def test_multi_inp_shapley_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(
lambda *inp: torch.sum(net(*inp)).reshape(1)
)
    def test_multi_inp_shapley_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_batch_scalar_shapley_assert(
lambda *inp: int(torch.sum(net(*inp)).item())
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_shapley_sampling_with_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._shapley_test_assert(
net,
inp,
[[76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(bsz,),
n_samples=250,
show_progress=True,
)
output = mock_stderr.getvalue()
            # To verify that the progress tracking aligns with the actual
            # iterations, every perturbations_per_eval setting should reach
            # a progress of 100%.
            assert (
                "Shapley Value Sampling attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
            assert (
                "Shapley Values attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_shapley_sampling_with_mask_and_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._shapley_test_assert(
net,
inp,
[[275.0, 275.0, 115.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(bsz,),
show_progress=True,
)
output = mock_stderr.getvalue()
            # To verify that the progress tracking aligns with the actual
            # iterations, every perturbations_per_eval setting should reach
            # a progress of 100%.
            assert (
                "Shapley Value Sampling attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
            assert (
                "Shapley Values attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
def _single_input_one_sample_batch_scalar_shapley_assert(
self, func: Callable
) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._shapley_test_assert(
func,
inp,
[[79.0, 79.0, 21.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def _single_input_multi_sample_batch_scalar_shapley_assert(
self, func: Callable
) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._shapley_test_assert(
func,
inp,
[[629.0, 629.0, 251.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
n_samples=2500,
)
def _single_int_input_multi_sample_batch_scalar_shapley_assert(
self, func: Callable
) -> None:
inp = torch.tensor([[2, 10, 3], [20, 50, 30]])
mask = torch.tensor([[0, 0, 1]])
self._shapley_test_assert(
func,
inp,
[[629.0, 629.0, 251.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
n_samples=2500,
)
def _multi_input_batch_scalar_shapley_assert(self, func: Callable) -> None:
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]])
mask1 = torch.tensor([[1, 1, 1]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2]])
expected = (
[[3850.6666, 3850.6666, 3850.6666]],
[[306.6666, 3850.6666, 410.6666]],
[[306.6666, 3850.6666, 410.6666]],
)
self._shapley_test_assert(
func,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
perturbations_per_eval=(1,),
target=None,
n_samples=3500,
delta=1.2,
)
def _shapley_test_assert(
self,
model: Callable,
test_input: TensorOrTupleOfTensorsGeneric,
expected_attr,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: Union[None, int] = 0,
n_samples: int = 100,
delta: float = 1.0,
test_true_shapley: bool = True,
show_progress: bool = False,
) -> None:
for batch_size in perturbations_per_eval:
shapley_samp = ShapleyValueSampling(model)
attributions = shapley_samp.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
show_progress=show_progress,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_attr, delta=delta, mode="max"
)
if test_true_shapley:
shapley_val = ShapleyValues(model)
attributions = shapley_val.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
show_progress=show_progress,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_attr, mode="max", delta=0.001
)
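def _example_feature_mask_grouping() -> None:
    # Illustrative sketch, not part of the suite above: features whose
    # `feature_mask` entries share an index are perturbed as one group, so
    # every element of a group receives the same attribution value. The
    # model and input below are arbitrary.
    net = BasicModel_MultiLayer()
    inp = torch.tensor([[20.0, 50.0, 30.0]])
    attr = ShapleyValueSampling(net).attribute(
        inp, target=0, feature_mask=torch.tensor([[0, 0, 1]])
    )
    assert torch.equal(attr[0, 0], attr[0, 1])  # grouped features match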
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
import torch
from captum._utils.typing import BaselineType, Tensor
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.noise_tunnel import NoiseTunnel
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.classification_models import SigmoidModel, SoftmaxModel
from torch.nn import Module
class Test(BaseTest):
def test_sigmoid_classification_vanilla(self) -> None:
self._assert_sigmoid_classification("vanilla", "riemann_right")
def test_sigmoid_classification_smoothgrad(self) -> None:
self._assert_sigmoid_classification("smoothgrad", "riemann_left")
def test_sigmoid_classification_smoothgrad_sq(self) -> None:
self._assert_sigmoid_classification("smoothgrad_sq", "riemann_middle")
def test_sigmoid_classification_vargrad(self) -> None:
self._assert_sigmoid_classification("vargrad", "riemann_trapezoid")
def test_softmax_classification_vanilla(self) -> None:
self._assert_softmax_classification("vanilla", "gausslegendre")
def test_softmax_classification_smoothgrad(self) -> None:
self._assert_softmax_classification("smoothgrad", "riemann_right")
def test_softmax_classification_smoothgrad_sq(self) -> None:
self._assert_softmax_classification("smoothgrad_sq", "riemann_left")
def test_softmax_classification_vargrad(self) -> None:
self._assert_softmax_classification("vargrad", "riemann_middle")
def test_softmax_classification_vanilla_batch(self) -> None:
self._assert_softmax_classification_batch("vanilla", "riemann_trapezoid")
def test_softmax_classification_smoothgrad_batch(self) -> None:
self._assert_softmax_classification_batch("smoothgrad", "gausslegendre")
def test_softmax_classification_smoothgrad_sq_batch(self) -> None:
self._assert_softmax_classification_batch("smoothgrad_sq", "riemann_right")
def test_softmax_classification_vargrad_batch(self) -> None:
self._assert_softmax_classification_batch("vargrad", "riemann_left")
def _assert_sigmoid_classification(
self, type: str = "vanilla", approximation_method: str = "gausslegendre"
) -> None:
num_in = 20
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
target = torch.tensor(0)
# TODO add test cases for multiple different layers
model = SigmoidModel(num_in, 5, 1)
        self._validate_completeness(model, input, target, type, approximation_method)
def _assert_softmax_classification(
self, type: str = "vanilla", approximation_method: str = "gausslegendre"
) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
target = torch.tensor(5)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
        self._validate_completeness(model, input, target, type, approximation_method)
def _assert_softmax_classification_batch(
self, type: str = "vanilla", approximation_method: str = "gausslegendre"
) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 3.0, requires_grad=True).reshape(3, num_in)
target = torch.tensor([5, 5, 2])
baseline = torch.zeros(1, num_in)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
        self._validate_completeness(
model, input, target, type, approximation_method, baseline
)
    def _validate_completeness(
self,
model: Module,
input: Tensor,
target: Tensor,
type: str = "vanilla",
approximation_method: str = "gausslegendre",
baseline: BaselineType = None,
) -> None:
ig = IntegratedGradients(model.forward)
model.zero_grad()
if type == "vanilla":
attributions, delta = ig.attribute(
input,
baselines=baseline,
target=target,
method=approximation_method,
n_steps=200,
return_convergence_delta=True,
)
delta_expected = ig.compute_convergence_delta(
attributions, baseline, input, target
)
assertTensorAlmostEqual(self, delta_expected, delta)
delta_condition = (delta.abs() < 0.005).all()
self.assertTrue(
delta_condition,
"The sum of attribution values {} is not "
"nearly equal to the difference between the endpoint for "
"some samples".format(delta),
)
self.assertEqual([input.shape[0]], list(delta.shape))
else:
nt = NoiseTunnel(ig)
n_samples = 10
attributions, delta = nt.attribute(
input,
baselines=baseline,
nt_type=type,
nt_samples=n_samples,
stdevs=0.0002,
n_steps=100,
target=target,
method=approximation_method,
return_convergence_delta=True,
)
self.assertEqual([input.shape[0] * n_samples], list(delta.shape))
self.assertTrue((delta.abs() < 0.05).all())
self.assertEqual(attributions.shape, input.shape)
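def _example_convergence_delta() -> None:
    # Illustrative sketch, not part of the suite above: the returned delta
    # is sum(attributions) - (F(input) - F(baseline)) per example, so
    # completeness holds when each entry is close to zero. The model and
    # input here are arbitrary.
    model = SoftmaxModel(10, 8, 3)
    inp = torch.randn(2, 10)
    attributions, delta = IntegratedGradients(model).attribute(
        inp, target=0, n_steps=200, return_convergence_delta=True
    )
    assert list(delta.shape) == [inp.shape[0]]  # one delta per example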
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import torch
from captum.attr._utils.batching import (
_batched_generator,
_batched_operator,
_tuple_splice_range,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
class Test(BaseTest):
def test_tuple_splice_range(self) -> None:
test_tuple = (
torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]]),
"test",
torch.tensor([[6, 7, 8], [0, 1, 2], [3, 4, 5]]),
)
spliced_tuple = _tuple_splice_range(test_tuple, 1, 3)
assertTensorAlmostEqual(self, spliced_tuple[0], [[3, 4, 5], [6, 7, 8]])
self.assertEqual(spliced_tuple[1], "test")
assertTensorAlmostEqual(self, spliced_tuple[2], [[0, 1, 2], [3, 4, 5]])
def test_tuple_splice_range_3d(self) -> None:
test_tuple = (
torch.tensor([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [6, 7, 8]]]),
"test",
)
spliced_tuple = _tuple_splice_range(test_tuple, 1, 2)
assertTensorAlmostEqual(self, spliced_tuple[0], [[[6, 7, 8], [6, 7, 8]]])
self.assertEqual(spliced_tuple[1], "test")
def test_batched_generator(self) -> None:
def sample_operator(inputs, additional_forward_args, target_ind, scale):
return (
scale * (sum(inputs)),
scale * sum(additional_forward_args),
target_ind,
)
array1 = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
array2 = [[6, 7, 8], [0, 1, 2], [3, 4, 5]]
array3 = [[0, 1, 2], [0, 0, 0], [0, 0, 0]]
inp1, inp2, inp3 = (
torch.tensor(array1),
torch.tensor(array2),
torch.tensor(array3),
)
for index, (inp, add, targ) in enumerate(
_batched_generator((inp1, inp2), (inp3, 5), 7, 1)
):
assertTensorAlmostEqual(self, inp[0], [array1[index]])
assertTensorAlmostEqual(self, inp[1], [array2[index]])
assertTensorAlmostEqual(self, add[0], [array3[index]])
self.assertEqual(add[1], 5)
self.assertEqual(targ, 7)
def test_batched_operator_0_bsz(self) -> None:
inp1 = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
with self.assertRaises(AssertionError):
_batched_operator(lambda x: x, inputs=inp1, internal_batch_size=0)
def test_batched_operator(self) -> None:
def _sample_operator(inputs, additional_forward_args, target_ind, scale):
return (
scale * (sum(inputs)),
scale * sum(additional_forward_args) + target_ind[0],
)
inp1 = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
inp2 = torch.tensor([[6, 7, 8], [0, 1, 2], [3, 4, 5]])
inp3 = torch.tensor([[0, 1, 2], [0, 0, 0], [0, 0, 0]])
batched_result = _batched_operator(
_sample_operator,
inputs=(inp1, inp2),
additional_forward_args=(inp3),
target_ind=[0, 1, 2],
scale=2.0,
internal_batch_size=1,
)
assertTensorAlmostEqual(
self, batched_result[0], [[12, 16, 20], [6, 10, 14], [18, 22, 26]]
)
assertTensorAlmostEqual(
self, batched_result[1], [[0, 2, 4], [1, 1, 1], [2, 2, 2]]
)
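def _example_tuple_splice_range() -> None:
    # Illustrative sketch, not part of the suite above: tensors in the
    # tuple are sliced along dim 0, while non-tensor entries (such as the
    # string below) are passed through unchanged.
    spliced = _tuple_splice_range((torch.arange(6).view(3, 2), "meta"), 0, 2)
    assert spliced[1] == "meta"
    assert spliced[0].shape == (2, 2)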
|
#!/usr/bin/env python3
import unittest
from typing import Any, List, Tuple, Union
import torch
from captum._utils.typing import TensorLikeList, TensorOrTupleOfTensorsGeneric
from captum.attr._core.guided_backprop_deconvnet import GuidedBackprop
from captum.attr._core.neuron.neuron_guided_backprop_deconvnet import (
NeuronGuidedBackprop,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel_ConvNet_One_Conv
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_conv_gb(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[
[
[0.0, 1.0, 1.0, 1.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
]
]
self._guided_backprop_test_assert(net, (inp,), (exp,))
def test_simple_input_conv_neuron_gb(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[
[
[0.0, 1.0, 1.0, 1.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
]
]
self._neuron_guided_backprop_test_assert(net, net.fc1, (0,), (inp,), (exp,))
def test_simple_input_conv_neuron_gb_agg_neurons(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
exp = [
[
[
[0.0, 1.0, 1.0, 1.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 3.0, 3.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
]
]
self._neuron_guided_backprop_test_assert(
net, net.fc1, (slice(0, 1, 1),), (inp,), (exp,)
)
def test_simple_multi_input_conv_gb(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex_attr = [
[
[
[1.0, 2.0, 2.0, 1.0],
[2.0, 4.0, 4.0, 2.0],
[2.0, 4.0, 4.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
]
]
self._guided_backprop_test_assert(net, (inp, inp2), (ex_attr, ex_attr))
def test_simple_multi_input_conv_neuron_gb(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
ex_attr = [
[
[
[1.0, 2.0, 2.0, 1.0],
[2.0, 4.0, 4.0, 2.0],
[2.0, 4.0, 4.0, 2.0],
[1.0, 2.0, 2.0, 1.0],
]
]
]
self._neuron_guided_backprop_test_assert(
net, net.fc1, (3,), (inp, inp2), (ex_attr, ex_attr)
)
def test_gb_matching(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = 100.0 * torch.randn(1, 1, 4, 4)
self._guided_backprop_matching_assert(net, net.relu2, inp)
def _guided_backprop_test_assert(
self,
model: Module,
test_input: TensorOrTupleOfTensorsGeneric,
expected: Tuple[TensorLikeList, ...],
additional_input: Any = None,
) -> None:
guided_backprop = GuidedBackprop(model)
attributions = guided_backprop.attribute(
test_input, target=0, additional_forward_args=additional_input
)
for i in range(len(test_input)):
assertTensorAlmostEqual(
self,
attributions[i],
expected[i],
delta=0.01,
)
def _neuron_guided_backprop_test_assert(
self,
model: Module,
layer: Module,
neuron_selector: Union[int, Tuple[Union[int, slice], ...]],
test_input: TensorOrTupleOfTensorsGeneric,
expected: Tuple[List[List[List[List[float]]]], ...],
additional_input: Any = None,
) -> None:
guided_backprop = NeuronGuidedBackprop(model, layer)
attributions = guided_backprop.attribute(
test_input,
neuron_selector=neuron_selector,
additional_forward_args=additional_input,
)
for i in range(len(test_input)):
assertTensorAlmostEqual(
self,
attributions[i],
expected[i],
delta=0.01,
)
def _guided_backprop_matching_assert(
self,
model: Module,
output_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
):
out = model(test_input)
attrib = GuidedBackprop(model)
self.assertFalse(attrib.multiplies_by_inputs)
neuron_attrib = NeuronGuidedBackprop(model, output_layer)
for i in range(out.shape[1]):
gbp_vals = attrib.attribute(test_input, target=i)
neuron_gbp_vals = neuron_attrib.attribute(test_input, (i,))
assertTensorAlmostEqual(self, gbp_vals, neuron_gbp_vals, delta=0.01)
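def _example_guided_backprop() -> None:
    # Illustrative sketch, not part of the suite above: GuidedBackprop
    # overrides the ReLU backward pass to zero out negative gradients, and
    # its attributions always have the input's shape. The input below is
    # arbitrary.
    net = BasicModel_ConvNet_One_Conv()
    inp = torch.randn(1, 1, 4, 4)
    attr = GuidedBackprop(net).attribute(inp, target=0)
    assert attr.shape == inp.shape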
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
from typing import cast, Tuple
import torch
import torch.nn as nn
from captum.attr import InputXGradient, LRP
from captum.attr._utils.lrp_rules import (
Alpha1_Beta0_Rule,
EpsilonRule,
GammaRule,
IdentityRule,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
BasicModelWithReusedLinear,
SimpleLRPModel,
)
from torch import Tensor
from torch.nn import Module
def _get_basic_config() -> Tuple[Module, Tensor]:
input = torch.arange(16).view(1, 1, 4, 4).float()
return BasicModel_ConvNet_One_Conv(), input
def _get_rule_config() -> Tuple[Tensor, Module, Tensor, Tensor]:
relevance = torch.tensor([[[-0.0, 3.0]]])
layer = nn.modules.Conv1d(1, 1, 2, bias=False)
nn.init.constant_(layer.weight.data, 2)
activations = torch.tensor([[[1.0, 5.0, 7.0]]])
input = torch.tensor([[2, 0, -2]])
return relevance, layer, activations, input
def _get_simple_model(inplace: bool = False) -> Tuple[Module, Tensor]:
model = SimpleLRPModel(inplace)
inputs = torch.tensor([[1.0, 2.0, 3.0]])
return model, inputs
def _get_simple_model2(inplace: bool = False) -> Tuple[Module, Tensor]:
class MyModel(nn.Module):
def __init__(self, inplace) -> None:
super().__init__()
self.lin = nn.Linear(2, 2)
self.lin.weight = nn.Parameter(torch.ones(2, 2))
self.relu = torch.nn.ReLU(inplace=inplace)
def forward(self, input):
return self.relu(self.lin(input))[0].unsqueeze(0)
input = torch.tensor([[1.0, 2.0], [1.0, 3.0]])
model = MyModel(inplace)
return model, input
class Test(BaseTest):
def test_lrp_creator(self) -> None:
model, _ = _get_basic_config()
model.conv1.rule = 1 # type: ignore
self.assertRaises(TypeError, LRP, model)
def test_lrp_creator_activation(self) -> None:
model, inputs = _get_basic_config()
model.add_module("sigmoid", nn.Sigmoid())
lrp = LRP(model)
self.assertRaises(TypeError, lrp.attribute, inputs)
def test_lrp_basic_attributions(self) -> None:
model, inputs = _get_basic_config()
logits = model(inputs)
_, classIndex = torch.max(logits, 1)
lrp = LRP(model)
relevance, delta = lrp.attribute(
inputs, cast(int, classIndex.item()), return_convergence_delta=True
)
self.assertEqual(delta.item(), 0) # type: ignore
self.assertEqual(relevance.shape, inputs.shape) # type: ignore
assertTensorAlmostEqual(
self,
relevance,
torch.Tensor(
[[[[0, 1, 2, 3], [0, 5, 6, 7], [0, 9, 10, 11], [0, 0, 0, 0]]]]
),
)
def test_lrp_simple_attributions(self) -> None:
model, inputs = _get_simple_model()
model.eval()
model.linear.rule = EpsilonRule() # type: ignore
model.linear2.rule = EpsilonRule() # type: ignore
lrp = LRP(model)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(self, relevance, torch.tensor([[18.0, 36.0, 54.0]]))
def test_lrp_simple_attributions_batch(self) -> None:
model, inputs = _get_simple_model()
model.eval()
model.linear.rule = EpsilonRule() # type: ignore
model.linear2.rule = EpsilonRule() # type: ignore
lrp = LRP(model)
inputs = torch.cat((inputs, 3 * inputs))
relevance, delta = lrp.attribute(
inputs, target=0, return_convergence_delta=True
)
self.assertEqual(relevance.shape, inputs.shape) # type: ignore
self.assertEqual(delta.shape[0], inputs.shape[0]) # type: ignore
assertTensorAlmostEqual(
self, relevance, torch.Tensor([[18.0, 36.0, 54.0], [54.0, 108.0, 162.0]])
)
def test_lrp_simple_repeat_attributions(self) -> None:
model, inputs = _get_simple_model()
model.eval()
model.linear.rule = GammaRule() # type: ignore
model.linear2.rule = Alpha1_Beta0_Rule() # type: ignore
output = model(inputs)
lrp = LRP(model)
_ = lrp.attribute(inputs)
output_after = model(inputs)
assertTensorAlmostEqual(self, output, output_after)
def test_lrp_simple_inplaceReLU(self) -> None:
model_default, inputs = _get_simple_model()
model_inplace, _ = _get_simple_model(inplace=True)
for model in [model_default, model_inplace]:
model.eval()
model.linear.rule = EpsilonRule() # type: ignore
model.linear2.rule = EpsilonRule() # type: ignore
lrp_default = LRP(model_default)
lrp_inplace = LRP(model_inplace)
relevance_default = lrp_default.attribute(inputs)
relevance_inplace = lrp_inplace.attribute(inputs)
assertTensorAlmostEqual(self, relevance_default, relevance_inplace)
def test_lrp_simple_tanh(self) -> None:
class Model(nn.Module):
def __init__(self) -> None:
                super().__init__()
self.linear = nn.Linear(3, 3, bias=False)
self.linear.weight.data.fill_(0.1)
self.tanh = torch.nn.Tanh()
self.linear2 = nn.Linear(3, 1, bias=False)
self.linear2.weight.data.fill_(0.1)
def forward(self, x):
return self.linear2(self.tanh(self.linear(x)))
model = Model()
inputs = torch.tensor([[1.0, 2.0, 3.0]])
_ = model(inputs)
lrp = LRP(model)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(
self, relevance, torch.Tensor([[0.0269, 0.0537, 0.0806]])
        )  # Expected result when tanh is skipped during relevance propagation
def test_lrp_simple_attributions_GammaRule(self) -> None:
model, inputs = _get_simple_model()
with torch.no_grad():
model.linear.weight.data[0][0] = -2 # type: ignore
model.eval()
model.linear.rule = GammaRule(gamma=1) # type: ignore
model.linear2.rule = GammaRule() # type: ignore
lrp = LRP(model)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(
self, relevance.data, torch.tensor([[28 / 3, 104 / 3, 52]]) # type: ignore
)
def test_lrp_simple_attributions_AlphaBeta(self) -> None:
model, inputs = _get_simple_model()
with torch.no_grad():
model.linear.weight.data[0][0] = -2 # type: ignore
model.eval()
model.linear.rule = Alpha1_Beta0_Rule() # type: ignore
model.linear2.rule = Alpha1_Beta0_Rule() # type: ignore
lrp = LRP(model)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(self, relevance, torch.tensor([[12, 33.6, 50.4]]))
def test_lrp_Identity(self) -> None:
model, inputs = _get_simple_model()
with torch.no_grad():
model.linear.weight.data[0][0] = -2 # type: ignore
model.eval()
model.linear.rule = IdentityRule() # type: ignore
model.linear2.rule = EpsilonRule() # type: ignore
lrp = LRP(model)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(self, relevance, torch.tensor([[24.0, 36.0, 36.0]]))
def test_lrp_simple2_attributions(self) -> None:
model, input = _get_simple_model2()
lrp = LRP(model)
relevance = lrp.attribute(input, 0)
self.assertEqual(relevance.shape, input.shape) # type: ignore
def test_lrp_skip_connection(self) -> None:
# A custom addition module needs to be used so that relevance is
# propagated correctly.
class Addition_Module(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
return x1 + x2
class SkipConnection(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
self.linear.weight.data.fill_(5)
self.add = Addition_Module()
            def forward(self, input: Tensor) -> Tensor:
x = self.add(self.linear(input), input)
return x
model = SkipConnection()
input = torch.Tensor([[2, 3]])
model.add.rule = EpsilonRule() # type: ignore
lrp = LRP(model)
relevance = lrp.attribute(input, target=1)
assertTensorAlmostEqual(self, relevance, torch.Tensor([[10, 18]]))
def test_lrp_maxpool1D(self) -> None:
class MaxPoolModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
self.linear.weight.data.fill_(2.0)
self.maxpool = nn.MaxPool1d(2)
            def forward(self, input: Tensor) -> Tensor:
return self.maxpool(self.linear(input))
model = MaxPoolModel()
input = torch.tensor([[[1.0, 2.0], [5.0, 6.0]]])
lrp = LRP(model)
relevance = lrp.attribute(input, target=1)
assertTensorAlmostEqual(self, relevance, torch.Tensor([[[0.0, 0.0], [10, 12]]]))
def test_lrp_maxpool2D(self) -> None:
class MaxPoolModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.maxpool = nn.MaxPool2d(2)
            def forward(self, input: Tensor) -> Tensor:
return self.maxpool(input)
model = MaxPoolModel()
input = torch.tensor([[[[1.0, 2.0], [5.0, 6.0]]]])
lrp = LRP(model)
relevance = lrp.attribute(input)
assertTensorAlmostEqual(
self, relevance, torch.Tensor([[[[0.0, 0.0], [0.0, 6.0]]]])
)
def test_lrp_maxpool3D(self) -> None:
class MaxPoolModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.maxpool = nn.MaxPool3d(2)
            def forward(self, input: Tensor) -> Tensor:
return self.maxpool(input)
model = MaxPoolModel()
input = torch.tensor([[[[[1.0, 2.0], [5.0, 6.0]], [[3.0, 4.0], [7.0, 8.0]]]]])
lrp = LRP(model)
relevance = lrp.attribute(input)
assertTensorAlmostEqual(
self,
relevance,
torch.Tensor([[[[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 8.0]]]]]),
)
def test_lrp_multi(self) -> None:
model = BasicModel_MultiLayer()
input = torch.Tensor([[1, 2, 3]])
add_input = 0
output = model(input)
output_add = model(input, add_input=add_input)
self.assertTrue(torch.equal(output, output_add))
lrp = LRP(model)
attributions = lrp.attribute(input, target=0)
attributions_add_input = lrp.attribute(
input, target=0, additional_forward_args=(add_input,)
)
self.assertTrue(
torch.equal(attributions, attributions_add_input) # type: ignore
        )
def test_lrp_multi_inputs(self) -> None:
model = BasicModel_MultiLayer()
input = torch.Tensor([[1, 2, 3]])
input = (input, 3 * input)
lrp = LRP(model)
attributions, delta = lrp.attribute(
input, target=0, return_convergence_delta=True
)
self.assertEqual(len(input), 2)
assertTensorAlmostEqual(self, attributions[0], torch.Tensor([[16, 32, 48]]))
assertTensorAlmostEqual(self, delta, torch.Tensor([-104.0]))
def test_lrp_ixg_equivalency(self) -> None:
model, inputs = _get_simple_model()
lrp = LRP(model)
attributions_lrp = lrp.attribute(inputs)
ixg = InputXGradient(model)
attributions_ixg = ixg.attribute(inputs)
assertTensorAlmostEqual(
self, attributions_lrp, attributions_ixg
        )  # For this model, LRP with default rules coincides with Input x Gradient.
def test_lrp_repeated_module(self) -> None:
model = BasicModelWithReusedLinear()
inp = torch.ones(2, 3)
lrp = LRP(model)
        with self.assertRaisesRegex(RuntimeError, "more than once"):
lrp.attribute(inp, target=0)
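def _example_lrp_rule_assignment() -> None:
    # Illustrative sketch, not part of the suite above: propagation rules
    # are attached to layers as a `rule` attribute before constructing LRP,
    # as in the tests; layers without an explicit rule use Captum's
    # defaults. The rule choices below are arbitrary.
    model, inputs = _get_simple_model()
    model.linear.rule = GammaRule(gamma=0.25)  # type: ignore
    model.linear2.rule = EpsilonRule()  # type: ignore
    relevance = LRP(model).attribute(inputs)
    assert relevance.shape == inputs.shape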
|
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from typing import Any, cast, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._utils.attribution import Attribution
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel,
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
BasicModelBoolInput,
BasicModelWithSparseInputs,
)
from torch import Tensor
class Test(BaseTest):
r"""
The following conversion tests are underlying assumptions
made by the rest of tests in this file.
We are testing them explicitly just in case they break behaviour
in the future. As in this case it will be easier to update the tests.
"""
def test_python_float_conversion(self) -> None:
x = torch.tensor(3, dtype=cast(torch.dtype, float))
self.assertEqual(x.dtype, torch.float64)
def test_python_int_conversion(self) -> None:
x = torch.tensor(5, dtype=cast(torch.dtype, int))
self.assertEqual(x.dtype, torch.int64)
def test_float32_tensor_item_conversion(self) -> None:
x = torch.tensor(5, dtype=torch.float32)
        y = torch.tensor(x.item())  # .item() returns a Python float, but
        # torch.tensor infers torch.float32 from it; the dtype is only
        # torch.float64 when dtype=float is passed explicitly
self.assertEqual(y.dtype, torch.float32)
def test_int32_tensor_item_conversion(self) -> None:
x = torch.tensor(5, dtype=torch.int32)
y = torch.tensor(x.item()) # .item() returns a python int
self.assertEqual(y.dtype, torch.int64)
def test_simple_ablation(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
ablation_algo, inp, [[80.0, 200.0, 120.0]], perturbations_per_eval=(1, 2, 3)
)
def test_simple_ablation_int_to_int(self) -> None:
ablation_algo = FeatureAblation(BasicModel())
inp = torch.tensor([[-3, 1, 2]])
self._ablation_test_assert(
ablation_algo, inp, [[-3, 0, 0]], perturbations_per_eval=(1, 2, 3)
)
def test_simple_ablation_int_to_int_nt(self) -> None:
ablation_algo = NoiseTunnel(FeatureAblation(BasicModel()))
inp = torch.tensor([[-3, 1, 2]]).float()
self._ablation_test_assert(
ablation_algo,
inp,
[[-3.0, 0.0, 0.0]],
perturbations_per_eval=(1, 2, 3),
stdevs=1e-10,
)
def test_simple_ablation_int_to_float(self) -> None:
net = BasicModel()
def wrapper_func(inp):
return net(inp).float()
ablation_algo = FeatureAblation(wrapper_func)
inp = torch.tensor([[-3, 1, 2]])
self._ablation_test_assert(
ablation_algo, inp, [[-3.0, 0.0, 0.0]], perturbations_per_eval=(1, 2, 3)
)
def test_simple_ablation_with_mask(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
ablation_algo,
inp,
[[280.0, 280.0, 120.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_ablation_with_baselines(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
ablation_algo,
inp,
[[248.0, 248.0, 104.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=4,
perturbations_per_eval=(1, 2, 3),
)
def test_simple_ablation_boolean(self) -> None:
ablation_algo = FeatureAblation(BasicModelBoolInput())
inp = torch.tensor([[True, False, True]])
self._ablation_test_assert(
ablation_algo,
inp,
[[40.0, 40.0, 40.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_ablation_boolean_with_baselines(self) -> None:
ablation_algo = FeatureAblation(BasicModelBoolInput())
inp = torch.tensor([[True, False, True]])
self._ablation_test_assert(
ablation_algo,
inp,
[[-40.0, -40.0, 0.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=True,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_sample_ablation(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
ablation_algo,
inp,
[[8.0, 35.0, 12.0], [80.0, 200.0, 120.0]],
perturbations_per_eval=(1, 2, 3),
)
def test_multi_sample_ablation_with_mask(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
self._ablation_test_assert(
ablation_algo,
inp,
[[41.0, 41.0, 12.0], [280.0, 280.0, 120.0]],
feature_mask=mask,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_ablation_with_mask(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer_MultiInput())
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[492.0, 492.0, 492.0], [200.0, 200.0, 200.0]],
[[80.0, 200.0, 120.0], [0.0, 400.0, 0.0]],
[[0.0, 400.0, 40.0], [60.0, 60.0, 60.0]],
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2),
expected[0:1],
additional_input=(inp3, 1),
feature_mask=(mask1, mask2),
perturbations_per_eval=(1, 2, 3),
)
expected_with_baseline = (
[[468.0, 468.0, 468.0], [184.0, 192.0, 184.0]],
[[68.0, 188.0, 108.0], [-12.0, 388.0, -12.0]],
[[-16.0, 384.0, 24.0], [12.0, 12.0, 12.0]],
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_ablation_with_mask_nt(self) -> None:
ablation_algo = NoiseTunnel(FeatureAblation(BasicModel_MultiLayer_MultiInput()))
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[492.0, 492.0, 492.0], [200.0, 200.0, 200.0]],
[[80.0, 200.0, 120.0], [0.0, 400.0, 0.0]],
[[0.0, 400.0, 40.0], [60.0, 60.0, 60.0]],
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
stdevs=1e-10,
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2),
expected[0:1],
additional_input=(inp3, 1),
feature_mask=(mask1, mask2),
perturbations_per_eval=(1, 2, 3),
stdevs=1e-10,
)
expected_with_baseline = (
[[468.0, 468.0, 468.0], [184.0, 192.0, 184.0]],
[[68.0, 188.0, 108.0], [-12.0, 388.0, -12.0]],
[[-16.0, 384.0, 24.0], [12.0, 12.0, 12.0]],
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
stdevs=1e-10,
)
def test_multi_input_ablation(self) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer_MultiInput())
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
baseline1 = torch.tensor([[3.0, 0.0, 0.0]])
baseline2 = torch.tensor([[0.0, 1.0, 0.0]])
baseline3 = torch.tensor([[1.0, 2.0, 3.0]])
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
(
[[80.0, 400.0, 0.0], [68.0, 200.0, 120.0]],
[[80.0, 196.0, 120.0], [0.0, 396.0, 0.0]],
[[-4.0, 392.0, 28.0], [4.0, 32.0, 0.0]],
),
additional_input=(1,),
baselines=(baseline1, baseline2, baseline3),
perturbations_per_eval=(1, 2, 3),
)
baseline1_exp = torch.tensor([[3.0, 0.0, 0.0], [3.0, 0.0, 2.0]])
baseline2_exp = torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 4.0]])
baseline3_exp = torch.tensor([[3.0, 2.0, 4.0], [1.0, 2.0, 3.0]])
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
(
[[80.0, 400.0, 0.0], [68.0, 200.0, 112.0]],
[[80.0, 196.0, 120.0], [0.0, 396.0, -16.0]],
[[-12.0, 392.0, 24.0], [4.0, 32.0, 0.0]],
),
additional_input=(1,),
baselines=(baseline1_exp, baseline2_exp, baseline3_exp),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_multi_input_conv(self) -> None:
ablation_algo = FeatureAblation(BasicModel_ConvNet_One_Conv())
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
self._ablation_test_assert(
ablation_algo,
(inp, inp2),
(67 * torch.ones_like(inp), 13 * torch.ones_like(inp2)),
feature_mask=(torch.tensor(0), torch.tensor(1)),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
)
self._ablation_test_assert(
ablation_algo,
(inp, inp2),
(
[
[
[
[0.0, 2.0, 4.0, 3.0],
[4.0, 9.0, 10.0, 7.0],
[4.0, 13.0, 14.0, 11.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
[
[
[
[1.0, 2.0, 2.0, 1.0],
[1.0, 2.0, 2.0, 1.0],
[1.0, 2.0, 2.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
),
perturbations_per_eval=(1, 3, 7, 14),
)
    # The remaining tests cover cases where the forward function returns a
    # single scalar per batch, as either a float, an integer, a 0d tensor,
    # or a 1d tensor.
def test_error_perturbations_per_eval_limit_batch_scalar(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
ablation = FeatureAblation(lambda inp: torch.sum(net(inp)).item())
with self.assertRaises(AssertionError):
_ = ablation.attribute(inp, perturbations_per_eval=2)
def test_error_agg_mode_arbitrary_output(self) -> None:
net = BasicModel_MultiLayer()
# output 3 numbers for the entire batch
# note that the batch size == 2
def forward_func(inp):
pred = net(inp)
return torch.stack([pred.sum(), pred.max(), pred.min()])
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
ablation = FeatureAblation(forward_func)
with self.assertRaises(AssertionError):
_ = ablation.attribute(inp, perturbations_per_eval=2)
def test_empty_sparse_features(self) -> None:
ablation_algo = FeatureAblation(BasicModelWithSparseInputs())
inp1 = torch.tensor([[1.0, -2.0, 3.0], [2.0, -1.0, 3.0]])
inp2 = torch.tensor([])
exp: Tuple[List[List[float]], List[float]] = ([[9.0, -3.0, 12.0]], [0.0])
self._ablation_test_assert(ablation_algo, (inp1, inp2), exp, target=None)
def test_sparse_features(self) -> None:
ablation_algo = FeatureAblation(BasicModelWithSparseInputs())
inp1 = torch.tensor([[1.0, -2.0, 3.0], [2.0, -1.0, 3.0]])
        # The length of the sparse index list may not match the number of examples
inp2 = torch.tensor([1, 7, 2, 4, 5, 3, 6])
self._ablation_test_assert(
ablation_algo, (inp1, inp2), ([[9.0, -3.0, 12.0]], [2.0]), target=None
)
def test_single_ablation_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)).item())
self._single_input_one_sample_batch_scalar_ablation_assert(
ablation_algo, dtype=torch.float64
)
def test_single_ablation_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)))
self._single_input_one_sample_batch_scalar_ablation_assert(ablation_algo)
def test_single_ablation_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)).reshape(1))
self._single_input_one_sample_batch_scalar_ablation_assert(ablation_algo)
def test_single_ablation_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: int(torch.sum(net(inp)).item()))
self._single_input_one_sample_batch_scalar_ablation_assert(
ablation_algo, dtype=torch.int64
)
def test_multi_sample_ablation_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)).item())
self._single_input_multi_sample_batch_scalar_ablation_assert(
ablation_algo,
dtype=torch.float64,
)
def test_multi_sample_ablation_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)))
self._single_input_multi_sample_batch_scalar_ablation_assert(ablation_algo)
def test_multi_sample_ablation_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)).reshape(1))
self._single_input_multi_sample_batch_scalar_ablation_assert(ablation_algo)
def test_multi_sample_ablation_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer()
ablation_algo = FeatureAblation(lambda inp: int(torch.sum(net(inp)).item()))
self._single_input_multi_sample_batch_scalar_ablation_assert(
ablation_algo, dtype=torch.int64
)
def test_multi_inp_ablation_batch_scalar_float(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
ablation_algo = FeatureAblation(lambda *inp: torch.sum(net(*inp)).item())
self._multi_input_batch_scalar_ablation_assert(
ablation_algo,
dtype=torch.float64,
)
def test_multi_inp_ablation_batch_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
ablation_algo = FeatureAblation(lambda *inp: torch.sum(net(*inp)))
self._multi_input_batch_scalar_ablation_assert(ablation_algo)
def test_multi_inp_ablation_batch_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
ablation_algo = FeatureAblation(lambda *inp: torch.sum(net(*inp)).reshape(1))
self._multi_input_batch_scalar_ablation_assert(ablation_algo)
    def test_multi_inp_ablation_batch_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
ablation_algo = FeatureAblation(lambda *inp: int(torch.sum(net(*inp)).item()))
self._multi_input_batch_scalar_ablation_assert(ablation_algo, dtype=torch.int64)
def test_unassociated_output_3d_tensor(self) -> None:
def forward_func(inp):
return torch.ones(1, 5, 3, 2)
inp = torch.randn(10, 5)
mask = torch.arange(5).unsqueeze(0)
self._ablation_test_assert(
ablation_algo=FeatureAblation(forward_func),
test_input=inp,
baselines=None,
target=None,
feature_mask=mask,
perturbations_per_eval=(1,),
expected_ablation=torch.zeros((5 * 3 * 2,) + inp[0].shape),
)
def test_single_inp_ablation_multi_output_aggr(self) -> None:
def forward_func(inp):
return inp[0].unsqueeze(0)
inp = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
mask = torch.tensor([[0, 1, 2]])
self._ablation_test_assert(
ablation_algo=FeatureAblation(forward_func),
test_input=inp,
feature_mask=mask,
baselines=None,
target=None,
perturbations_per_eval=(1,),
# should just be the first input spread across each feature
expected_ablation=[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]],
)
def test_single_inp_ablation_multi_output_aggr_mask_none(self) -> None:
def forward_func(inp):
return inp[0].unsqueeze(0)
inp = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
self._ablation_test_assert(
ablation_algo=FeatureAblation(forward_func),
test_input=inp,
feature_mask=None,
baselines=None,
target=None,
perturbations_per_eval=(1,),
# should just be the first input spread across each feature
expected_ablation=[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]],
)
def test_single_inp_ablation_multi_output_aggr_non_standard(self) -> None:
def forward_func(inp):
return inp[0].unsqueeze(0)
inp = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
mask = torch.tensor([[0, 0, 1]])
self._ablation_test_assert(
ablation_algo=FeatureAblation(forward_func),
test_input=inp,
feature_mask=mask,
baselines=None,
target=None,
perturbations_per_eval=(1,),
expected_ablation=[[1.0, 1.0, 0.0], [2.0, 2.0, 0.0], [0.0, 0.0, 3.0]],
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_ablation_with_show_progress(self, mock_stderr) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._ablation_test_assert(
ablation_algo,
inp,
[[80.0, 200.0, 120.0]],
perturbations_per_eval=(bsz,),
show_progress=True,
)
output = mock_stderr.getvalue()
            # To verify that the progress tracking aligns with the actual
            # iterations, every perturbations_per_eval setting should reach
            # a progress of 100%.
            assert (
                "Feature Ablation attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_ablation_with_mask_and_show_progress(self, mock_stderr) -> None:
ablation_algo = FeatureAblation(BasicModel_MultiLayer())
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._ablation_test_assert(
ablation_algo,
inp,
[[280.0, 280.0, 120.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(bsz,),
show_progress=True,
)
output = mock_stderr.getvalue()
            # To verify that the progress tracking aligns with the actual
            # iterations, every perturbations_per_eval setting should reach
            # a progress of 100%.
            assert (
                "Feature Ablation attribution: 100%" in output
            ), f"Unexpected progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
def _single_input_one_sample_batch_scalar_ablation_assert(
self, ablation_algo: Attribution, dtype: torch.dtype = torch.float32
) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._ablation_test_assert(
ablation_algo,
inp,
torch.tensor([[82.0, 82.0, 24.0]], dtype=torch.float32).to(dtype),
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def _single_input_multi_sample_batch_scalar_ablation_assert(
self,
ablation_algo: Attribution,
dtype: torch.dtype = torch.float32,
) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._ablation_test_assert(
ablation_algo,
inp,
torch.tensor([[642.0, 642.0, 264.0]], dtype=torch.float32).to(dtype),
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def _multi_input_batch_scalar_ablation_assert(
self,
ablation_algo: Attribution,
dtype: torch.dtype = torch.float32,
) -> None:
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2]])
expected = (
torch.tensor([[1784, 1784, 1784]], dtype=dtype),
torch.tensor([[160, 1200, 240]], dtype=dtype),
torch.tensor([[16, 880, 104]], dtype=dtype),
)
self._ablation_test_assert(
ablation_algo,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
perturbations_per_eval=(1,),
target=None,
)
def _ablation_test_assert(
self,
ablation_algo: Attribution,
test_input: TensorOrTupleOfTensorsGeneric,
expected_ablation: Union[
Tensor,
Tuple[Tensor, ...],
            # NOTE: mypy doesn't support recursive types; ideally this would
            # be List[NestedList[Union[int, float]]] or
            # Tuple[NestedList[Union[int, float]], ...], but it can't be.
            #
            # See https://github.com/python/mypy/issues/731
List[Any],
Tuple[List[Any], ...],
],
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: TargetType = 0,
**kwargs: Any,
) -> None:
for batch_size in perturbations_per_eval:
self.assertTrue(ablation_algo.multiplies_by_inputs)
attributions = ablation_algo.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
**kwargs,
)
if isinstance(expected_ablation, tuple):
for i in range(len(expected_ablation)):
expected = expected_ablation[i]
if not isinstance(expected, torch.Tensor):
expected = torch.tensor(expected)
self.assertEqual(attributions[i].shape, expected.shape)
self.assertEqual(attributions[i].dtype, expected.dtype)
assertTensorAlmostEqual(self, attributions[i], expected)
else:
if not isinstance(expected_ablation, torch.Tensor):
expected_ablation = torch.tensor(expected_ablation)
self.assertEqual(attributions.shape, expected_ablation.shape)
self.assertEqual(attributions.dtype, expected_ablation.dtype)
assertTensorAlmostEqual(self, attributions, expected_ablation)
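def _example_feature_ablation() -> None:
    # Illustrative sketch, not part of the suite above: FeatureAblation
    # replaces each feature (or feature group, via `feature_mask`) with its
    # baseline value and takes the resulting output change as that
    # feature's attribution. The model and input here are arbitrary.
    net = BasicModel_MultiLayer()
    inp = torch.tensor([[20.0, 50.0, 30.0]])
    attr = FeatureAblation(net).attribute(inp, target=0, baselines=0.0)
    assert attr.shape == inp.shape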
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env fbpython
import math
from typing import cast
from unittest.mock import Mock, patch
import torch
from captum.attr._core.dataloader_attr import DataLoaderAttribution, InputRole
from captum.attr._core.feature_ablation import FeatureAblation
from parameterized import parameterized
from tests.helpers.basic import (
assertAttributionComparision,
assertTensorAlmostEqual,
BaseTest,
)
from torch import Tensor
from torch.utils.data import DataLoader, TensorDataset
def sum_forward(*inps):
inps = [torch.flatten(inp, start_dim=1) for inp in inps]
return torch.cat(inps, dim=1).sum(1)
class Linear(torch.nn.Module):
def __init__(self, n):
super().__init__()
self.linear = torch.nn.Linear(n, 1)
def forward(self, *inps):
inps = [torch.flatten(inp, start_dim=1) for inp in inps]
return self.linear(torch.cat(inps, dim=1))
mock_dataset = TensorDataset(
    # 1D feature
torch.tensor(
[
[0.0, 0.1],
[0.3, 0.4],
[0.6, 0.7],
[0.9, 1.0],
[1.2, 1.3],
]
),
# 2D feature
torch.tensor(
[
[[0.1, 0.2], [0.3, 0.2]],
[[0.4, 0.5], [0.3, 0.2]],
[[0.8, 0.1], [0.2, 0.5]],
[[1.1, 0.7], [0.1, 0.7]],
[[0.6, 1.4], [1.2, 0.4]],
]
),
# scalar feature or label
torch.tensor(
[
[0],
[1],
[0],
[0],
[1],
]
),
)
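def _example_dataloader_attribution() -> None:
    # Illustrative sketch, not part of the suite below: DataLoaderAttribution
    # wraps a perturbation-based method so it can attribute over an entire
    # DataLoader rather than a single in-memory batch.
    dl_fa = DataLoaderAttribution(FeatureAblation(sum_forward))
    attrs = dl_fa.attribute(DataLoader(mock_dataset, batch_size=2))
    assert len(attrs) == 3  # one attribution tensor per dataset feature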
class Test(BaseTest):
@parameterized.expand(
[
(sum_forward,),
(Linear(7),),
]
)
def test_dl_attr(self, forward) -> None:
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attributions = dl_fa.attribute(dataloader)
        # DataLoaderAttribution's default reduce is equivalent to
        # concatenating the attributions of all batches
attr_list = []
for batch in dataloader:
batch_attr = fa.attribute(tuple(batch))
attr_list.append(batch_attr)
expected_attr = tuple(
torch.cat(feature_attrs, dim=0) for feature_attrs in zip(*attr_list)
)
assertAttributionComparision(self, dl_attributions, expected_attr)
@parameterized.expand(
[
(sum_forward,),
(Linear(7),),
]
)
def test_dl_attr_with_mask(self, forward) -> None:
        # FeatureAblation does not yet support grouping features across
        # tensors; add such test cases once that support lands
masks = (
torch.tensor([[0, 0]]),
torch.tensor([[[1, 2], [3, 2]]]),
torch.tensor([[4]]),
)
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attributions = dl_fa.attribute(dataloader, feature_mask=masks)
        # DataLoaderAttribution's default reduce is equivalent to
        # concatenating the attributions of all batches
attr_list = []
for batch in dataloader:
batch_attr = fa.attribute(tuple(batch), feature_mask=masks)
attr_list.append(batch_attr)
expected_attr = tuple(
torch.cat(feature_attrs, dim=0) for feature_attrs in zip(*attr_list)
)
assertAttributionComparision(self, dl_attributions, expected_attr)
@parameterized.expand(
[
(sum_forward,),
(Linear(7),),
]
)
def test_dl_attr_with_baseline(self, forward) -> None:
baselines = (
torch.tensor([[0, -1]]),
1,
0.1,
)
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attributions = dl_fa.attribute(dataloader, baselines=baselines)
        # DataLoaderAttribution's default reduce is equivalent to
        # concatenating the attributions of all batches
attr_list = []
for batch in dataloader:
batch_attr = fa.attribute(tuple(batch), baselines=baselines)
attr_list.append(batch_attr)
expected_attr = tuple(
torch.cat(feature_attrs, dim=0) for feature_attrs in zip(*attr_list)
)
assertAttributionComparision(self, dl_attributions, expected_attr)
def test_dl_attr_with_reduce_and_to_metric(self) -> None:
forward = sum_forward
func_call_counts = {
"reduce": 0,
"to_metric": 0,
}
def reduce(accum, cur_output, cur_inputs):
func_call_counts["reduce"] += 1
accum = {"sum": 0, "count": 0} if accum is None else accum
accum["sum"] += cur_output.sum()
accum["count"] += len(cur_output)
return accum
def to_metric(accum):
func_call_counts["to_metric"] += 1
self.assertEqual(isinstance(accum, dict), True)
return torch.tensor(
[
accum["sum"] / accum["count"],
accum["sum"],
]
)
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
batch_size = 2
dataloader = DataLoader(mock_dataset, batch_size=batch_size)
dl_attribution = dl_fa.attribute(
dataloader,
reduce=reduce,
to_metric=to_metric,
return_input_shape=False,
)
n_iters = len(dataloader)
n_features = 7
        # once other attribution methods are supported, this may differ from n_features
n_perturbations = 7
n_passes = n_perturbations + 1 # +1 for base forward without perturbation
n_outputs = 2 # [mean, sum]
self.assertEqual(func_call_counts["reduce"], n_iters * n_passes)
self.assertEqual(func_call_counts["to_metric"], n_passes)
expected_attr_shape = (n_outputs, n_features)
self.assertEqual(type(dl_attribution), Tensor)
dl_attribution = cast(Tensor, dl_attribution)
self.assertEqual(dl_attribution.shape, expected_attr_shape)
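    # Note on the contract above (sketch): `reduce` folds each forward
    # pass's batch outputs into an accumulator, and `to_metric` converts
    # the final accumulator into the tensor whose entries are attributed.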
@parameterized.expand(
[
([0, 0, 0],),
([0, 1, 0],),
([0, 1, 1],),
([0, 1, 2],),
([0, 2, 2],),
]
)
def test_dl_attr_with_input_roles(self, input_roles) -> None:
n_inputs = len(input_roles)
n_forward_inputs = sum(1 for r in input_roles if r != InputRole.no_forward)
n_attr_inputs = sum(1 for r in input_roles if r == InputRole.need_attr)
def reduce(accum, cur_output, cur_inputs):
# all inputs from the dataloader should be passed to reduce
self.assertEqual(len(cur_inputs), n_inputs)
return cur_output if accum is None else torch.cat([accum, cur_output])
def forward(*forward_inputs):
# inputs of InputRole.no_forward should not be passed to forward
self.assertEqual(len(forward_inputs), n_forward_inputs)
return sum_forward(*forward_inputs)
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
batch_size = 2
dataloader = DataLoader(mock_dataset, batch_size=batch_size)
dl_attributions = dl_fa.attribute(
dataloader,
input_roles=input_roles,
reduce=reduce,
)
# only the inputs that need attribution should be returned
self.assertEqual(len(dl_attributions), n_attr_inputs)
# the default reduce of DataLoaderAttribution is equivalent to concatenating all batches
attr_list = []
for batch in dataloader:
attr_inputs = tuple(
_ for _, role in zip(batch, input_roles) if role == InputRole.need_attr
)
additional_forward_args = tuple(
_
for _, role in zip(batch, input_roles)
if role == InputRole.need_forward
)
batch_attr = fa.attribute(
attr_inputs, additional_forward_args=additional_forward_args
)
attr_list.append(batch_attr)
expected_attr = tuple(
torch.cat(feature_attrs, dim=0) for feature_attrs in zip(*attr_list)
)
assertAttributionComparision(self, dl_attributions, expected_attr)
def test_dl_attr_not_return_input_shape(self) -> None:
forward = sum_forward
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attribution = dl_fa.attribute(dataloader, return_input_shape=False)
expected_attr_shape = (len(mock_dataset), 7)
self.assertEqual(type(dl_attribution), Tensor)
dl_attribution = cast(Tensor, dl_attribution)
self.assertEqual(dl_attribution.shape, expected_attr_shape)
# the default reduce of DataLoaderAttribution is equivalent to concatenating all batches
attr_list = []
for batch in dataloader:
batch_attr = fa.attribute(tuple(batch))
attr_list.append(batch_attr)
expected_attr = torch.cat(
[
# flatten feature dim
torch.cat(feature_attrs, dim=0).flatten(start_dim=1)
for feature_attrs in zip(*attr_list)
],
dim=1,
)
assertTensorAlmostEqual(self, dl_attribution, expected_attr)
def test_dl_attr_with_mask_not_return_input_shape(self) -> None:
forward = sum_forward
masks = (
torch.tensor([[0, 0]]),
torch.tensor([[[1, 2], [3, 2]]]),
torch.tensor([[4]]),
)
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attribution = dl_fa.attribute(
dataloader, feature_mask=masks, return_input_shape=False
)
expected_attr_shape = (len(mock_dataset), 5)
self.assertEqual(type(dl_attribution), Tensor)
dl_attribution = cast(Tensor, dl_attribution)
self.assertEqual(dl_attribution.shape, expected_attr_shape)
@parameterized.expand([(2,), (3,), (4,)])
def test_dl_attr_with_perturb_per_pass(self, perturb_per_pass) -> None:
forward = sum_forward
fa = FeatureAblation(forward)
dl_fa = DataLoaderAttribution(fa)
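# wrap DataLoader.__iter__ so we can count how many times the
# attribution re-iterates the dataloader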
mock_dl_iter = Mock(wraps=DataLoader.__iter__)
with patch.object(DataLoader, "__iter__", lambda self: mock_dl_iter(self)):
dataloader = DataLoader(mock_dataset, batch_size=2)
dl_attributions = dl_fa.attribute(
dataloader, perturbations_per_pass=perturb_per_pass
)
n_features = 7
# 2 extra iter calls: one to fetch a batch to infer the input format,
# one to compute the unperturbed output
n_iter_overhead = 2
self.assertEqual(
mock_dl_iter.call_count,
math.ceil(n_features / perturb_per_pass) + n_iter_overhead,
)
# the default reduce of DataLoaderAttribution is equivalent to concatenating all batches
attr_list = []
for batch in dataloader:
batch_attr = fa.attribute(tuple(batch))
attr_list.append(batch_attr)
expected_attr = tuple(
torch.cat(feature_attrs, dim=0) for feature_attrs in zip(*attr_list)
)
assertAttributionComparision(self, dl_attributions, expected_attr)
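# A minimal usage sketch of the pattern exercised above (assumes the
# mock_dataset and sum_forward fixtures defined earlier in this module):
#
#   fa = FeatureAblation(sum_forward)
#   dl_fa = DataLoaderAttribution(fa)
#   loader = DataLoader(mock_dataset, batch_size=2)
#   # defaults to concatenating per-batch attributions in input shape
#   attrs = dl_fa.attribute(loader)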
|
#!/usr/bin/env python3
import unittest
from typing import List
import torch
from captum.attr._utils.approximation_methods import Riemann, riemann_builders
from tests.helpers.basic import assertTensorAlmostEqual
class Test(unittest.TestCase):
def __init__(self, methodName: str = "runTest") -> None:
super().__init__(methodName)
def test_riemann_0(self) -> None:
with self.assertRaises(AssertionError):
step_sizes, alphas = riemann_builders()
step_sizes(0)
alphas(0)
def test_riemann_2(self) -> None:
expected_step_sizes_lrm = [0.5, 0.5]
expected_step_sizes_trapezoid = [0.25, 0.25]
expected_left = [0.0, 0.5]
expected_right = [0.5, 1.0]
expected_middle = [0.25, 0.75]
expected_trapezoid = [0.0, 1.0]
self._assert_steps_and_alphas(
2,
expected_step_sizes_lrm,
expected_step_sizes_trapezoid,
expected_left,
expected_right,
expected_middle,
expected_trapezoid,
)
def test_riemann_3(self) -> None:
expected_step_sizes = [1 / 3] * 3
expected_step_sizes_trapezoid = [1 / 6, 1 / 3, 1 / 6]
expected_left = [0.0, 1 / 3, 2 / 3]
expected_right = [1 / 3, 2 / 3, 1.0]
expected_middle = [1 / 6, 0.5, 1 - 1 / 6]
expected_trapezoid = [0.0, 0.5, 1.0]
self._assert_steps_and_alphas(
3,
expected_step_sizes,
expected_step_sizes_trapezoid,
expected_left,
expected_right,
expected_middle,
expected_trapezoid,
)
def test_riemann_4(self) -> None:
expected_step_sizes = [1 / 4] * 4
expected_step_sizes_trapezoid = [1 / 8, 1 / 4, 1 / 4, 1 / 8]
expected_left = [0.0, 0.25, 0.5, 0.75]
expected_right = [0.25, 0.5, 0.75, 1.0]
expected_middle = [0.125, 0.375, 0.625, 0.875]
expected_trapezoid = [0.0, 1 / 3, 2 / 3, 1.0]
self._assert_steps_and_alphas(
4,
expected_step_sizes,
expected_step_sizes_trapezoid,
expected_left,
expected_right,
expected_middle,
expected_trapezoid,
)
def _assert_steps_and_alphas(
self,
n: int,
expected_step_sizes: List[float],
expected_step_sizes_trapezoid: List[float],
expected_left: List[float],
expected_right: List[float],
expected_middle: List[float],
expected_trapezoid: List[float],
) -> None:
step_sizes_left, alphas_left = riemann_builders(Riemann.left)
step_sizes_right, alphas_right = riemann_builders(Riemann.right)
step_sizes_middle, alphas_middle = riemann_builders(Riemann.middle)
step_sizes_trapezoid, alphas_trapezoid = riemann_builders(Riemann.trapezoid)
assertTensorAlmostEqual(
self,
torch.tensor(expected_step_sizes),
step_sizes_left(n),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
torch.tensor(expected_step_sizes),
step_sizes_right(n),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
torch.tensor(expected_step_sizes),
step_sizes_middle(n),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
torch.tensor(expected_step_sizes_trapezoid),
step_sizes_trapezoid(n),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self, torch.tensor(expected_left), alphas_left(n), delta=0.05, mode="max"
)
assertTensorAlmostEqual(
self, torch.tensor(expected_right), alphas_right(n), delta=0.05, mode="max"
)
assertTensorAlmostEqual(
self,
torch.tensor(expected_middle),
alphas_middle(n),
delta=0.05,
mode="max",
)
assertTensorAlmostEqual(
self,
torch.tensor(expected_trapezoid),
alphas_trapezoid(n),
delta=0.05,
mode="max",
)
# TODO write a test case for gauss-legendre
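# A minimal sketch of such a test, assuming gauss_legendre_builders from
# captum.attr._utils.approximation_methods returns the same pair of
# (step_sizes, alphas) callables as riemann_builders:
#
#   from captum.attr._utils.approximation_methods import gauss_legendre_builders
#
#   def test_gauss_legendre(self) -> None:
#       step_sizes, alphas = gauss_legendre_builders()
#       for n in (2, 3, 4):
#           # quadrature weights on [0, 1] should sum to ~1
#           self.assertAlmostEqual(sum(step_sizes(n)), 1.0, delta=0.05)
#           # evaluation points should lie strictly inside (0, 1)
#           self.assertTrue(all(0.0 < a < 1.0 for a in alphas(n)))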
|
#!/usr/bin/env python3
import unittest
from typing import Any, Callable, cast, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.layer.layer_conductance import LayerConductance
from captum.attr._core.neuron.neuron_conductance import NeuronConductance
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_conductance_input_linear2(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._conductance_input_test_assert(
net, net.linear2, inp, (0,), [0.0, 390.0, 0.0]
)
def test_simple_conductance_input_linear2_wo_mult_by_inputs(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[100.0, 100.0, 100.0]], requires_grad=True)
self._conductance_input_test_assert(
net,
net.linear2,
inp,
(0,),
[3.96, 3.96, 3.96],
multiply_by_inputs=False,
)
def test_simple_conductance_input_linear1(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_input_test_assert(net, net.linear1, inp, 0, [0.0, 90.0, 0.0])
def test_simple_conductance_input_linear1_selector_fn(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_input_test_assert(
net, net.linear1, inp, lambda x: x[:, 0], [0.0, 90.0, 0.0]
)
def test_simple_conductance_input_relu(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 70.0, 30.0]], requires_grad=True)
self._conductance_input_test_assert(net, net.relu, inp, (3,), [0.0, 70.0, 30.0])
def test_simple_conductance_multi_input_linear2(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._conductance_input_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
(0,),
([[0.0, 156.0, 0.0]], [[0.0, 156.0, 0.0]], [[0.0, 78.0, 0.0]]),
(4,),
)
def test_simple_conductance_multi_input_relu(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._conductance_input_test_assert(
net,
net.model.relu,
(inp1, inp2),
(3,),
([[0.0, 50.0, 5.0]], [[0.0, 20.0, 25.0]]),
(inp3, 5),
)
def test_simple_conductance_multi_input_batch_relu(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0], [0.0, 0.0, 10.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0], [0.0, 0.0, 10.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 5.0]])
self._conductance_input_test_assert(
net,
net.model.relu,
(inp1, inp2),
(3,),
(
[[0.0, 50.0, 5.0], [0.0, 0.0, 50.0]],
[[0.0, 20.0, 25.0], [0.0, 0.0, 50.0]],
),
(inp3, 5),
)
def test_layer_tuple_selector_fn(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._conductance_input_test_assert(
net, net.multi_relu, inp, lambda x: x[0][:, 1], [0.0, 6.0, 0.0]
)
def test_matching_conv2_multi_input_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(2, 1, 10, 10)
self._conductance_input_sum_test_assert(net, net.conv2, inp, 0.0)
# trying different baseline
self._conductance_input_sum_test_assert(net, net.conv2, inp, 0.000001)
def test_matching_relu2_multi_input_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(3, 1, 10, 10, requires_grad=True)
baseline = 20 * torch.randn(3, 1, 10, 10, requires_grad=True)
self._conductance_input_sum_test_assert(net, net.relu2, inp, baseline)
def test_matching_relu2_with_scalar_base_multi_input_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(3, 1, 10, 10, requires_grad=True)
self._conductance_input_sum_test_assert(net, net.relu2, inp, 0.0)
def test_matching_pool2_multi_input_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(1, 1, 10, 10)
baseline = 20 * torch.randn(1, 1, 10, 10, requires_grad=True)
self._conductance_input_sum_test_assert(net, net.pool2, inp, baseline)
def test_matching_layer_tuple_selector_fn(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
lc = LayerConductance(net, net.multi_relu)
layer_attr = lc.attribute(inp, target=0, n_steps=500, method="gausslegendre")
nc = NeuronConductance(net, net.multi_relu)
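# the neuron conductance attributions for each neuron should sum to
# that neuron's layer conductance (per-neuron completeness)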
for i in range(len(layer_attr)):
for j in range(layer_attr[i].shape[1]):
neuron_attr = nc.attribute(
inp,
lambda x: x[i][:, j],
target=0,
n_steps=500,
method="gausslegendre",
)
self.assertAlmostEqual(
neuron_attr.sum().item(),
layer_attr[i][0][j].item(),
delta=0.005,
)
def _conductance_input_test_assert(
self,
model: Module,
target_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
test_neuron: Union[int, Tuple[int, ...], Callable],
expected_input_conductance: Union[List[float], Tuple[List[List[float]], ...]],
additional_input: Any = None,
multiply_by_inputs: bool = True,
) -> None:
for internal_batch_size in (None, 5, 20):
cond = NeuronConductance(
model,
target_layer,
multiply_by_inputs=multiply_by_inputs,
)
self.assertEqual(cond.multiplies_by_inputs, multiply_by_inputs)
attributions = cond.attribute(
test_input,
test_neuron,
target=0,
n_steps=500,
method="gausslegendre",
additional_forward_args=additional_input,
internal_batch_size=internal_batch_size,
)
if isinstance(expected_input_conductance, tuple):
for i in range(len(expected_input_conductance)):
for j in range(len(expected_input_conductance[i])):
assertTensorAlmostEqual(
self,
attributions[i][j : j + 1].squeeze(0),
expected_input_conductance[i][j],
delta=0.1,
mode="max",
)
else:
if isinstance(attributions, Tensor):
assertTensorAlmostEqual(
self,
attributions.squeeze(0),
expected_input_conductance,
delta=0.1,
mode="max",
)
else:
raise AssertionError(
"Attributions not returning a Tensor when expected."
)
def _conductance_input_sum_test_assert(
self,
model: Module,
target_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
test_baseline: BaselineType = None,
):
layer_cond = LayerConductance(model, target_layer)
attributions = cast(
Tensor,
layer_cond.attribute(
test_input,
baselines=test_baseline,
target=0,
n_steps=500,
method="gausslegendre",
),
)
neuron_cond = NeuronConductance(model, target_layer)
attr_shape = cast(Tuple[int, ...], attributions.shape)
for i in range(attr_shape[1]):
for j in range(attr_shape[2]):
for k in range(attr_shape[3]):
neuron_vals = neuron_cond.attribute(
test_input,
(i, j, k),
baselines=test_baseline,
target=0,
n_steps=500,
)
for n in range(attributions.shape[0]):
self.assertAlmostEqual(
torch.sum(neuron_vals[n]).item(),
attributions[n, i, j, k].item(),
delta=0.005,
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
from __future__ import print_function
from typing import Tuple, Union
import torch
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.neuron.neuron_deep_lift import NeuronDeepLift, NeuronDeepLiftShap
from tests.attr.layer.test_layer_deeplift import (
_create_inps_and_base_for_deeplift_neuron_layer_testing,
_create_inps_and_base_for_deepliftshap_neuron_layer_testing,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_ConvNet_MaxPool3d,
LinearMaxPoolLinearModel,
ReLULinearModel,
)
from torch import Tensor
class Test(BaseTest):
def test_relu_neuron_deeplift(self) -> None:
model = ReLULinearModel(inplace=True)
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
inputs = (x1, x2)
neuron_dl = NeuronDeepLift(model, model.relu)
attributions = neuron_dl.attribute(inputs, 0, attribute_to_neuron_input=False)
assertTensorAlmostEqual(self, attributions[0], [[0.0, 0.0, 0.0]])
assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])
def test_deeplift_compare_with_and_without_inplace(self) -> None:
model1 = ReLULinearModel(inplace=True)
model2 = ReLULinearModel()
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
inputs = (x1, x2)
neuron_dl1 = NeuronDeepLift(model1, model1.relu)
attributions1 = neuron_dl1.attribute(inputs, 0, attribute_to_neuron_input=False)
neuron_dl2 = NeuronDeepLift(model2, model2.relu)
attributions2 = neuron_dl2.attribute(inputs, 0, attribute_to_neuron_input=False)
assertTensorAlmostEqual(self, attributions1[0], attributions2[0])
assertTensorAlmostEqual(self, attributions1[1], attributions2[1])
def test_linear_neuron_deeplift(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
neuron_dl = NeuronDeepLift(model, model.l3)
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=True
)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=False
)
self.assertTrue(neuron_dl.multiplies_by_inputs)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[6.0, 9.0, 0.0]])
def test_linear_neuron_deeplift_wo_inp_marginal_effects(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
neuron_dl = NeuronDeepLift(model, model.l3, multiply_by_inputs=False)
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=False
)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[2.0, 3.0, 0.0]])
def test_relu_deeplift_with_custom_attr_func(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
neuron_dl = NeuronDeepLift(model, model.l3)
expected = ([[0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0]])
self._relu_custom_attr_func_assert(neuron_dl, inputs, baselines, expected)
def test_relu_neuron_deeplift_shap(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
neuron_dl = NeuronDeepLiftShap(model, model.relu)
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=False
)
assertTensorAlmostEqual(self, attributions[0], [[0.0, 0.0, 0.0]])
assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])
def test_linear_neuron_deeplift_shap(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
neuron_dl = NeuronDeepLiftShap(model, model.l3)
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=True
)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=False
)
self.assertTrue(neuron_dl.multiplies_by_inputs)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[6.0, 9.0, 0.0]])
def test_linear_neuron_deeplift_shap_wo_inp_marginal_effects(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
neuron_dl = NeuronDeepLiftShap(model, model.l3, multiply_by_inputs=False)
attributions = neuron_dl.attribute(
inputs, 0, baselines, attribute_to_neuron_input=False
)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[2.0, 3.0, 0.0]])
attributions = neuron_dl.attribute(
inputs, lambda x: x[:, 0], baselines, attribute_to_neuron_input=False
)
assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
assertTensorAlmostEqual(self, attributions[1], [[2.0, 3.0, 0.0]])
def test_relu_deepliftshap_with_custom_attr_func(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
neuron_dl = NeuronDeepLiftShap(model, model.l3)
expected = (torch.zeros(1, 3), torch.zeros(1, 3))
self._relu_custom_attr_func_assert(neuron_dl, inputs, baselines, expected)
def _relu_custom_attr_func_assert(
self,
attr_method: Union[NeuronDeepLift, NeuronDeepLiftShap],
inputs: TensorOrTupleOfTensorsGeneric,
baselines,
expected,
) -> None:
def custom_attr_func(
multipliers: Tuple[Tensor, ...],
inputs: Tuple[Tensor, ...],
baselines: Union[None, Tuple[Union[Tensor, int, float], ...]] = None,
) -> Tuple[Tensor, ...]:
return tuple(multiplier * 0.0 for multiplier in multipliers)
attr = attr_method.attribute(
inputs, 0, baselines, custom_attribution_func=custom_attr_func
)
assertTensorAlmostEqual(self, attr[0], expected[0], 0.0)
assertTensorAlmostEqual(self, attr[1], expected[1], 0.0)
def test_lin_maxpool_lin_classification(self) -> None:
inputs = torch.ones(2, 4)
baselines = torch.tensor([[1, 2, 3, 9], [4, 8, 6, 7]]).float()
model = LinearMaxPoolLinearModel()
ndl = NeuronDeepLift(model, model.pool1)
attr = ndl.attribute(inputs, neuron_selector=0, baselines=baselines)
ndl2 = NeuronDeepLift(model, model.lin2)
attr2 = ndl2.attribute(
inputs,
neuron_selector=0,
baselines=baselines,
attribute_to_neuron_input=True,
)
assertTensorAlmostEqual(self, attr, attr2)
def test_convnet_maxpool2d_classification(self) -> None:
inputs = 100 * torch.randn(2, 1, 10, 10)
model = BasicModel_ConvNet()
ndl = NeuronDeepLift(model, model.pool1)
attr = ndl.attribute(inputs, neuron_selector=(0, 0, 0))
ndl2 = NeuronDeepLift(model, model.conv2)
attr2 = ndl2.attribute(
inputs, neuron_selector=(0, 0, 0), attribute_to_neuron_input=True
)
assertTensorAlmostEqual(self, attr.sum(), attr2.sum())
def test_convnet_maxpool3d_classification(self) -> None:
inputs = 100 * torch.randn(2, 1, 10, 10, 10)
model = BasicModel_ConvNet_MaxPool3d()
ndl = NeuronDeepLift(model, model.pool1)
attr = ndl.attribute(inputs, neuron_selector=(0, 0, 0, 0))
ndl2 = NeuronDeepLift(model, model.conv2)
attr2 = ndl2.attribute(
inputs, neuron_selector=(0, 0, 0, 0), attribute_to_neuron_input=True
)
assertTensorAlmostEqual(self, attr.sum(), attr2.sum())
|
#!/usr/bin/env python3
import unittest
from typing import Any, Callable, Tuple, Union
import torch
from captum._utils.typing import TensorLikeList, TensorOrTupleOfTensorsGeneric
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.neuron.neuron_integrated_gradients import (
NeuronIntegratedGradients,
)
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_ig_input_linear2(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._ig_input_test_assert(net, net.linear2, inp, 0, [[0.0, 390.0, 0.0]])
def test_simple_ig_input_linear2_wo_mult_by_inputs(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[100.0, 100.0, 100.0]])
self._ig_input_test_assert(
net, net.linear2, inp, 0, [[3.96, 3.96, 3.96]], multiply_by_inputs=False
)
def test_simple_ig_input_linear1(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._ig_input_test_assert(net, net.linear1, inp, (0,), [[0.0, 100.0, 0.0]])
def test_simple_ig_input_relu(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 6.0, 14.0]], requires_grad=True)
self._ig_input_test_assert(net, net.relu, inp, (0,), [[0.0, 3.0, 7.0]])
def test_simple_ig_input_relu2(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._ig_input_test_assert(net, net.relu, inp, 1, [[0.0, 5.0, 4.0]])
def test_simple_ig_input_relu_selector_fn(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._ig_input_test_assert(
net, net.relu, inp, lambda x: torch.sum(x[:, 2:]), [[0.0, 10.0, 8.0]]
)
def test_simple_ig_input_relu2_agg_neurons(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._ig_input_test_assert(
net, net.relu, inp, (slice(0, 2, 1),), [[0.0, 5.0, 4.0]]
)
def test_simple_ig_multi_input_linear2(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._ig_input_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
(0,),
([[0.0, 156.0, 0.0]], [[0.0, 156.0, 0.0]], [[0.0, 78.0, 0.0]]),
(4,),
)
def test_simple_ig_multi_input_relu(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 6.0, 14.0]])
inp2 = torch.tensor([[0.0, 6.0, 14.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._ig_input_test_assert(
net,
net.model.relu,
(inp1, inp2),
(0,),
([[0.0, 1.5, 3.5]], [[0.0, 1.5, 3.5]]),
(inp3, 0.5),
)
def test_simple_ig_multi_input_relu_batch(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 80.0, 0.0]])
inp2 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 20.0, 0.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 20.0, 0.0]])
self._ig_input_test_assert(
net,
net.model.relu,
(inp1, inp2),
(0,),
([[0.0, 1.5, 3.5], [0.0, 40.0, 0.0]], [[0.0, 1.5, 3.5], [0.0, 10.0, 0.0]]),
(inp3, 0.5),
)
def test_simple_ig_multi_input_relu_batch_selector_fn(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 80.0, 0.0]])
inp2 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 20.0, 0.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 20.0, 0.0]])
self._ig_input_test_assert(
net,
net.model.relu,
(inp1, inp2),
lambda x: torch.sum(x),
(
[[0.0, 10.5, 24.5], [0.0, 160.0, 0.0]],
[[0.0, 10.5, 24.5], [0.0, 40.0, 0.0]],
),
(inp3, 0.5),
)
def test_matching_output_gradient(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(2, 1, 10, 10, requires_grad=True)
baseline = 20 * torch.randn(2, 1, 10, 10, requires_grad=True)
self._ig_matching_test_assert(net, net.softmax, inp, baseline)
def _ig_input_test_assert(
self,
model: Module,
target_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
test_neuron: Union[int, Tuple[Union[int, slice], ...], Callable],
expected_input_ig: Union[TensorLikeList, Tuple[TensorLikeList, ...]],
additional_input: Any = None,
multiply_by_inputs: bool = True,
) -> None:
for internal_batch_size in [None, 5, 20]:
grad = NeuronIntegratedGradients(
model, target_layer, multiply_by_inputs=multiply_by_inputs
)
self.assertEqual(grad.multiplies_by_inputs, multiply_by_inputs)
attributions = grad.attribute(
test_input,
test_neuron,
n_steps=200,
method="gausslegendre",
additional_forward_args=additional_input,
internal_batch_size=internal_batch_size,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_input_ig, delta=0.1
)
def _ig_matching_test_assert(
self,
model: Module,
output_layer: Module,
test_input: Tensor,
baseline: Union[None, Tensor] = None,
) -> None:
out = model(test_input)
input_attrib = IntegratedGradients(model)
ig_attrib = NeuronIntegratedGradients(model, output_layer)
for i in range(out.shape[1]):
ig_vals = input_attrib.attribute(test_input, target=i, baselines=baseline)
neuron_ig_vals = ig_attrib.attribute(test_input, (i,), baselines=baseline)
assertTensorAlmostEqual(
self, ig_vals, neuron_ig_vals, delta=0.001, mode="max"
)
self.assertEqual(neuron_ig_vals.shape, test_input.shape)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from typing import Any, Callable, cast, List, Tuple, Union
import torch
from captum._utils.gradient import _forward_layer_eval
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.neuron.neuron_gradient import NeuronGradient
from captum.attr._core.saliency import Saliency
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_gradient_input_linear2(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._gradient_input_test_assert(net, net.linear2, inp, (0,), [[4.0, 4.0, 4.0]])
def test_simple_gradient_input_linear1(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._gradient_input_test_assert(net, net.linear1, inp, (0,), [[1.0, 1.0, 1.0]])
def test_simple_gradient_input_relu_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._gradient_input_test_assert(
net, net.relu, inp, (0,), [[1.0, 1.0, 1.0]], attribute_to_neuron_input=True
)
def test_simple_gradient_input_linear1_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._gradient_input_test_assert(net, net.linear1, inp, (0,), [[1.0, 1.0, 1.0]])
def test_simple_gradient_input_relu(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]], requires_grad=True)
self._gradient_input_test_assert(net, net.relu, inp, 0, [[0.0, 0.0, 0.0]])
def test_simple_gradient_input_relu2(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._gradient_input_test_assert(net, net.relu, inp, 1, [[1.0, 1.0, 1.0]])
def test_simple_gradient_input_relu_selector_fn(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._gradient_input_test_assert(
net, net.relu, inp, lambda x: torch.sum(x), [[3.0, 3.0, 3.0]]
)
def test_simple_gradient_input_relu2_agg_neurons(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._gradient_input_test_assert(
net, net.relu, inp, (slice(0, 2, 1),), [[1.0, 1.0, 1.0]]
)
def test_simple_gradient_multi_input_linear2(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 100.0, 0.0]])
inp2 = torch.tensor([[0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 0.0]])
self._gradient_input_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
(0,),
([[12.0, 12.0, 12.0]], [[12.0, 12.0, 12.0]], [[12.0, 12.0, 12.0]]),
(3,),
)
def test_simple_gradient_multi_input_linear1(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 100.0, 0.0]])
inp2 = torch.tensor([[0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 0.0]])
self._gradient_input_test_assert(
net,
net.model.linear1,
(inp1, inp2),
(0,),
([[5.0, 5.0, 5.0]], [[5.0, 5.0, 5.0]]),
(inp3, 5),
)
def test_matching_output_gradient(self) -> None:
net = BasicModel_ConvNet()
inp = torch.randn(2, 1, 10, 10, requires_grad=True)
self._gradient_matching_test_assert(net, net.softmax, inp)
def test_matching_intermediate_gradient(self) -> None:
net = BasicModel_ConvNet()
inp = torch.randn(3, 1, 10, 10)
self._gradient_matching_test_assert(net, net.relu2, inp)
def _gradient_input_test_assert(
self,
model: Module,
target_layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
test_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
expected_input_gradient: Union[
List[List[float]], Tuple[List[List[float]], ...]
],
additional_input: Any = None,
attribute_to_neuron_input: bool = False,
) -> None:
grad = NeuronGradient(model, target_layer)
attributions = grad.attribute(
test_input,
test_neuron_selector,
additional_forward_args=additional_input,
attribute_to_neuron_input=attribute_to_neuron_input,
)
assertTensorTuplesAlmostEqual(self, attributions, expected_input_gradient)
def _gradient_matching_test_assert(
self, model: Module, output_layer: Module, test_input: Tensor
) -> None:
out = _forward_layer_eval(model, test_input, output_layer)
# Select first element of tuple
out = out[0]
gradient_attrib = NeuronGradient(model, output_layer)
self.assertFalse(gradient_attrib.multiplies_by_inputs)
for i in range(cast(Tuple[int, ...], out.shape)[1]):
neuron: Tuple[int, ...] = (i,)
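# pad the neuron index with zeros so it addresses a single scalar
# activation in layers with extra dimensions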
while len(neuron) < len(out.shape) - 1:
neuron = neuron + (0,)
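# saliency of the selected scalar activation w.r.t. the inputs
# should match NeuronGradient for the same neuron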
input_attrib = Saliency(
lambda x: _forward_layer_eval(
model, x, output_layer, grad_enabled=True
)[0][(slice(None), *neuron)]
)
sal_vals = input_attrib.attribute(test_input, abs=False)
grad_vals = gradient_attrib.attribute(test_input, neuron)
# Verify matching sizes
self.assertEqual(grad_vals.shape, sal_vals.shape)
self.assertEqual(grad_vals.shape, test_input.shape)
assertTensorAlmostEqual(self, sal_vals, grad_vals, delta=0.001, mode="max")
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from typing import Any, Callable, Tuple, Union
import torch
from captum._utils.typing import (
BaselineType,
TensorLikeList,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._core.neuron.neuron_feature_ablation import NeuronFeatureAblation
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_ablation_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
net,
net.linear2,
inp,
[[280.0, 280.0, 120.0]],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
)
def test_multi_sample_ablation_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
self._ablation_test_assert(
net,
net.linear2,
inp,
[[41.0, 41.0, 12.0], [280.0, 280.0, 120.0]],
feature_mask=mask,
perturbations_per_eval=(1, 2, 3),
)
def test_multi_sample_ablation_with_selector_fn(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
self._ablation_test_assert(
net,
net.linear2,
inp,
[[82.0, 82.0, 24.0], [560.0, 560.0, 240.0]],
feature_mask=mask,
perturbations_per_eval=(1, 2, 3),
neuron_selector=lambda x: torch.sum(x, dim=1),
)
def test_multi_sample_ablation_with_slice(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
self._ablation_test_assert(
net,
net.linear2,
inp,
[[82.0, 82.0, 24.0], [560.0, 560.0, 240.0]],
feature_mask=mask,
perturbations_per_eval=(1, 2, 3),
neuron_selector=(slice(0, 2, 1),),
)
def test_multi_input_ablation_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[492.0, 492.0, 492.0], [200.0, 200.0, 200.0]],
[[80.0, 200.0, 120.0], [0.0, 400.0, 0.0]],
[[0.0, 400.0, 40.0], [60.0, 60.0, 60.0]],
)
self._ablation_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
self._ablation_test_assert(
net,
net.model.linear2,
(inp1, inp2),
expected[0:1],
additional_input=(inp3, 1),
feature_mask=(mask1, mask2),
perturbations_per_eval=(1, 2, 3),
)
expected_with_baseline = (
[[468.0, 468.0, 468.0], [184.0, 192.0, 184.0]],
[[68.0, 188.0, 108.0], [-12.0, 388.0, -12.0]],
[[-16.0, 384.0, 24.0], [12.0, 12.0, 12.0]],
)
self._ablation_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_ablation(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
baseline1 = torch.tensor([[3.0, 0.0, 0.0]])
baseline2 = torch.tensor([[0.0, 1.0, 0.0]])
baseline3 = torch.tensor([[1.0, 2.0, 3.0]])
self._ablation_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
(
[[80.0, 400.0, 0.0], [68.0, 200.0, 120.0]],
[[80.0, 196.0, 120.0], [0.0, 396.0, 0.0]],
[[-4.0, 392.0, 28.0], [4.0, 32.0, 0.0]],
),
additional_input=(1,),
baselines=(baseline1, baseline2, baseline3),
perturbations_per_eval=(1, 2, 3),
)
baseline1_exp = torch.tensor([[3.0, 0.0, 0.0], [3.0, 0.0, 2.0]])
baseline2_exp = torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 4.0]])
baseline3_exp = torch.tensor([[3.0, 2.0, 4.0], [1.0, 2.0, 3.0]])
self._ablation_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
(
[[80.0, 400.0, 0.0], [68.0, 200.0, 112.0]],
[[80.0, 196.0, 120.0], [0.0, 396.0, -16.0]],
[[-12.0, 392.0, 24.0], [4.0, 32.0, 0.0]],
),
additional_input=(1,),
baselines=(baseline1_exp, baseline2_exp, baseline3_exp),
perturbations_per_eval=(1, 2, 3),
)
def test_simple_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
self._ablation_test_assert(
net,
net.relu2,
(inp, inp2),
(67 * torch.ones_like(inp), 13 * torch.ones_like(inp2)),
feature_mask=(torch.tensor(0), torch.tensor(1)),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
)
self._ablation_test_assert(
net,
net.relu2,
(inp, inp2),
(
[
[
[
[0.0, 2.0, 4.0, 3.0],
[4.0, 9.0, 10.0, 7.0],
[4.0, 13.0, 14.0, 11.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
[
[
[
[1.0, 2.0, 2.0, 1.0],
[1.0, 2.0, 2.0, 1.0],
[1.0, 2.0, 2.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
),
perturbations_per_eval=(1, 3, 7, 14),
)
def test_simple_multi_input_conv_intermediate(self) -> None:
net = BasicModel_ConvNet_One_Conv(inplace=True)
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
(torch.zeros_like(inp), torch.zeros_like(inp2)),
feature_mask=(torch.tensor(0), torch.tensor(1)),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
neuron_selector=(1, 0, 0),
)
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
(45 * torch.ones_like(inp), 9 * torch.ones_like(inp2)),
feature_mask=(torch.tensor(0), torch.tensor(1)),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
neuron_selector=(1, 0, 0),
attribute_to_neuron_input=True,
)
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
(
[
[
[
[0.0, 1.0, 2.0, 0.0],
[4.0, 5.0, 6.0, 0.0],
[8.0, 9.0, 10.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
[
[
[
[1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
]
],
),
perturbations_per_eval=(1, 3, 7, 14),
neuron_selector=(1, 0, 0),
attribute_to_neuron_input=True,
)
def _ablation_test_assert(
self,
model: Module,
layer: Module,
test_input: TensorOrTupleOfTensorsGeneric,
expected_ablation: Union[
TensorLikeList,
Tuple[TensorLikeList, ...],
Tuple[Tensor, ...],
],
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable] = 0,
attribute_to_neuron_input: bool = False,
) -> None:
for batch_size in perturbations_per_eval:
ablation = NeuronFeatureAblation(model, layer)
self.assertTrue(ablation.multiplies_by_inputs)
attributions = ablation.attribute(
test_input,
neuron_selector=neuron_selector,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
attribute_to_neuron_input=attribute_to_neuron_input,
)
if isinstance(expected_ablation, tuple):
for i in range(len(expected_ablation)):
assertTensorAlmostEqual(self, attributions[i], expected_ablation[i])
else:
assertTensorAlmostEqual(self, attributions, expected_ablation)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
from typing import Callable, Tuple, Union
import torch
from captum.attr._core.neuron.neuron_gradient_shap import NeuronGradientShap
from captum.attr._core.neuron.neuron_integrated_gradients import (
NeuronIntegratedGradients,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel_MultiLayer
from tests.helpers.classification_models import SoftmaxModel
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_basic_multilayer(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, 20.0, 10.0]])
baselines = torch.zeros(2, 3)
ngs = NeuronGradientShap(model, model.linear1, multiply_by_inputs=False)
attr = ngs.attribute(inputs, 0, baselines=baselines, stdevs=0.0)
self.assertFalse(ngs.multiplies_by_inputs)
assertTensorAlmostEqual(self, attr, [[1.0, 1.0, 1.0]])
def test_basic_multilayer_wo_mult_by_inputs(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, 20.0, 10.0]])
baselines = torch.randn(2, 3)
self._assert_attributions(model, model.linear1, inputs, baselines, 0, 60)
def test_basic_multilayer_wo_mult_by_inputs_agg_neurons(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, 20.0, 10.0]])
baselines = torch.randn(2, 3)
self._assert_attributions(
model, model.linear1, inputs, baselines, (slice(0, 1, 1),), 60
)
self._assert_attributions(
model, model.linear1, inputs, baselines, lambda x: x[:, 0:1], 60
)
def test_classification(self) -> None:
def custom_baseline_fn(inputs: Tensor) -> Tensor:
num_in = inputs.shape[1] # type: ignore
return torch.arange(0.0, num_in * 5.0).reshape(5, num_in)
num_in = 40
n_samples = 100
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
model.eval()
inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
baselines = custom_baseline_fn
self._assert_attributions(model, model.relu1, inputs, baselines, 1, n_samples)
def _assert_attributions(
self,
model: Module,
layer: Module,
inputs: Tensor,
baselines: Union[Tensor, Callable[..., Tensor]],
neuron_ind: Union[int, Tuple[Union[int, slice], ...], Callable],
n_samples: int = 5,
) -> None:
ngs = NeuronGradientShap(model, layer)
nig = NeuronIntegratedGradients(model, layer)
attrs_gs = ngs.attribute(
inputs, neuron_ind, baselines=baselines, n_samples=n_samples, stdevs=0.09
)
if callable(baselines):
baselines = baselines(inputs)
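# NeuronGradientShap should approximate the mean of the per-baseline
# NeuronIntegratedGradients attributions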
attrs_ig = []
for baseline in torch.unbind(baselines):
attrs_ig.append(
nig.attribute(inputs, neuron_ind, baselines=baseline.unsqueeze(0))
)
combined_attrs_ig = torch.stack(attrs_ig, dim=0).mean(dim=0)
self.assertTrue(ngs.multiplies_by_inputs)
assertTensorAlmostEqual(self, attrs_gs, combined_attrs_ig, 0.5)
|
#!/usr/bin/env python3
from __future__ import print_function
import os
import tempfile
import unittest
from typing import Dict, List
import torch
HAS_PYTEXT = True
try:
from captum.attr._models.pytext import (
BaselineGenerator,
configure_model_integ_grads_embeddings,
)
from pytext.common.constants import DatasetFieldName
from pytext.config.component import create_featurizer, create_model
from pytext.config.doc_classification import ModelInputConfig, TargetConfig
from pytext.config.field_config import FeatureConfig, WordFeatConfig
from pytext.data import CommonMetadata
from pytext.data.doc_classification_data_handler import DocClassificationDataHandler
from pytext.data.featurizer import SimpleFeaturizer
from pytext.fields import FieldMeta
from pytext.models.decoders.mlp_decoder import MLPDecoder
from pytext.models.doc_model import DocModel_Deprecated
from pytext.models.embeddings.word_embedding import WordEmbedding
from pytext.models.representations.bilstm_doc_attention import BiLSTMDocAttention
except ImportError:
HAS_PYTEXT = False
class VocabStub:
def __init__(self) -> None:
self.itos: List = []
self.stoi: Dict = {}
# TODO add more test cases for dict features
class TestWordEmbeddings(unittest.TestCase):
def setUp(self):
if not HAS_PYTEXT:
return self.skipTest("Skip the test since PyText is not installed")
self.embedding_file, self.embedding_path = tempfile.mkstemp()
self.word_embedding_file, self.word_embedding_path = tempfile.mkstemp()
self.decoder_file, self.decoder_path = tempfile.mkstemp()
self.representation_file, self.representation_path = tempfile.mkstemp()
self.model = self._create_dummy_model()
self.data_handler = self._create_dummy_data_handler()
def tearDown(self) -> None:
for f in (
self.embedding_file,
self.word_embedding_file,
self.decoder_file,
self.representation_file,
):
os.close(f)
for p in (
self.embedding_path,
self.word_embedding_path,
self.decoder_path,
self.representation_path,
):
os.remove(p)
def test_word_embeddings(self) -> None:
embedding_list = configure_model_integ_grads_embeddings(self.model)
integrated_gradients_embedding = embedding_list[0]
input = torch.arange(0, 300).unsqueeze(0).unsqueeze(0)
self.assertEqual(integrated_gradients_embedding.embedding_dim, 300)
self.assertEqual(embedding_list.embedding_dim[0], 300)
self.assertEqual(embedding_list(input).shape[2], input.shape[2])
self.assertTrue(
torch.allclose(
integrated_gradients_embedding.get_attribution_map(input)["word"], input
)
)
def test_baseline_generation(self) -> None:
baseline_generator = BaselineGenerator(self.model, self.data_handler, "cpu")
embedding_list = configure_model_integ_grads_embeddings(self.model)
integrated_gradients_embedding = embedding_list[0]
self.assertTrue(
torch.allclose(
baseline_generator.generate_baseline(integrated_gradients_embedding, 5)[
0
],
torch.tensor([[1, 1, 1, 1, 1]]),
)
)
def _create_dummy_data_handler(self):
feat = WordFeatConfig(
vocab_size=4,
vocab_from_all_data=True,
vocab_from_train_data=True,
vocab_from_pretrained_embeddings=False,
pretrained_embeddings_path=None,
)
featurizer = create_featurizer(
SimpleFeaturizer.Config(), FeatureConfig(word_feat=feat)
)
data_handler = DocClassificationDataHandler.from_config(
DocClassificationDataHandler.Config(),
ModelInputConfig(word_feat=feat),
TargetConfig(),
featurizer=featurizer,
)
train_data = data_handler.gen_dataset(
[{"text": "<pad>"}], include_label_fields=False
)
eval_data = data_handler.gen_dataset(
[{"text": "<pad>"}], include_label_fields=False
)
test_data = data_handler.gen_dataset(
[{"text": "<pad>"}], include_label_fields=False
)
data_handler.init_feature_metadata(train_data, eval_data, test_data)
return data_handler
def _create_dummy_model(self):
return create_model(
DocModel_Deprecated.Config(
representation=BiLSTMDocAttention.Config(
save_path=self.representation_path
),
decoder=MLPDecoder.Config(save_path=self.decoder_path),
),
FeatureConfig(
word_feat=WordEmbedding.Config(
embed_dim=300, save_path=self.word_embedding_path
),
save_path=self.embedding_path,
),
self._create_dummy_meta_data(),
)
def _create_dummy_meta_data(self):
text_field_meta = FieldMeta()
text_field_meta.vocab = VocabStub()
text_field_meta.vocab_size = 4
text_field_meta.unk_token_idx = 1
text_field_meta.pad_token_idx = 0
text_field_meta.pretrained_embeds_weight = None
label_meta = FieldMeta()
label_meta.vocab = VocabStub()
label_meta.vocab_size = 3
metadata = CommonMetadata()
metadata.features = {DatasetFieldName.TEXT_FIELD: text_field_meta}
metadata.target = label_meta
return metadata
|
#!/usr/bin/env python3
from __future__ import print_function
import unittest
import torch
from captum.attr._models.base import (
configure_interpretable_embedding_layer,
InterpretableEmbeddingBase,
remove_interpretable_embedding_layer,
)
from tests.helpers.basic import assertTensorAlmostEqual
from tests.helpers.basic_models import BasicEmbeddingModel, TextModule
from torch.nn import Embedding
class Test(unittest.TestCase):
def test_interpretable_embedding_base(self) -> None:
input1 = torch.tensor([2, 5, 0, 1])
input2 = torch.tensor([3, 0, 0, 2])
model = BasicEmbeddingModel()
output = model(input1, input2)
interpretable_embedding1 = configure_interpretable_embedding_layer(
model, "embedding1"
)
self.assertEqual(model.embedding1, interpretable_embedding1)
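# configure_interpretable_embedding_layer replaces the embedding
# module in place with an InterpretableEmbeddingBase wrapper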
self._assert_embeddings_equal(
input1,
output,
interpretable_embedding1,
model.embedding1.embedding_dim,
model.embedding1.num_embeddings,
)
interpretable_embedding2 = configure_interpretable_embedding_layer(
model, "embedding2.inner_embedding"
)
self.assertEqual(model.embedding2.inner_embedding, interpretable_embedding2)
self._assert_embeddings_equal(
input2,
output,
interpretable_embedding2,
model.embedding2.inner_embedding.embedding_dim,
model.embedding2.inner_embedding.num_embeddings,
)
# configure another embedding when one is already configured
with self.assertRaises(AssertionError):
configure_interpretable_embedding_layer(model, "embedding2.inner_embedding")
with self.assertRaises(AssertionError):
configure_interpretable_embedding_layer(model, "embedding1")
# remove interpretable embedding base
self.assertTrue(
model.embedding2.inner_embedding.__class__ is InterpretableEmbeddingBase
)
remove_interpretable_embedding_layer(model, interpretable_embedding2)
self.assertTrue(model.embedding2.inner_embedding.__class__ is Embedding)
self.assertTrue(model.embedding1.__class__ is InterpretableEmbeddingBase)
remove_interpretable_embedding_layer(model, interpretable_embedding1)
self.assertTrue(model.embedding1.__class__ is Embedding)
def test_custom_module(self) -> None:
input1 = torch.tensor([[3, 2, 0], [1, 2, 4]])
input2 = torch.tensor([[0, 1, 0], [1, 2, 3]])
model = BasicEmbeddingModel()
output = model(input1, input2)
expected = model.embedding2(input=input2)
# in this case we make the custom embedding layer (TextModule) interpretable
interpretable_embedding = configure_interpretable_embedding_layer(
model, "embedding2"
)
actual = interpretable_embedding.indices_to_embeddings(input=input2)
output_interpretable_models = model(input1, actual)
assertTensorAlmostEqual(
self, output, output_interpretable_models, delta=0.05, mode="max"
)
assertTensorAlmostEqual(self, expected, actual, delta=0.0, mode="max")
self.assertTrue(model.embedding2.__class__ is InterpretableEmbeddingBase)
remove_interpretable_embedding_layer(model, interpretable_embedding)
self.assertTrue(model.embedding2.__class__ is TextModule)
self._assert_embeddings_equal(input2, output, interpretable_embedding)
def test_nested_multi_embeddings(self) -> None:
input1 = torch.tensor([[3, 2, 0], [1, 2, 4]])
input2 = torch.tensor([[0, 1, 0], [2, 6, 8]])
input3 = torch.tensor([[4, 1, 0], [2, 2, 8]])
model = BasicEmbeddingModel(nested_second_embedding=True)
output = model(input1, input2, input3)
expected = model.embedding2(input=input2, another_input=input3)
# in this case we make the custom embedding layer (TextModule) interpretable
interpretable_embedding2 = configure_interpretable_embedding_layer(
model, "embedding2"
)
actual = interpretable_embedding2.indices_to_embeddings(
input=input2, another_input=input3
)
output_interpretable_models = model(input1, actual)
assertTensorAlmostEqual(
self, output, output_interpretable_models, delta=0.05, mode="max"
)
assertTensorAlmostEqual(self, expected, actual, delta=0.0, mode="max")
self.assertTrue(model.embedding2.__class__ is InterpretableEmbeddingBase)
remove_interpretable_embedding_layer(model, interpretable_embedding2)
self.assertTrue(model.embedding2.__class__ is TextModule)
self._assert_embeddings_equal(input2, output, interpretable_embedding2)
def _assert_embeddings_equal(
self,
input,
output,
interpretable_embedding,
embedding_dim=None,
num_embeddings=None,
):
if interpretable_embedding.embedding_dim is not None:
self.assertEqual(embedding_dim, interpretable_embedding.embedding_dim)
self.assertEqual(num_embeddings, interpretable_embedding.num_embeddings)
# embeddings have shape [batch, embedding_dim], e.g. [4, 100]
emb_shape = interpretable_embedding.indices_to_embeddings(input).shape
self.assertEqual(emb_shape[0], input.shape[0])
if interpretable_embedding.embedding_dim is not None:
self.assertEqual(emb_shape[1], interpretable_embedding.embedding_dim)
self.assertEqual(input.shape[0], output.shape[0])
|
#!/usr/bin/env python3
from typing import Any, cast, List, Tuple, Union
import torch
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.layer.layer_activation import LayerActivation
from captum.attr._core.layer.layer_conductance import LayerConductance
from captum.attr._core.layer.layer_integrated_gradients import LayerIntegratedGradients
from captum.attr._models.base import (
configure_interpretable_embedding_layer,
remove_interpretable_embedding_layer,
)
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicEmbeddingModel,
BasicModel_MultiLayer,
BasicModel_MultiLayer_TrueMultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_compare_with_emb_patching(self) -> None:
input1 = torch.tensor([[2, 5, 0, 1]])
baseline1 = torch.tensor([[0, 0, 0, 0]])
# these will be used as additional forward args
input2 = torch.tensor([[0, 2, 4, 1]])
input3 = torch.tensor([[2, 3, 0, 1]])
self._assert_compare_with_emb_patching(
input1, baseline1, additional_args=(input2, input3)
)
def test_compare_with_emb_patching_wo_mult_by_inputs(self) -> None:
input1 = torch.tensor([[2, 5, 0, 1]])
baseline1 = torch.tensor([[0, 0, 0, 0]])
# these will be used as additional forward args
input2 = torch.tensor([[0, 2, 4, 1]])
input3 = torch.tensor([[2, 3, 0, 1]])
self._assert_compare_with_emb_patching(
input1,
baseline1,
additional_args=(input2, input3),
multiply_by_inputs=False,
)
def test_compare_with_emb_patching_batch(self) -> None:
input1 = torch.tensor([[2, 5, 0, 1], [3, 1, 1, 0]])
baseline1 = torch.tensor([[0, 0, 0, 0]])
# these will be used as additional forward args
input2 = torch.tensor([[0, 2, 4, 1], [2, 3, 5, 7]])
input3 = torch.tensor([[3, 5, 6, 7], [2, 3, 0, 1]])
self._assert_compare_with_emb_patching(
input1, baseline1, additional_args=(input2, input3)
)
def test_compare_with_layer_conductance_attr_to_outputs(self) -> None:
model = BasicModel_MultiLayer()
input = torch.tensor([[50.0, 50.0, 50.0]], requires_grad=True)
self._assert_compare_with_layer_conductance(model, input)
def test_compare_with_layer_conductance_attr_to_inputs(self) -> None:
# Note that Layer Conductance and Layer Integrated Gradients (IG) aren't
# exactly the same. Layer IG computes the partial derivatives of the
# output with respect to the layer and sums them along the straight-line
# path. Layer Conductance computes the same partial derivatives, but
# instead of the straight line it uses a path defined by F(i) - F(i - 1).
# In cases where that path becomes close to a straight line, Layer IG and
# Layer Conductance become numerically very close.
model = BasicModel_MultiLayer()
input = torch.tensor([[50.0, 50.0, 50.0]], requires_grad=True)
self._assert_compare_with_layer_conductance(model, input, True)
def test_multiple_tensors_compare_with_expected(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._assert_compare_with_expected(
net,
net.multi_relu,
inp,
([[90.0, 100.0, 100.0, 100.0]], [[90.0, 100.0, 100.0, 100.0]]),
)
def test_multiple_layers_single_inputs(self) -> None:
input1 = torch.tensor([[2, 5, 0, 1], [3, 1, 1, 0]])
input2 = torch.tensor([[0, 2, 4, 1], [2, 3, 5, 7]])
input3 = torch.tensor([[3, 5, 6, 7], [2, 3, 0, 1]])
inputs = (input1, input2, input3)
baseline = tuple(torch.zeros_like(inp) for inp in inputs)
self._assert_compare_with_emb_patching(
inputs,
baseline,
multiple_emb=True,
additional_args=None,
)
def test_multiple_layers_multiple_inputs_shared_input(self) -> None:
input1 = torch.randn(5, 3)
input2 = torch.randn(5, 3)
input3 = torch.randn(5, 3)
inputs = (input1, input2, input3)
baseline = tuple(torch.zeros_like(inp) for inp in inputs)
net = BasicModel_MultiLayer_TrueMultiInput()
lig = LayerIntegratedGradients(net, layer=[net.m1, net.m234])
ig = IntegratedGradients(net)
# test layer inputs
attribs_inputs = lig.attribute(
inputs, baseline, target=0, attribute_to_layer_input=True
)
attribs_inputs_regular_ig = ig.attribute(inputs, baseline, target=0)
self.assertIsInstance(attribs_inputs, list)
self.assertEqual(len(attribs_inputs), 2)
self.assertIsInstance(attribs_inputs[0], Tensor)
self.assertIsInstance(attribs_inputs[1], tuple)
self.assertEqual(len(attribs_inputs[1]), 3)
assertTensorTuplesAlmostEqual(
self,
# last input for second layer is first input =>
# add the attributions
(attribs_inputs[0] + attribs_inputs[1][-1],) + attribs_inputs[1][0:-1],
attribs_inputs_regular_ig,
delta=1e-5,
)
# test layer outputs
attribs = lig.attribute(inputs, baseline, target=0)
ig = IntegratedGradients(lambda x, y: x + y)
attribs_ig = ig.attribute(
(net.m1(input1), net.m234(input2, input3, input1, 1)),
(net.m1(baseline[0]), net.m234(baseline[1], baseline[2], baseline[1], 1)),
target=0,
)
assertTensorTuplesAlmostEqual(self, attribs, attribs_ig, delta=1e-5)
def test_multiple_layers_multiple_input_outputs(self) -> None:
# test with multiple layers, where one layer accepts multiple inputs
input1 = torch.randn(5, 3)
input2 = torch.randn(5, 3)
input3 = torch.randn(5, 3)
input4 = torch.randn(5, 3)
inputs = (input1, input2, input3, input4)
baseline = tuple(torch.zeros_like(inp) for inp in inputs)
net = BasicModel_MultiLayer_TrueMultiInput()
lig = LayerIntegratedGradients(net, layer=[net.m1, net.m234])
ig = IntegratedGradients(net)
# test layer inputs
attribs_inputs = lig.attribute(
inputs, baseline, target=0, attribute_to_layer_input=True
)
attribs_inputs_regular_ig = ig.attribute(inputs, baseline, target=0)
self.assertIsInstance(attribs_inputs, list)
self.assertEqual(len(attribs_inputs), 2)
self.assertIsInstance(attribs_inputs[0], Tensor)
self.assertIsInstance(attribs_inputs[1], tuple)
self.assertEqual(len(attribs_inputs[1]), 3)
assertTensorTuplesAlmostEqual(
self,
(attribs_inputs[0],) + attribs_inputs[1],
attribs_inputs_regular_ig,
delta=1e-7,
)
# test layer outputs
attribs = lig.attribute(inputs, baseline, target=0)
ig = IntegratedGradients(lambda x, y: x + y)
attribs_ig = ig.attribute(
(net.m1(input1), net.m234(input2, input3, input4, 1)),
(net.m1(baseline[0]), net.m234(baseline[1], baseline[2], baseline[3], 1)),
target=0,
)
assertTensorTuplesAlmostEqual(self, attribs, attribs_ig, delta=1e-7)
def test_multiple_tensors_compare_with_exp_wo_mult_by_inputs(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 100.0, 0.0]])
base = torch.tensor([[0.0, 0.0, 0.0]])
target_layer = net.multi_relu
layer_ig = LayerIntegratedGradients(net, target_layer)
layer_ig_wo_mult_by_inputs = LayerIntegratedGradients(
net, target_layer, multiply_by_inputs=False
)
layer_act = LayerActivation(net, target_layer)
attributions = layer_ig.attribute(inp, target=0)
attributions_wo_mult_by_inputs = layer_ig_wo_mult_by_inputs.attribute(
inp, target=0
)
inp_minus_baseline_activ = tuple(
inp_act - base_act
for inp_act, base_act in zip(
layer_act.attribute(inp), layer_act.attribute(base)
)
)
assertTensorTuplesAlmostEqual(
self,
tuple(
attr_wo_mult * inp_min_base
for attr_wo_mult, inp_min_base in zip(
attributions_wo_mult_by_inputs, inp_minus_baseline_activ
)
),
attributions,
)
def _assert_compare_with_layer_conductance(
self, model: Module, input: Tensor, attribute_to_layer_input: bool = False
):
lc = LayerConductance(model, cast(Module, model.linear2))
        # For a large number of steps, Layer Conductance and Layer Integrated
        # Gradients become very close.
attribution, delta = lc.attribute(
input,
target=0,
n_steps=1500,
return_convergence_delta=True,
attribute_to_layer_input=attribute_to_layer_input,
)
lig = LayerIntegratedGradients(model, cast(Module, model.linear2))
attributions2, delta2 = lig.attribute(
input,
target=0,
n_steps=1500,
return_convergence_delta=True,
attribute_to_layer_input=attribute_to_layer_input,
)
assertTensorAlmostEqual(
self, attribution, attributions2, delta=0.01, mode="max"
)
assertTensorAlmostEqual(self, delta, delta2, delta=0.5, mode="max")
def _assert_compare_with_emb_patching(
self,
input: Union[Tensor, Tuple[Tensor, ...]],
baseline: Union[Tensor, Tuple[Tensor, ...]],
additional_args: Union[None, Tuple[Tensor, ...]],
multiply_by_inputs: bool = True,
multiple_emb: bool = False,
):
model = BasicEmbeddingModel(nested_second_embedding=True)
if multiple_emb:
module_list: List[Module] = [model.embedding1, model.embedding2]
lig = LayerIntegratedGradients(
model,
module_list,
multiply_by_inputs=multiply_by_inputs,
)
else:
lig = LayerIntegratedGradients(
model, model.embedding1, multiply_by_inputs=multiply_by_inputs
)
attributions, delta = lig.attribute(
input,
baselines=baseline,
additional_forward_args=additional_args,
return_convergence_delta=True,
)
        # Now interpret with standard Integrated Gradients, monkey-patching
        # the embedding layers with interpretable embeddings.
e1 = configure_interpretable_embedding_layer(model, "embedding1")
e1_input_emb = e1.indices_to_embeddings(input[0] if multiple_emb else input)
e1_baseline_emb = e1.indices_to_embeddings(
baseline[0] if multiple_emb else baseline
)
input_emb = e1_input_emb
baseline_emb = e1_baseline_emb
e2 = None
if multiple_emb:
e2 = configure_interpretable_embedding_layer(model, "embedding2")
e2_input_emb = e2.indices_to_embeddings(*input[1:])
e2_baseline_emb = e2.indices_to_embeddings(*baseline[1:])
input_emb = (e1_input_emb, e2_input_emb)
baseline_emb = (e1_baseline_emb, e2_baseline_emb)
ig = IntegratedGradients(model, multiply_by_inputs=multiply_by_inputs)
attributions_with_ig, delta_with_ig = ig.attribute(
input_emb,
baselines=baseline_emb,
additional_forward_args=additional_args,
target=0,
return_convergence_delta=True,
)
remove_interpretable_embedding_layer(model, e1)
if e2 is not None:
remove_interpretable_embedding_layer(model, e2)
self.assertEqual(
isinstance(attributions_with_ig, tuple), isinstance(attributions, list)
)
self.assertTrue(
isinstance(attributions_with_ig, tuple)
if multiple_emb
else not isinstance(attributions_with_ig, tuple)
)
# convert to tuple for comparison
if not isinstance(attributions_with_ig, tuple):
attributions = (attributions,)
attributions_with_ig = (attributions_with_ig,)
else:
# convert list to tuple
self.assertIsInstance(attributions, list)
attributions = tuple(attributions)
for attr_lig, attr_ig in zip(attributions, attributions_with_ig):
self.assertEqual(cast(Tensor, attr_lig).shape, cast(Tensor, attr_ig).shape)
assertTensorAlmostEqual(self, attr_lig, attr_ig, delta=0.05, mode="max")
if multiply_by_inputs:
assertTensorAlmostEqual(self, delta, delta_with_ig, delta=0.05, mode="max")
def _assert_compare_with_expected(
self,
model: Module,
target_layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_ig: Tuple[List[List[float]], ...],
additional_input: Any = None,
):
layer_ig = LayerIntegratedGradients(model, target_layer)
attributions = layer_ig.attribute(
test_input, target=0, additional_forward_args=additional_input
)
assertTensorTuplesAlmostEqual(self, attributions, expected_ig, delta=0.01)
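# The helpers above check LayerIntegratedGradients against embedding patching,
# plain IntegratedGradients, and LayerConductance. Below is a minimal usage
# sketch on a toy model; the model, layer choice, and shapes are illustrative
# assumptions and are not referenced by the tests above.
def _example_layer_integrated_gradients() -> None:
    import torch
    import torch.nn as nn
    from captum.attr import LayerIntegratedGradients
    model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
    lig = LayerIntegratedGradients(model, model[0])
    inp = torch.randn(1, 3)
    # Layer attributions take the shape of the chosen layer's output: (1, 4).
    attr = lig.attribute(inp, baselines=torch.zeros(1, 3), target=0)
    assert attr.shape == (1, 4)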
|
#!/usr/bin/env python3
import unittest
from typing import Any, Tuple, Union
import torch
from captum._utils.typing import TensorLikeList
from captum.attr._core.layer.grad_cam import LayerGradCam
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_non_conv(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._grad_cam_test_assert(net, net.linear0, inp, [[400.0]])
def test_simple_multi_input_non_conv(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]], requires_grad=True)
self._grad_cam_test_assert(net, net.multi_relu, inp, ([[21.0]], [[21.0]]))
def test_simple_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
self._grad_cam_test_assert(
net, net.conv1, inp, [[[[11.25, 13.5], [20.25, 22.5]]]]
)
def test_simple_input_conv_split_channels(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
expected_result = [
[
[[-3.7500, 3.0000], [23.2500, 30.0000]],
[[15.0000, 10.5000], [-3.0000, -7.5000]],
]
]
self._grad_cam_test_assert(
net,
net.conv1,
inp,
expected_activation=expected_result,
attr_dim_summation=False,
)
def test_simple_input_conv_no_grad(self) -> None:
net = BasicModel_ConvNet_One_Conv()
        # This deactivates requires_grad on all parameters. Some models
        # explicitly do that before being interpreted.
for param in net.parameters():
param.requires_grad = False
inp = torch.arange(16).view(1, 1, 4, 4).float()
self._grad_cam_test_assert(
net, net.conv1, inp, [[[[11.25, 13.5], [20.25, 22.5]]]]
)
def test_simple_input_conv_relu(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
self._grad_cam_test_assert(net, net.relu1, inp, [[[[0.0, 4.0], [28.0, 32.5]]]])
def test_simple_input_conv_without_final_relu(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
        # Add a negative value to verify that the final ReLU is not applied
        # by default.
inp[0, 0, 1, 1] = -4.0
inp.requires_grad_()
self._grad_cam_test_assert(
net, net.conv1, inp, 0.5625 * inp, attribute_to_layer_input=True
)
def test_simple_input_conv_fc_with_final_relu(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
        # Add a negative value to verify that the final ReLU is applied when
        # relu_attributions=True.
inp[0, 0, 1, 1] = -4.0
inp.requires_grad_()
exp = 0.5625 * inp
exp[0, 0, 1, 1] = 0.0
self._grad_cam_test_assert(
net,
net.conv1,
inp,
exp,
attribute_to_layer_input=True,
relu_attributions=True,
)
def test_simple_multi_input_conv(self) -> None:
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()
inp2 = torch.ones((1, 1, 4, 4))
self._grad_cam_test_assert(
net, net.conv1, (inp, inp2), [[[[14.5, 19.0], [32.5, 37.0]]]]
)
def _grad_cam_test_assert(
self,
model: Module,
target_layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_activation: Union[
TensorLikeList,
Tuple[TensorLikeList, ...],
Tensor,
Tuple[Tensor, ...],
],
additional_input: Any = None,
attribute_to_layer_input: bool = False,
relu_attributions: bool = False,
attr_dim_summation: bool = True,
):
layer_gc = LayerGradCam(model, target_layer)
self.assertFalse(layer_gc.multiplies_by_inputs)
attributions = layer_gc.attribute(
test_input,
target=0,
additional_forward_args=additional_input,
attribute_to_layer_input=attribute_to_layer_input,
relu_attributions=relu_attributions,
attr_dim_summation=attr_dim_summation,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_activation, delta=0.01
)
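# A minimal sketch of typical LayerGradCam usage on a toy conv net: attribute
# to a conv layer, then upsample the coarse map back to the input resolution
# with LayerAttribution.interpolate. The model and shapes are illustrative
# assumptions, not part of the tests above.
def _example_layer_grad_cam() -> None:
    import torch
    import torch.nn as nn
    from captum.attr import LayerAttribution, LayerGradCam
    # Conv2d(1, 2, 3) maps a (1, 1, 4, 4) input to (1, 2, 2, 2); flattening
    # gives 8 features for the final linear layer.
    model = nn.Sequential(
        nn.Conv2d(1, 2, 3), nn.ReLU(), nn.Flatten(), nn.Linear(8, 2)
    )
    inp = torch.randn(1, 1, 4, 4)
    gc = LayerGradCam(model, model[0])
    # GradCAM sums over the channel dimension, yielding a (1, 1, 2, 2) map.
    attr = gc.attribute(inp, target=0)
    upsampled = LayerAttribution.interpolate(attr, (4, 4))
    assert upsampled.shape == (1, 1, 4, 4)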
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
from __future__ import print_function
from typing import cast, List, Tuple, Union
import torch
from captum.attr._core.layer.layer_deep_lift import LayerDeepLift, LayerDeepLiftShap
from tests.helpers.basic import (
assert_delta,
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_ConvNet_MaxPool3d,
BasicModel_MaxPool_ReLU,
BasicModel_MultiLayer,
LinearMaxPoolLinearModel,
ReLULinearModel,
)
from torch import Tensor
class TestDeepLift(BaseTest):
def test_relu_layer_deeplift(self) -> None:
model = ReLULinearModel(inplace=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
layer_dl = LayerDeepLift(model, model.relu)
attributions, delta = layer_dl.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 15.0])
assert_delta(self, delta)
    def test_relu_layer_deeplift_wo_multiplying_by_inputs(self) -> None:
model = ReLULinearModel(inplace=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
layer_dl = LayerDeepLift(model, model.relu, multiply_by_inputs=False)
attributions = layer_dl.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 1.0])
def test_relu_layer_deeplift_multiple_output(self) -> None:
model = BasicModel_MultiLayer(multi_input_module=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
layer_dl = LayerDeepLift(model, model.multi_relu)
attributions, delta = layer_dl.attribute(
inputs[0],
baselines[0],
target=0,
attribute_to_layer_input=False,
return_convergence_delta=True,
)
assertTensorTuplesAlmostEqual(
self, attributions, ([[0.0, -1.0, -1.0, -1.0]], [[0.0, -1.0, -1.0, -1.0]])
)
assert_delta(self, delta)
def test_relu_layer_deeplift_add_args(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
layer_dl = LayerDeepLift(model, model.relu)
attributions, delta = layer_dl.attribute(
inputs,
baselines,
additional_forward_args=3.0,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 45.0])
assert_delta(self, delta)
def test_linear_layer_deeplift(self) -> None:
model = ReLULinearModel(inplace=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
layer_dl = LayerDeepLift(model, model.l3)
attributions, delta = layer_dl.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 15.0])
assert_delta(self, delta)
def test_relu_deeplift_with_custom_attr_func(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
attr_method = LayerDeepLift(model, model.l3)
self._relu_custom_attr_func_assert(attr_method, inputs, baselines, [[2.0]])
def test_inplace_maxpool_relu_with_custom_attr_func(self) -> None:
model = BasicModel_MaxPool_ReLU(inplace=True)
inp = torch.tensor([[[1.0, 2.0, -4.0], [-3.0, -2.0, -1.0]]])
dl = LayerDeepLift(model, model.maxpool)
def custom_att_func(mult, inp, baseline):
assertTensorAlmostEqual(self, mult[0], [[[1.0], [0.0]]])
assertTensorAlmostEqual(self, inp[0], [[[2.0], [-1.0]]])
assertTensorAlmostEqual(self, baseline[0], [[[0.0], [0.0]]])
return mult
dl.attribute(inp, custom_attribution_func=custom_att_func)
def test_linear_layer_deeplift_batch(self) -> None:
model = ReLULinearModel(inplace=True)
_, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
x1 = torch.tensor(
[[-10.0, 1.0, -5.0], [-10.0, 1.0, -5.0], [-10.0, 1.0, -5.0]],
requires_grad=True,
)
x2 = torch.tensor(
[[3.0, 3.0, 1.0], [3.0, 3.0, 1.0], [3.0, 3.0, 1.0]], requires_grad=True
)
inputs = (x1, x2)
layer_dl = LayerDeepLift(model, model.l3)
attributions, delta = layer_dl.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 15.0])
assert_delta(self, delta)
attributions, delta = layer_dl.attribute(
inputs,
baselines,
attribute_to_layer_input=False,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions, [[15.0], [15.0], [15.0]])
assert_delta(self, delta)
def test_relu_layer_deepliftshap(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
layer_dl_shap = LayerDeepLiftShap(model, model.relu)
attributions, delta = layer_dl_shap.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 15.0])
assert_delta(self, delta)
    def test_relu_layer_deepliftshap_wo_multiplying_by_inputs(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
layer_dl_shap = LayerDeepLiftShap(model, model.relu, multiply_by_inputs=False)
attributions = layer_dl_shap.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 1.0])
def test_relu_layer_deepliftshap_multiple_output(self) -> None:
model = BasicModel_MultiLayer(multi_input_module=True)
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
layer_dl = LayerDeepLiftShap(model, model.multi_relu)
attributions, delta = layer_dl.attribute(
inputs[0],
baselines[0],
target=0,
attribute_to_layer_input=False,
return_convergence_delta=True,
)
assertTensorTuplesAlmostEqual(
self, attributions, ([[0.0, -1.0, -1.0, -1.0]], [[0.0, -1.0, -1.0, -1.0]])
)
assert_delta(self, delta)
def test_linear_layer_deepliftshap(self) -> None:
model = ReLULinearModel(inplace=True)
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
layer_dl_shap = LayerDeepLiftShap(model, model.l3)
attributions, delta = layer_dl_shap.attribute(
inputs,
baselines,
attribute_to_layer_input=True,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions[0], [0.0, 15.0])
assert_delta(self, delta)
attributions, delta = layer_dl_shap.attribute(
inputs,
baselines,
attribute_to_layer_input=False,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attributions, [[15.0]])
assert_delta(self, delta)
def test_relu_deepliftshap_with_custom_attr_func(self) -> None:
model = ReLULinearModel()
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
attr_method = LayerDeepLiftShap(model, model.l3)
self._relu_custom_attr_func_assert(attr_method, inputs, baselines, [[2.0]])
def test_lin_maxpool_lin_classification(self) -> None:
inputs = torch.ones(2, 4)
baselines = torch.tensor([[1, 2, 3, 9], [4, 8, 6, 7]]).float()
model = LinearMaxPoolLinearModel()
dl = LayerDeepLift(model, model.pool1)
attrs, delta = dl.attribute(
inputs, baselines, target=0, return_convergence_delta=True
)
expected = [[[-8.0]], [[-7.0]]]
expected_delta = [0.0, 0.0]
assertTensorAlmostEqual(self, cast(Tensor, attrs), expected, 0.0001, "max")
assertTensorAlmostEqual(self, delta, expected_delta, 0.0001, "max")
def test_convnet_maxpool2d_classification(self) -> None:
inputs = 100 * torch.randn(2, 1, 10, 10)
model = BasicModel_ConvNet()
model.eval()
dl = LayerDeepLift(model, model.pool1)
dl2 = LayerDeepLift(model, model.conv2)
attr = dl.attribute(inputs, target=0)
attr2 = dl2.attribute(inputs, target=0, attribute_to_layer_input=True)
self.assertTrue(cast(Tensor, attr).sum() == cast(Tensor, attr2).sum())
def test_convnet_maxpool3d_classification(self) -> None:
inputs = 100 * torch.randn(2, 1, 10, 10, 10)
model = BasicModel_ConvNet_MaxPool3d()
model.eval()
dl = LayerDeepLift(model, model.pool1)
dl2 = LayerDeepLift(model, model.conv2)
        # Using `with self.assertRaises(AssertionError)` doesn't work on
        # Circle CI, where the error is converted into a RuntimeError.
attr = dl.attribute(inputs, target=0, attribute_to_layer_input=False)
attr2 = dl2.attribute(inputs, target=0, attribute_to_layer_input=True)
self.assertTrue(cast(Tensor, attr).sum() == cast(Tensor, attr2).sum())
def _relu_custom_attr_func_assert(
self,
attr_method: Union[LayerDeepLift, LayerDeepLiftShap],
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: Union[Tensor, Tuple[Tensor, ...]],
expected: List[List[float]],
) -> None:
def custom_attr_func(multipliers, inputs, baselines):
return tuple(multiplier * 2 for multiplier in multipliers)
attr = attr_method.attribute(
inputs,
baselines,
custom_attribution_func=custom_attr_func,
return_convergence_delta=True,
)
assertTensorAlmostEqual(self, attr[0], expected, 1e-19)
def _create_inps_and_base_for_deeplift_neuron_layer_testing() -> Tuple[
Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]
]:
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
b1 = torch.tensor([[0.0, 0.0, 0.0]], requires_grad=True)
b2 = torch.tensor([[0.0, 0.0, 0.0]], requires_grad=True)
inputs = (x1, x2)
baselines = (b1, b2)
return inputs, baselines
def _create_inps_and_base_for_deepliftshap_neuron_layer_testing() -> Tuple[
Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]
]:
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
b1 = torch.tensor(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], requires_grad=True
)
b2 = torch.tensor(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], requires_grad=True
)
inputs = (x1, x2)
baselines = (b1, b2)
return inputs, baselines
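# A minimal sketch of LayerDeepLift usage on a toy model, following the
# pattern of the tests above; the model and shapes are illustrative
# assumptions, not part of the suite.
def _example_layer_deep_lift() -> None:
    import torch
    import torch.nn as nn
    from captum.attr import LayerDeepLift
    model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
    dl = LayerDeepLift(model, model[1])
    inp = torch.randn(1, 3)
    # The convergence delta measures how far the attribution sum is from the
    # difference between model outputs at the input and at the baseline.
    attr, delta = dl.attribute(
        inp, baselines=torch.zeros(1, 3), target=0, return_convergence_delta=True
    )
    assert attr.shape == (1, 4)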
|
#!/usr/bin/env python3
import unittest
from typing import Any, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType
from captum.attr._core.layer.layer_feature_ablation import LayerFeatureAblation
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_ablation_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._ablation_test_assert(
net,
net.linear0,
inp,
([280.0, 280.0, 120.0],),
layer_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
attribute_to_layer_input=True,
)
def test_multi_input_ablation(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
baseline = torch.tensor([[1.0, 2.0, 3.0]])
self._ablation_test_assert(
net,
net.model.linear1,
(inp1, inp2, inp3),
[[168.0, 992.0, 148.0], [84.0, 632.0, 120.0]],
additional_input=(1,),
baselines=baseline,
perturbations_per_eval=(1, 2, 3),
attribute_to_layer_input=True,
)
self._ablation_test_assert(
net,
net.model.linear0,
(inp1, inp2, inp3),
[[168.0, 992.0, 148.0], [84.0, 632.0, 120.0]],
additional_input=(1,),
baselines=baseline,
perturbations_per_eval=(1, 2, 3),
attribute_to_layer_input=False,
)
def test_multi_input_ablation_with_layer_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
baseline = torch.tensor([[1.0, 2.0, 3.0]])
layer_mask = torch.tensor([[0, 1, 0], [0, 1, 2]])
self._ablation_test_assert(
net,
net.model.linear1,
(inp1, inp2, inp3),
[[316.0, 992.0, 316.0], [84.0, 632.0, 120.0]],
additional_input=(1,),
baselines=baseline,
perturbations_per_eval=(1, 2, 3),
layer_mask=layer_mask,
attribute_to_layer_input=True,
)
self._ablation_test_assert(
net,
net.model.linear0,
(inp1, inp2, inp3),
[[316.0, 992.0, 316.0], [84.0, 632.0, 120.0]],
additional_input=(1,),
baselines=baseline,
layer_mask=layer_mask,
perturbations_per_eval=(1, 2, 3),
)
def test_simple_multi_input_conv_intermediate(self) -> None:
net = BasicModel_ConvNet_One_Conv(inplace=True)
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
[[[[4.0, 13.0], [40.0, 49.0]], [[0, 0], [-15.0, -24.0]]]],
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
)
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
([[[4.0, 13.0], [40.0, 49.0]], [[0, 0], [-15.0, -24.0]]],),
baselines=torch.tensor(
[[[-4.0, -13.0], [-2.0, -2.0]], [[0, 0], [0.0, 0.0]]]
),
perturbations_per_eval=(1, 2, 4, 8, 12, 16),
attribute_to_layer_input=True,
)
self._ablation_test_assert(
net,
net.relu1,
(inp, inp2),
[[[[17.0, 17.0], [67.0, 67.0]], [[0, 0], [-39.0, -39.0]]]],
perturbations_per_eval=(1, 2, 4),
layer_mask=torch.tensor([[[[0, 0], [1, 1]], [[2, 2], [3, 3]]]]),
)
def test_simple_multi_output_ablation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._ablation_test_assert(
net, net.multi_relu, inp, ([[0.0, 7.0, 7.0, 7.0]], [[0.0, 7.0, 7.0, 7.0]])
)
def test_simple_multi_output_input_ablation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._ablation_test_assert(
net,
net.multi_relu,
inp,
([[0.0, 7.0, 7.0, 7.0]], [[0.0, 7.0, 7.0, 7.0]]),
attribute_to_layer_input=True,
)
def _ablation_test_assert(
self,
model: Module,
layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_ablation: Union[List, Tuple],
layer_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: Union[None, int] = 0,
attribute_to_layer_input: bool = False,
) -> None:
for batch_size in perturbations_per_eval:
ablation = LayerFeatureAblation(model, layer)
attributions = ablation.attribute(
test_input,
target=target,
layer_mask=layer_mask,
additional_forward_args=additional_input,
layer_baselines=baselines,
perturbations_per_eval=batch_size,
attribute_to_layer_input=attribute_to_layer_input,
)
assertTensorTuplesAlmostEqual(self, attributions, expected_ablation)
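# A minimal sketch of LayerFeatureAblation: neurons of the chosen layer are
# grouped by layer_mask, each group is replaced by its baseline value, and the
# resulting change in the output is the group's attribution. The toy model is
# an illustrative assumption, not part of the tests above.
def _example_layer_feature_ablation() -> None:
    import torch
    import torch.nn as nn
    from captum.attr import LayerFeatureAblation
    model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
    ablation = LayerFeatureAblation(model, model[0])
    inp = torch.randn(1, 3)
    # Ablate the four layer neurons in two groups: {0, 1} and {2, 3}.
    mask = torch.tensor([[0, 0, 1, 1]])
    attr = ablation.attribute(inp, target=0, layer_mask=mask)
    assert attr.shape == (1, 4)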
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import torch
import torch.nn as nn
from captum.attr import LayerLRP
from captum.attr._utils.lrp_rules import Alpha1_Beta0_Rule, EpsilonRule, GammaRule
from ...helpers.basic import assertTensorAlmostEqual, BaseTest
from ...helpers.basic_models import BasicModel_ConvNet_One_Conv, SimpleLRPModel
def _get_basic_config():
input = torch.arange(16).view(1, 1, 4, 4).float()
return BasicModel_ConvNet_One_Conv(), input
def _get_simple_model(inplace=False):
model = SimpleLRPModel(inplace)
inputs = torch.tensor([[1.0, 2.0, 3.0]])
return model, inputs
def _get_simple_model2(inplace=False):
class MyModel(nn.Module):
def __init__(self, inplace) -> None:
super().__init__()
self.lin = nn.Linear(2, 2)
self.lin.weight = nn.Parameter(torch.ones(2, 2))
self.relu = torch.nn.ReLU(inplace=inplace)
def forward(self, input):
return self.relu(self.lin(input))[0].unsqueeze(0)
input = torch.tensor([[1.0, 2.0], [1.0, 3.0]])
model = MyModel(inplace)
return model, input
class Test(BaseTest):
def test_lrp_creator(self) -> None:
model, _ = _get_basic_config()
model.conv1.rule = 1
self.assertRaises(TypeError, LayerLRP, model, model.conv1)
def test_lrp_creator_activation(self) -> None:
model, inputs = _get_basic_config()
model.add_module("sigmoid", nn.Sigmoid())
lrp = LayerLRP(model, model.conv1)
self.assertRaises(TypeError, lrp.attribute, inputs)
def test_lrp_basic_attributions(self):
model, inputs = _get_basic_config()
logits = model(inputs)
score, classIndex = torch.max(logits, 1)
lrp = LayerLRP(model, model.conv1)
relevance, delta = lrp.attribute(
inputs, classIndex.item(), return_convergence_delta=True
)
assertTensorAlmostEqual(
self, relevance[0], torch.Tensor([[[0, 4], [31, 40]], [[0, 0], [-6, -15]]])
)
assertTensorAlmostEqual(self, delta, torch.Tensor([0]))
def test_lrp_simple_attributions(self):
model, inputs = _get_simple_model(inplace=False)
model.eval()
model.linear.rule = EpsilonRule()
model.linear2.rule = EpsilonRule()
lrp_upper = LayerLRP(model, model.linear2)
relevance_upper, delta = lrp_upper.attribute(
inputs, attribute_to_layer_input=True, return_convergence_delta=True
)
lrp_lower = LayerLRP(model, model.linear)
relevance_lower = lrp_lower.attribute(inputs)
assertTensorAlmostEqual(self, relevance_lower[0], relevance_upper[0])
self.assertEqual(delta.item(), 0)
def test_lrp_simple_repeat_attributions(self) -> None:
model, inputs = _get_simple_model()
model.eval()
model.linear.rule = GammaRule()
model.linear2.rule = Alpha1_Beta0_Rule()
output = model(inputs)
lrp = LayerLRP(model, model.linear)
_ = lrp.attribute(inputs)
output_after = model(inputs)
assertTensorAlmostEqual(self, output, output_after)
def test_lrp_simple_inplaceReLU(self) -> None:
model_default, inputs = _get_simple_model()
model_inplace, _ = _get_simple_model(inplace=True)
for model in [model_default, model_inplace]:
model.eval()
model.linear.rule = EpsilonRule()
model.linear2.rule = EpsilonRule()
lrp_default = LayerLRP(model_default, model_default.linear2)
lrp_inplace = LayerLRP(model_inplace, model_inplace.linear2)
relevance_default = lrp_default.attribute(inputs, attribute_to_layer_input=True)
relevance_inplace = lrp_inplace.attribute(inputs, attribute_to_layer_input=True)
assertTensorAlmostEqual(self, relevance_default[0], relevance_inplace[0])
def test_lrp_simple_tanh(self) -> None:
class Model(nn.Module):
def __init__(self) -> None:
super(Model, self).__init__()
self.linear = nn.Linear(3, 3, bias=False)
self.linear.weight.data.fill_(0.1)
self.tanh = torch.nn.Tanh()
self.linear2 = nn.Linear(3, 1, bias=False)
self.linear2.weight.data.fill_(0.1)
def forward(self, x):
return self.linear2(self.tanh(self.linear(x)))
model = Model()
_, inputs = _get_simple_model()
lrp = LayerLRP(model, model.linear)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(
self, relevance[0], torch.Tensor([0.0537, 0.0537, 0.0537])
        )  # Expected result when tanh is skipped during relevance propagation.
def test_lrp_simple_attributions_GammaRule(self) -> None:
model, inputs = _get_simple_model()
with torch.no_grad():
model.linear.weight.data[0][0] = -2
model.eval()
model.linear.rule = GammaRule(gamma=1)
model.linear2.rule = GammaRule()
lrp = LayerLRP(model, model.linear)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(self, relevance[0], torch.tensor([24.0, 36.0, 36.0]))
def test_lrp_simple_attributions_AlphaBeta(self) -> None:
model, inputs = _get_simple_model()
with torch.no_grad():
model.linear.weight.data[0][0] = -2
model.eval()
model.linear.rule = Alpha1_Beta0_Rule()
model.linear2.rule = Alpha1_Beta0_Rule()
lrp = LayerLRP(model, model.linear)
relevance = lrp.attribute(inputs)
assertTensorAlmostEqual(self, relevance[0], torch.tensor([24.0, 36.0, 36.0]))
def test_lrp_simple_attributions_all_layers(self) -> None:
model, inputs = _get_simple_model(inplace=False)
model.eval()
model.linear.rule = EpsilonRule()
model.linear2.rule = EpsilonRule()
layers = [model.linear, model.linear2]
lrp = LayerLRP(model, layers)
relevance = lrp.attribute(inputs, attribute_to_layer_input=True)
self.assertEqual(len(relevance), 2)
assertTensorAlmostEqual(self, relevance[0][0], torch.tensor([18.0, 36.0, 54.0]))
def test_lrp_simple_attributions_all_layers_delta(self) -> None:
model, inputs = _get_simple_model(inplace=False)
model.eval()
model.linear.rule = EpsilonRule()
model.linear2.rule = EpsilonRule()
layers = [model.linear, model.linear2]
lrp = LayerLRP(model, layers)
inputs = torch.cat((inputs, 2 * inputs))
relevance, delta = lrp.attribute(
inputs, attribute_to_layer_input=True, return_convergence_delta=True
)
self.assertEqual(len(relevance), len(delta))
assertTensorAlmostEqual(
self,
relevance[0],
torch.tensor([[18.0, 36.0, 54.0], [36.0, 72.0, 108.0]]),
)
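# A minimal sketch of LayerLRP with explicit propagation rules, mirroring the
# rule assignment used in the tests above; the toy model is an illustrative
# assumption, not part of the suite.
def _example_layer_lrp() -> None:
    model = nn.Sequential(nn.Linear(3, 3), nn.ReLU(), nn.Linear(3, 1))
    # Rules are attached per layer before running the attribution.
    model[0].rule = GammaRule()
    model[2].rule = EpsilonRule()
    lrp = LayerLRP(model, model[0])
    relevance = lrp.attribute(torch.tensor([[1.0, 2.0, 3.0]]))
    assert relevance.shape == (1, 3)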
|
#!/usr/bin/env python3
import unittest
from typing import Any, List, Tuple, Union
import torch
from captum._utils.typing import ModuleOrModuleList
from captum.attr._core.layer.layer_activation import LayerActivation
from captum.attr._core.layer.layer_gradient_x_activation import LayerGradientXActivation
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicEmbeddingModel,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_gradient_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._layer_activation_test_assert(net, net.linear0, inp, [[0.0, 400.0, 0.0]])
def test_simple_input_gradient_activation_no_grad(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
with torch.no_grad():
self._layer_activation_test_assert(
net, net.linear0, inp, [[0.0, 400.0, 0.0]]
)
def test_simple_linear_gradient_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._layer_activation_test_assert(
net, net.linear1, inp, [[90.0, 101.0, 101.0, 101.0]]
)
def test_multi_layer_linear_gradient_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
module_list: List[Module] = [net.linear0, net.linear1]
self._layer_activation_test_assert(
net,
module_list,
inp,
([[0.0, 400.0, 0.0]], [[90.0, 101.0, 101.0, 101.0]]),
)
def test_simple_linear_gradient_activation_no_grad(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
        # This deactivates requires_grad on all parameters. Some models
        # explicitly do that before being interpreted.
for param in net.parameters():
param.requires_grad = False
self._layer_activation_test_assert(
net, net.linear1, inp, [[90.0, 101.0, 101.0, 101.0]]
)
def test_simple_multi_gradient_activation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[3.0, 4.0, 0.0]])
self._layer_activation_test_assert(
net, net.multi_relu, inp, ([[0.0, 8.0, 8.0, 8.0]], [[0.0, 8.0, 8.0, 8.0]])
)
def test_simple_relu_gradient_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[3.0, 4.0, 0.0]], requires_grad=True)
self._layer_activation_test_assert(net, net.relu, inp, [[0.0, 8.0, 8.0, 8.0]])
def test_multi_layer_multi_gradient_activation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[3.0, 4.0, 0.0]])
module_list: List[Module] = [net.multi_relu, net.linear0]
self._layer_activation_test_assert(
net,
module_list,
inp,
[([[0.0, 8.0, 8.0, 8.0]], [[0.0, 8.0, 8.0, 8.0]]), [[9.0, 12.0, 0.0]]],
)
def test_simple_output_gradient_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._layer_activation_test_assert(net, net.linear2, inp, [[392.0, 0.0]])
def test_simple_gradient_activation_multi_input_linear2(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._layer_activation_test_assert(
net, net.model.linear2, (inp1, inp2, inp3), [[392.0, 0.0]], (4,)
)
def test_simple_gradient_activation_multi_input_relu(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._layer_activation_test_assert(
net, net.model.relu, (inp1, inp2), [[90.0, 101.0, 101.0, 101.0]], (inp3, 5)
)
def test_gradient_activation_embedding(self) -> None:
input1 = torch.tensor([2, 5, 0, 1])
input2 = torch.tensor([3, 0, 0, 2])
model = BasicEmbeddingModel()
layer_act = LayerGradientXActivation(model, model.embedding1)
self.assertEqual(
list(layer_act.attribute(inputs=(input1, input2)).shape), [4, 100]
)
def test_gradient_activation_embedding_no_grad(self) -> None:
input1 = torch.tensor([2, 5, 0, 1])
input2 = torch.tensor([3, 0, 0, 2])
model = BasicEmbeddingModel()
for param in model.parameters():
param.requires_grad = False
with torch.no_grad():
layer_act = LayerGradientXActivation(model, model.embedding1)
self.assertEqual(
list(layer_act.attribute(inputs=(input1, input2)).shape), [4, 100]
)
def _layer_activation_test_assert(
self,
model: Module,
target_layer: ModuleOrModuleList,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_activation: Union[List, Tuple[List[List[float]], ...]],
additional_input: Any = None,
) -> None:
layer_act = LayerGradientXActivation(model, target_layer)
self.assertTrue(layer_act.multiplies_by_inputs)
attributions = layer_act.attribute(
test_input, target=0, additional_forward_args=additional_input
)
if isinstance(target_layer, Module):
assertTensorTuplesAlmostEqual(
self, attributions, expected_activation, delta=0.01
)
else:
for i in range(len(target_layer)):
assertTensorTuplesAlmostEqual(
self, attributions[i], expected_activation[i], delta=0.01
)
        # Test layer gradients without multiplying by activations.
layer_grads = LayerGradientXActivation(
model, target_layer, multiply_by_inputs=False
)
layer_act = LayerActivation(model, target_layer)
self.assertFalse(layer_grads.multiplies_by_inputs)
grads = layer_grads.attribute(
test_input, target=0, additional_forward_args=additional_input
)
acts = layer_act.attribute(test_input, additional_forward_args=additional_input)
if isinstance(target_layer, Module):
assertTensorTuplesAlmostEqual(
self,
attributions,
tuple(act * grad for act, grad in zip(acts, grads)),
delta=0.01,
)
else:
for i in range(len(target_layer)):
assertTensorTuplesAlmostEqual(
self,
attributions[i],
tuple(act * grad for act, grad in zip(acts[i], grads[i])),
delta=0.01,
)
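# A minimal sketch of LayerGradientXActivation on a toy model: the attribution
# is the elementwise product of the layer activation and the gradient of the
# target output w.r.t. the layer, the identity verified by the helper above.
# The model below is an illustrative assumption, not part of the tests.
def _example_layer_gradient_x_activation() -> None:
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
    lga = LayerGradientXActivation(model, model[0])
    # One attribution value per neuron of the chosen layer.
    attr = lga.attribute(torch.randn(1, 3), target=0)
    assert attr.shape == (1, 4)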
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from typing import Any, cast, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType
from captum.attr._core.layer.layer_conductance import LayerConductance
from tests.attr.helpers.conductance_reference import ConductanceReference
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_conductance(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_test_assert(net, net.linear0, inp, [[0.0, 390.0, 0.0]])
def test_simple_input_multi_conductance(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_test_assert(
net,
net.multi_relu,
inp,
([[90.0, 100.0, 100.0, 100.0]], [[90.0, 100.0, 100.0, 100.0]]),
)
def test_simple_input_with_scalar_baseline_conductance(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_test_assert(
net, net.linear0, inp, [[0.0, 390.0, 0.0]], baselines=0.0
)
def test_simple_linear_conductance(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._conductance_test_assert(
net, net.linear1, inp, [[90.0, 100.0, 100.0, 100.0]]
)
def test_simple_relu_conductance(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._conductance_test_assert(net, net.relu, inp, [[90.0, 100.0, 100.0, 100.0]])
def test_simple_output_conductance(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._conductance_test_assert(net, net.linear2, inp, [[390.0, 0.0]])
def test_simple_multi_input_linear2_conductance(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._conductance_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
[[390.0, 0.0]],
additional_args=(4,),
)
def test_simple_multi_input_relu_conductance(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._conductance_test_assert(
net,
net.model.relu,
(inp1, inp2),
[[90.0, 100.0, 100.0, 100.0]],
additional_args=(inp3, 5),
)
def test_simple_multi_input_relu_conductance_batch(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0], [0.0, 0.0, 10.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0], [0.0, 0.0, 10.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 5.0]])
self._conductance_test_assert(
net,
net.model.relu,
(inp1, inp2),
[[90.0, 100.0, 100.0, 100.0], [100.0, 100.0, 100.0, 100.0]],
additional_args=(inp3, 5),
)
def test_matching_conv1_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(1, 1, 10, 10, requires_grad=True)
self._conductance_reference_test_assert(net, net.conv1, inp, n_steps=100)
def test_matching_pool1_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(1, 1, 10, 10)
self._conductance_reference_test_assert(net, net.pool1, inp)
def test_matching_conv2_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(1, 1, 10, 10, requires_grad=True)
self._conductance_reference_test_assert(net, net.conv2, inp)
def test_matching_pool2_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(1, 1, 10, 10)
self._conductance_reference_test_assert(net, net.pool2, inp)
def test_matching_conv_multi_input_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(4, 1, 10, 10, requires_grad=True)
self._conductance_reference_test_assert(net, net.relu3, inp)
def test_matching_conv_with_baseline_conductance(self) -> None:
net = BasicModel_ConvNet()
inp = 100 * torch.randn(3, 1, 10, 10)
baseline = 100 * torch.randn(3, 1, 10, 10, requires_grad=True)
self._conductance_reference_test_assert(net, net.fc1, inp, baseline)
def _conductance_test_assert(
self,
model: Module,
target_layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_conductance: Union[List[List[float]], Tuple[List[List[float]], ...]],
baselines: BaselineType = None,
additional_args: Any = None,
) -> None:
cond = LayerConductance(model, target_layer)
self.assertTrue(cond.multiplies_by_inputs)
for internal_batch_size in (None, 4, 20):
attributions, delta = cond.attribute(
test_input,
baselines=baselines,
target=0,
n_steps=500,
method="gausslegendre",
additional_forward_args=additional_args,
internal_batch_size=internal_batch_size,
return_convergence_delta=True,
)
delta_condition = (delta.abs() < 0.01).all()
self.assertTrue(
delta_condition,
"Sum of attributions does {}"
" not match the difference of endpoints.".format(delta),
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_conductance, delta=0.1
)
def _conductance_reference_test_assert(
self,
model: Module,
target_layer: Module,
test_input: Tensor,
test_baseline: Union[None, Tensor] = None,
n_steps=300,
) -> None:
layer_output = None
def forward_hook(module, inp, out):
nonlocal layer_output
layer_output = out
hook = target_layer.register_forward_hook(forward_hook)
final_output = model(test_input)
layer_output = cast(Tensor, layer_output)
hook.remove()
target_index = torch.argmax(torch.sum(final_output, 0))
cond = LayerConductance(model, target_layer)
cond_ref = ConductanceReference(model, target_layer)
attributions, delta = cast(
Tuple[Tensor, Tensor],
cond.attribute(
test_input,
baselines=test_baseline,
target=target_index,
n_steps=n_steps,
method="gausslegendre",
return_convergence_delta=True,
),
)
delta_condition = (delta.abs() < 0.005).all()
self.assertTrue(
delta_condition,
"Sum of attribution values does {} "
" not match the difference of endpoints.".format(delta),
)
attributions_reference = cond_ref.attribute(
test_input,
baselines=test_baseline,
target=target_index,
n_steps=n_steps,
method="gausslegendre",
)
# Check that layer output size matches conductance size.
self.assertEqual(layer_output.shape, attributions.shape)
# Check that reference implementation output matches standard implementation.
assertTensorAlmostEqual(
self,
attributions,
attributions_reference,
delta=0.07,
mode="max",
)
# Test if batching is working correctly for inputs with multiple examples
if test_input.shape[0] > 1:
for i in range(test_input.shape[0]):
single_attributions = cast(
Tensor,
cond.attribute(
test_input[i : i + 1],
baselines=test_baseline[i : i + 1]
if test_baseline is not None
else None,
target=target_index,
n_steps=n_steps,
method="gausslegendre",
),
)
                # Verify that attributions computed for each example
                # independently match the corresponding attributions of the
                # batched input.
assertTensorAlmostEqual(
self,
attributions[i : i + 1],
single_attributions,
delta=0.01,
mode="max",
)
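# A minimal sketch of LayerConductance on a toy model, including the
# completeness property checked throughout this file: the attribution sum
# should approximately equal the difference between the target outputs at the
# input and at the baseline. The model is an illustrative assumption.
def _example_layer_conductance() -> None:
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
    cond = LayerConductance(model, model[1])
    inp = torch.randn(1, 3)
    attr, delta = cond.attribute(
        inp, baselines=torch.zeros(1, 3), target=0, return_convergence_delta=True
    )
    # delta shrinks toward zero as n_steps grows; see the assertions above.
    assert attr.shape == (1, 4)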
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from typing import Any, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType
from captum.attr._core.layer.internal_influence import InternalInfluence
from tests.helpers.basic import assertTensorTuplesAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._internal_influence_test_assert(net, net.linear0, inp, [[3.9, 3.9, 3.9]])
def test_simple_input_multi_internal_inf(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._internal_influence_test_assert(
net,
net.multi_relu,
inp,
([[0.9, 1.0, 1.0, 1.0]], [[0.9, 1.0, 1.0, 1.0]]),
attribute_to_layer_input=True,
)
def test_simple_linear_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._internal_influence_test_assert(
net, net.linear1, inp, [[0.9, 1.0, 1.0, 1.0]]
)
def test_simple_relu_input_internal_inf_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._internal_influence_test_assert(
net, net.relu, inp, ([0.9, 1.0, 1.0, 1.0],), attribute_to_layer_input=True
)
def test_simple_linear_internal_inf_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._internal_influence_test_assert(
net, net.linear1, inp, [[0.9, 1.0, 1.0, 1.0]]
)
def test_simple_relu_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[3.0, 4.0, 0.0]], requires_grad=True)
self._internal_influence_test_assert(net, net.relu, inp, [[1.0, 1.0, 1.0, 1.0]])
def test_simple_output_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._internal_influence_test_assert(net, net.linear2, inp, [[1.0, 0.0]])
def test_simple_with_baseline_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 80.0, 0.0]])
base = torch.tensor([[0.0, -20.0, 0.0]])
self._internal_influence_test_assert(
net, net.linear1, inp, [[0.7, 0.8, 0.8, 0.8]], base
)
def test_simple_multi_input_linear2_internal_inf(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._internal_influence_test_assert(
net,
net.model.linear2,
(inp1, inp2, inp3),
[[1.0, 0.0]],
additional_args=(4,),
)
def test_simple_multi_input_relu_internal_inf(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._internal_influence_test_assert(
net,
net.model.relu,
(inp1, inp2),
[[1.0, 1.0, 1.0, 1.0]],
additional_args=(inp3, 5),
)
def test_simple_multi_input_batch_relu_internal_inf(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 80.0, 0.0]])
inp2 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 20.0, 0.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 20.0, 0.0]])
self._internal_influence_test_assert(
net,
net.model.linear1,
(inp1, inp2),
[[0.95, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]],
additional_args=(inp3, 5),
)
def test_multiple_linear_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor(
[
[0.0, 100.0, 0.0],
[0.0, 100.0, 0.0],
[0.0, 100.0, 0.0],
[0.0, 100.0, 0.0],
],
requires_grad=True,
)
self._internal_influence_test_assert(
net,
net.linear1,
inp,
[
[0.9, 1.0, 1.0, 1.0],
[0.9, 1.0, 1.0, 1.0],
[0.9, 1.0, 1.0, 1.0],
[0.9, 1.0, 1.0, 1.0],
],
)
def test_multiple_with_baseline_internal_inf(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 80.0, 0.0], [30.0, 30.0, 0.0]], requires_grad=True)
base = torch.tensor(
[[0.0, -20.0, 0.0], [-20.0, -20.0, 0.0]], requires_grad=True
)
self._internal_influence_test_assert(
net, net.linear1, inp, [[0.7, 0.8, 0.8, 0.8], [0.5, 0.6, 0.6, 0.6]], base
)
def _internal_influence_test_assert(
self,
model: Module,
target_layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_activation: Union[
float,
List[List[float]],
Tuple[List[float], ...],
Tuple[List[List[float]], ...],
],
baseline: BaselineType = None,
additional_args: Any = None,
attribute_to_layer_input: bool = False,
):
for internal_batch_size in [None, 5, 20]:
int_inf = InternalInfluence(model, target_layer)
self.assertFalse(int_inf.multiplies_by_inputs)
attributions = int_inf.attribute(
test_input,
baselines=baseline,
target=0,
n_steps=500,
method="riemann_trapezoid",
additional_forward_args=additional_args,
internal_batch_size=internal_batch_size,
attribute_to_layer_input=attribute_to_layer_input,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_activation, delta=0.01, mode="max"
)
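# A minimal sketch of InternalInfluence on a toy model. Unlike Layer
# Conductance, it does not multiply by the (input - baseline) activations
# (multiplies_by_inputs is False above), so it measures gradients integrated
# along the path rather than a completeness-preserving attribution. The model
# is an illustrative assumption, not part of the tests.
def _example_internal_influence() -> None:
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
    int_inf = InternalInfluence(model, model[1])
    attr = int_inf.attribute(torch.randn(1, 3), target=0, n_steps=50)
    assert attr.shape == (1, 4)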
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import unittest
from typing import Any, List, Tuple, Union
import torch
import torch.nn as nn
from captum.attr._core.layer.layer_activation import LayerActivation
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
Conv1dSeqModel,
)
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_simple_input_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
self._layer_activation_test_assert(net, net.linear0, inp, [[0.0, 100.0, 0.0]])
def test_simple_linear_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._layer_activation_test_assert(
net, net.linear1, inp, [[90.0, 101.0, 101.0, 101.0]]
)
def test_simple_multi_linear_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._multiple_layer_activation_test_assert(
net,
[net.linear1, net.linear0],
inp,
([[90.0, 101.0, 101.0, 101.0]], [[0.0, 100.0, 0.0]]),
)
def test_simple_relu_activation_input_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[2.0, -5.0, 4.0]])
self._layer_activation_test_assert(
net, net.relu, inp, ([-9.0, 2.0, 2.0, 2.0],), attribute_to_layer_input=True
)
def test_simple_linear_activation_inplace(self) -> None:
net = BasicModel_MultiLayer(inplace=True)
inp = torch.tensor([[2.0, -5.0, 4.0]])
self._layer_activation_test_assert(
net, net.linear1, inp, [[-9.0, 2.0, 2.0, 2.0]]
)
def test_simple_relu_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[3.0, 4.0, 0.0]], requires_grad=True)
self._layer_activation_test_assert(net, net.relu, inp, [[0.0, 8.0, 8.0, 8.0]])
def test_simple_output_activation(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 100.0, 0.0]])
self._layer_activation_test_assert(net, net.linear2, inp, [[392.0, 394.0]])
def test_simple_multi_output_activation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._layer_activation_test_assert(
net, net.multi_relu, inp, ([[0.0, 7.0, 7.0, 7.0]], [[0.0, 7.0, 7.0, 7.0]])
)
def test_simple_multi_layer_multi_output_activation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._multiple_layer_activation_test_assert(
net,
[net.multi_relu, net.linear0, net.linear1],
inp,
[
([[0.0, 7.0, 7.0, 7.0]], [[0.0, 7.0, 7.0, 7.0]]),
[[0.0, 6.0, 0.0]],
[[-4.0, 7.0, 7.0, 7.0]],
],
)
def test_simple_multi_input_activation(self) -> None:
net = BasicModel_MultiLayer(multi_input_module=True)
inp = torch.tensor([[0.0, 6.0, 0.0]])
self._layer_activation_test_assert(
net,
net.multi_relu,
inp,
([[-4.0, 7.0, 7.0, 7.0]], [[-4.0, 7.0, 7.0, 7.0]]),
attribute_to_layer_input=True,
)
def test_simple_multi_input_linear2_activation(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 0.0]])
inp2 = torch.tensor([[0.0, 10.0, 0.0]])
inp3 = torch.tensor([[0.0, 5.0, 0.0]])
self._layer_activation_test_assert(
net, net.model.linear2, (inp1, inp2, inp3), [[392.0, 394.0]], (4,)
)
def test_simple_multi_input_relu_activation(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[0.0, 10.0, 1.0]])
inp2 = torch.tensor([[0.0, 4.0, 5.0]])
inp3 = torch.tensor([[0.0, 0.0, 0.0]])
self._layer_activation_test_assert(
net, net.model.relu, (inp1, inp2), [[90.0, 101.0, 101.0, 101.0]], (inp3, 5)
)
def test_sequential_in_place(self) -> None:
model = nn.Sequential(nn.Conv2d(3, 4, 3), nn.ReLU(inplace=True))
layer_act = LayerActivation(model, model[0])
input = torch.randn(1, 3, 5, 5)
assertTensorAlmostEqual(self, layer_act.attribute(input), model[0](input))
def test_sequential_module(self) -> None:
model = Conv1dSeqModel()
layer_act = LayerActivation(model, model.seq)
input = torch.randn(2, 4, 1000)
out = model(input)
assertTensorAlmostEqual(self, layer_act.attribute(input), out)
def _layer_activation_test_assert(
self,
model: Module,
target_layer: Module,
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_activation: Union[
List[List[float]], Tuple[List[float], ...], Tuple[List[List[float]], ...]
],
additional_input: Any = None,
attribute_to_layer_input: bool = False,
):
layer_act = LayerActivation(model, target_layer)
self.assertTrue(layer_act.multiplies_by_inputs)
attributions = layer_act.attribute(
test_input,
additional_forward_args=additional_input,
attribute_to_layer_input=attribute_to_layer_input,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_activation, delta=0.01
)
def _multiple_layer_activation_test_assert(
self,
model: Module,
target_layers: List[Module],
test_input: Union[Tensor, Tuple[Tensor, ...]],
expected_activation: Union[
List, Tuple[List[float], ...], Tuple[List[List[float]], ...]
],
additional_input: Any = None,
attribute_to_layer_input: bool = False,
):
layer_act = LayerActivation(model, target_layers)
self.assertTrue(layer_act.multiplies_by_inputs)
attributions = layer_act.attribute(
test_input,
additional_forward_args=additional_input,
attribute_to_layer_input=attribute_to_layer_input,
)
for i in range(len(target_layers)):
assertTensorTuplesAlmostEqual(
self, attributions[i], expected_activation[i], delta=0.01
)
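# A minimal sketch of LayerActivation with a list of layers, mirroring
# _multiple_layer_activation_test_assert above: with multiple layers,
# attribute returns one result per layer. The toy model is an illustrative
# assumption, not part of the tests.
def _example_layer_activation() -> None:
    model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
    layer_act = LayerActivation(model, [model[0], model[2]])
    acts = layer_act.attribute(torch.randn(1, 3))
    assert acts[0].shape == (1, 4) and acts[1].shape == (1, 2)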
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.gradient_shap import GradientShap
from captum.attr._core.layer.layer_gradient_shap import LayerGradientShap
from tests.attr.test_gradient_shap import _assert_attribution_delta
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
from tests.helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
from tests.helpers.classification_models import SoftmaxModel
from torch import Tensor
from torch.nn import Module
class Test(BaseTest):
def test_basic_multilayer(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, -20.0, 10.0]])
baselines = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
expected = [[-8.4, 0.0]]
self._assert_attributions(model, model.linear2, inputs, baselines, 0, expected)
def test_basic_multilayer_wo_multiplying_by_inputs(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, -20.0, 10.0]])
baselines = torch.zeros(3, 3)
lgs = LayerGradientShap(model, model.linear2, multiply_by_inputs=False)
attrs = lgs.attribute(
inputs,
baselines,
target=0,
stdevs=0.0,
)
assertTensorAlmostEqual(self, attrs, torch.tensor([[1.0, 0.0]]))
def test_basic_multi_tensor_output(self) -> None:
model = BasicModel_MultiLayer(multi_input_module=True)
model.eval()
inputs = torch.tensor([[0.0, 100.0, 0.0]])
expected = ([[90.0, 100.0, 100.0, 100.0]], [[90.0, 100.0, 100.0, 100.0]])
self._assert_attributions(
model,
model.multi_relu,
inputs,
torch.zeros_like(inputs),
0,
expected,
n_samples=5,
)
def test_basic_multilayer_with_add_args(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
model.eval()
inputs = torch.tensor([[1.0, -20.0, 10.0]])
add_args = torch.ones(1, 3)
baselines = torch.randn(30, 3)
expected = [[-13.9510, 0.0]]
self._assert_attributions(
model, model.linear2, inputs, baselines, 0, expected, add_args=add_args
)
def test_basic_multilayer_compare_w_inp_features(self) -> None:
model = BasicModel_MultiLayer()
model.eval()
inputs = torch.tensor([[10.0, 20.0, 10.0]])
baselines = torch.randn(30, 3)
gs = GradientShap(model)
expected, delta = gs.attribute(
inputs, baselines, target=0, return_convergence_delta=True
)
self.setUp()
self._assert_attributions(
model,
model.linear0,
inputs,
baselines,
0,
expected,
expected_delta=delta,
attribute_to_layer_input=True,
)
def test_classification(self) -> None:
def custom_baseline_fn(inputs):
num_in = inputs.shape[1]
return torch.arange(0.0, num_in * 4.0).reshape(4, num_in)
num_in = 40
n_samples = 10
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
model.eval()
inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
baselines = custom_baseline_fn
expected = torch.zeros(2, 20)
self._assert_attributions(
model, model.relu1, inputs, baselines, 1, expected, n_samples=n_samples
)
def test_basic_multi_input(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inputs = (torch.tensor([[10.0, 20.0, 10.0]]), torch.tensor([[1.0, 2.0, 1.0]]))
add_args = (torch.tensor([[1.0, 2.0, 3.0]]), 1.0)
baselines = (torch.randn(30, 3), torch.randn(30, 3))
expected = torch.tensor([[171.6841, 0.0]])
self._assert_attributions(
net, net.model.linear2, inputs, baselines, 0, expected, add_args=add_args
)
def _assert_attributions(
self,
model: Module,
layer: Module,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[TensorOrTupleOfTensorsGeneric, Callable],
target: TargetType,
expected: Union[
Tensor,
Tuple[Tensor, ...],
List[float],
List[List[float]],
Tuple[List[float], ...],
Tuple[List[List[float]], ...],
],
expected_delta: Tensor = None,
n_samples: int = 5,
attribute_to_layer_input: bool = False,
add_args: Any = None,
) -> None:
lgs = LayerGradientShap(model, layer)
attrs, delta = lgs.attribute(
inputs,
baselines,
target=target,
additional_forward_args=add_args,
n_samples=n_samples,
stdevs=0.0009,
return_convergence_delta=True,
attribute_to_layer_input=attribute_to_layer_input,
)
assertTensorTuplesAlmostEqual(self, attrs, expected, delta=0.005)
if expected_delta is None:
_assert_attribution_delta(
self, inputs, attrs, n_samples, delta, is_layer=True
)
else:
for delta_i, expected_delta_i in zip(delta, expected_delta):
assertTensorAlmostEqual(self, delta_i, expected_delta_i, delta=0.01)
|
#!/usr/bin/env python3
import torch
from captum.attr._core.deep_lift import DeepLift, DeepLiftShap
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._core.feature_permutation import FeaturePermutation
from captum.attr._core.gradient_shap import GradientShap
from captum.attr._core.guided_backprop_deconvnet import Deconvolution, GuidedBackprop
from captum.attr._core.guided_grad_cam import GuidedGradCam
from captum.attr._core.input_x_gradient import InputXGradient
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.kernel_shap import KernelShap
from captum.attr._core.layer.grad_cam import LayerGradCam
from captum.attr._core.layer.internal_influence import InternalInfluence
from captum.attr._core.layer.layer_activation import LayerActivation
from captum.attr._core.layer.layer_conductance import LayerConductance
from captum.attr._core.layer.layer_deep_lift import LayerDeepLift, LayerDeepLiftShap
from captum.attr._core.layer.layer_feature_ablation import LayerFeatureAblation
from captum.attr._core.layer.layer_gradient_shap import LayerGradientShap
from captum.attr._core.layer.layer_gradient_x_activation import LayerGradientXActivation
from captum.attr._core.layer.layer_integrated_gradients import LayerIntegratedGradients
from captum.attr._core.layer.layer_lrp import LayerLRP
from captum.attr._core.lime import Lime
from captum.attr._core.lrp import LRP
from captum.attr._core.neuron.neuron_conductance import NeuronConductance
from captum.attr._core.neuron.neuron_deep_lift import NeuronDeepLift, NeuronDeepLiftShap
from captum.attr._core.neuron.neuron_feature_ablation import NeuronFeatureAblation
from captum.attr._core.neuron.neuron_gradient import NeuronGradient
from captum.attr._core.neuron.neuron_gradient_shap import NeuronGradientShap
from captum.attr._core.neuron.neuron_guided_backprop_deconvnet import (
NeuronDeconvolution,
NeuronGuidedBackprop,
)
from captum.attr._core.neuron.neuron_integrated_gradients import (
NeuronIntegratedGradients,
)
from captum.attr._core.occlusion import Occlusion
from captum.attr._core.saliency import Saliency
from captum.attr._core.shapley_value import ShapleyValueSampling
from captum.attr._utils.input_layer_wrapper import ModelInputWrapper
from tests.helpers.basic import set_all_random_seeds
from tests.helpers.basic_models import (
BasicModel_ConvNet,
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
BasicModel_MultiLayer_TrueMultiInput,
ReLULinearModel,
)
"""
This file defines a test configuration for attribution methods, particularly
defining valid input parameters for attribution methods. These test cases are
utilized for DataParallel tests, JIT tests, and target tests. Generally, these
tests follow a consistent structure of running the identified algorithm(s) in
two different ways, e.g. with a DataParallel- or JIT-wrapped model versus a standard
model and verifying that the results match. New tests for additional model variants
or features can be built using this config.
The current schema for each test case (each element in the config list) includes
the following information:
* "name": String defining name for test config
* "algorithms": List of algorithms (Attribution classes) which are applicable for
the given test case
* "model": nn.Module model for given test
* "attribute_args": Arguments to be passed to attribute call of algorithm
* "layer": nn.Module corresponding to layer for Layer or Neuron attribution
* "noise_tunnel": True or False, based on whether to apply NoiseTunnel to the algorithm.
If True, "attribute_args" corresponds to arguments for NoiseTunnel.attribute.
* "baseline_distr": True or False based on whether baselines in "attribute_args" are
provided as a distribution or per-example.
* "target_delta": Delta for comparison in test_targets
* "dp_delta": Delta for comparison in test_data_parallel
To add tests for a new algorithm, simply add it to the algorithms list of any
existing test case with applicable parameters. If the algorithm has particular
arguments not covered by existing
test cases, add a new test case following the config schema described above. For
targets tests, ensure that the new test case includes cases with tensor or list
targets. If the new algorithm works with JIT models, make sure to also
add the method to the whitelist in test_jit.
To create new tests for all methods, follow the same structure as test_jit,
test_targets, or test_data_parallel. Each of these iterates through the test
config and creates relevant test cases based on the config.
"""
# Set random seeds to ensure deterministic behavior
set_all_random_seeds(1234)
config = [
# Attribution Method Configs
# Primary Methods (Generic Configs)
{
"name": "basic_single_target",
"algorithms": [
IntegratedGradients,
InputXGradient,
FeatureAblation,
DeepLift,
Saliency,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {"inputs": torch.randn(4, 3), "target": 1},
},
{
"name": "basic_multi_input",
"algorithms": [
IntegratedGradients,
InputXGradient,
FeatureAblation,
DeepLift,
Saliency,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer_MultiInput(),
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
},
"dp_delta": 0.001,
},
{
"name": "basic_multi_target",
"algorithms": [
IntegratedGradients,
InputXGradient,
FeatureAblation,
DeepLift,
Saliency,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {"inputs": torch.randn(4, 3), "target": [0, 1, 1, 0]},
},
{
"name": "basic_multi_input_multi_target",
"algorithms": [
IntegratedGradients,
InputXGradient,
FeatureAblation,
DeepLift,
Saliency,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer_MultiInput(),
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
},
"dp_delta": 0.0005,
},
{
"name": "basic_multiple_tuple_target",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
},
},
{
"name": "basic_tensor_single_target",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {"inputs": torch.randn(4, 3), "target": torch.tensor([0])},
},
{
"name": "basic_tensor_multi_target",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([1, 1, 0, 0]),
},
},
# Primary Configs with Baselines
{
"name": "basic_multiple_tuple_target_with_baselines",
"algorithms": [
IntegratedGradients,
FeatureAblation,
DeepLift,
ShapleyValueSampling,
Lime,
KernelShap,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
},
},
{
"name": "basic_tensor_single_target_with_baselines",
"algorithms": [
IntegratedGradients,
FeatureAblation,
DeepLift,
ShapleyValueSampling,
Lime,
KernelShap,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": torch.tensor([0]),
},
},
# Primary Configs with Internal Batching
{
"name": "basic_multiple_tuple_target_with_internal_batching",
"algorithms": [IntegratedGradients],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"internal_batch_size": 2,
},
},
# NoiseTunnel
{
"name": "basic_multi_input_multi_target_nt",
"algorithms": [
IntegratedGradients,
InputXGradient,
FeatureAblation,
DeepLift,
Saliency,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer_MultiInput(),
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
"nt_samples": 20,
"stdevs": 0.0,
},
"noise_tunnel": True,
"dp_delta": 0.01,
},
{
"name": "basic_multiple_target_with_baseline_nt",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [0, 1, 1, 0],
"nt_samples": 20,
"stdevs": 0.0,
},
"noise_tunnel": True,
},
{
"name": "basic_multiple_tuple_target_nt",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"nt_samples": 20,
"stdevs": 0.0,
},
"noise_tunnel": True,
},
{
"name": "basic_single_tensor_target_nt",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([0]),
"nt_samples": 20,
"stdevs": 0.0,
},
"noise_tunnel": True,
},
{
"name": "basic_multi_tensor_target_nt",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([0, 1, 1, 0]),
"nt_samples": 20,
"stdevs": 0.0,
},
"noise_tunnel": True,
},
{
"name": "basic_multi_tensor_target_batched_nt",
"algorithms": [
IntegratedGradients,
Saliency,
InputXGradient,
FeatureAblation,
DeepLift,
GuidedBackprop,
Deconvolution,
LRP,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([0, 1, 1, 0]),
"nt_samples": 20,
"nt_samples_batch_size": 2,
"stdevs": 0.0,
},
"noise_tunnel": True,
},
# DeepLift SHAP
{
"name": "basic_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(6, 3),
"target": 0,
},
"baseline_distr": True,
},
{
"name": "basic_multi_input_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer_MultiInput(),
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"baselines": (torch.randn(4, 3), torch.randn(4, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
},
"baseline_distr": True,
},
{
"name": "basic_multiple_target_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": [0, 1, 1, 0],
},
"baseline_distr": True,
},
{
"name": "basic_multiple_tuple_target_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
},
"baseline_distr": True,
},
{
"name": "basic_single_tensor_targe_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": torch.tensor([0]),
},
"baseline_distr": True,
},
{
"name": "basic_multi_tensor_target_dl_shap",
"algorithms": [DeepLiftShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(4, 3),
"target": torch.tensor([0, 1, 1, 0]),
},
"baseline_distr": True,
},
# Gradient SHAP
{
"name": "basic_multi_inp_with_single_baseline_grad_shap",
"algorithms": [GradientShap],
"model": BasicModel_MultiLayer_MultiInput(),
"attribute_args": {
"inputs": (torch.randn(6, 3), torch.randn(6, 3)),
"baselines": (torch.randn(1, 3), torch.randn(1, 3)),
"additional_forward_args": (torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
"stdevs": 0.0,
"n_samples": 2000,
},
"target_delta": 1.0,
"dp_delta": 0.005,
"baseline_distr": True,
},
{
"name": "basic_multiple_target_with_single_baseline_grad_shap",
"algorithms": [GradientShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(1, 3),
"target": [0, 1, 1, 0],
"n_samples": 800,
"stdevs": 0.0,
},
"target_delta": 0.6,
"baseline_distr": True,
},
{
"name": "basic_multiple_tuple_target_with_single_baseline_grad_shap",
"algorithms": [GradientShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.15 * torch.randn(1, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"n_samples": 2000,
"stdevs": 0.0,
},
"target_delta": 0.6,
"dp_delta": 0.003,
"baseline_distr": True,
},
{
"name": "basic_single_tensor_target_with_single_baseline_grad_shap",
"algorithms": [GradientShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(1, 3),
"target": torch.tensor([0]),
"n_samples": 500,
"stdevs": 0.0,
},
"target_delta": 0.6,
"baseline_distr": True,
},
{
"name": "basic_multi_tensor_target_with_single_baseline_grad_shap",
"algorithms": [GradientShap],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(1, 3),
"target": torch.tensor([0, 1, 1, 0]),
"n_samples": 500,
"stdevs": 0.0,
},
"target_delta": 0.6,
"baseline_distr": True,
},
# Perturbation-Specific Configs
{
"name": "conv_with_perturbations_per_eval",
"algorithms": [
FeatureAblation,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
],
"model": BasicModel_ConvNet(),
"attribute_args": {
"inputs": torch.arange(400).view(4, 1, 10, 10).float(),
"target": 0,
"perturbations_per_eval": 20,
},
"dp_delta": 0.008,
},
{
"name": "basic_multiple_tuple_target_with_perturbations_per_eval",
"algorithms": [
FeatureAblation,
ShapleyValueSampling,
FeaturePermutation,
Lime,
KernelShap,
],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"perturbations_per_eval": 2,
},
},
{
"name": "conv_occlusion_with_perturbations_per_eval",
"algorithms": [Occlusion],
"model": BasicModel_ConvNet(),
"attribute_args": {
"inputs": torch.arange(400).view(4, 1, 10, 10).float(),
"perturbations_per_eval": 8,
"sliding_window_shapes": (1, 4, 2),
"target": 0,
},
},
{
"name": "basic_multi_input_with_perturbations_per_eval_occlusion",
"algorithms": [Occlusion],
"model": ReLULinearModel(),
"attribute_args": {
"inputs": (torch.randn(4, 3), torch.randn(4, 3)),
"perturbations_per_eval": 2,
"sliding_window_shapes": ((2,), (1,)),
},
},
{
"name": "basic_multiple_tuple_target_occlusion",
"algorithms": [Occlusion],
"model": BasicModel_MultiLayer(),
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"sliding_window_shapes": (2,),
},
},
# Layer Attribution Method Configs
{
"name": "conv_layer_single_target",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
GuidedGradCam,
],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {"inputs": 100 * torch.randn(4, 1, 10, 10), "target": 1},
},
{
"name": "basic_layer_in_place",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer(inplace=True),
"layer": "relu",
"attribute_args": {"inputs": torch.randn(4, 3), "target": 0},
},
{
"name": "basic_layer_multi_output",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": "multi_relu",
"attribute_args": {"inputs": torch.randn(4, 3), "target": 0},
},
{
"name": "basic_layer_multi_input",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
},
},
{
"name": "basic_layer_multiple_target",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {"inputs": torch.randn(4, 3), "target": [0, 1, 1, 0]},
},
{
"name": "basic_layer_tensor_multiple_target",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([0, 1, 1, 0]),
},
},
{
"name": "basic_layer_multiple_tuple_target",
"algorithms": [
LayerConductance,
LayerIntegratedGradients,
LayerDeepLift,
InternalInfluence,
LayerFeatureAblation,
LayerGradientXActivation,
LayerGradCam,
],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
},
},
{
"name": "basic_layer_multiple_tuple_target_with_internal_batching",
"algorithms": [LayerConductance, InternalInfluence, LayerIntegratedGradients],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"internal_batch_size": 2,
},
},
{
"name": "basic_layer_multi_input_with_internal_batching",
"algorithms": [LayerConductance, InternalInfluence, LayerIntegratedGradients],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
"internal_batch_size": 2,
},
},
{
"name": "basic_layer_multi_output_with_internal_batching",
"algorithms": [LayerConductance, InternalInfluence, LayerIntegratedGradients],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": "multi_relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": 0,
"internal_batch_size": 2,
},
},
# Layer Perturbation
{
"name": "basic_layer_multi_input_with_perturbations_per_eval",
"algorithms": [LayerFeatureAblation],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
"perturbations_per_eval": 2,
},
},
{
"name": "basic_layer_multi_output_perturbations_per_eval",
"algorithms": [LayerFeatureAblation],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": "multi_relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": 0,
"perturbations_per_eval": 2,
},
},
{
"name": "conv_layer_with_perturbations_per_eval",
"algorithms": [LayerFeatureAblation],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {
"inputs": 100 * torch.randn(4, 1, 10, 10),
"target": 1,
"perturbations_per_eval": 20,
},
},
# Layer DeepLiftSHAP
{
"name": "relu_layer_multi_inp_dl_shap",
"algorithms": [LayerDeepLiftShap],
"model": ReLULinearModel(),
"layer": "l3",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"baselines": (2 * torch.randn(2, 3), 6 * torch.randn(2, 3)),
},
"baseline_distr": True,
},
{
"name": "basic_layer_multi_output_dl_shap",
"algorithms": [LayerDeepLiftShap],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": "multi_relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": torch.randn(2, 3),
"target": 0,
},
"baseline_distr": True,
},
{
"name": "basic_layer_multi_inp_multi_target_dl_shap",
"algorithms": [LayerDeepLiftShap],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"baselines": (2 * torch.randn(11, 3), 6 * torch.randn(11, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
},
"baseline_distr": True,
},
{
"name": "basic_layer_multiple_target_dl_shap",
"algorithms": [LayerDeepLiftShap],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(6, 3),
"target": [0, 1, 1, 0],
},
"baseline_distr": True,
},
# Layer Gradient SHAP
{
"name": "relu_layer_multi_inp_grad_shap",
"algorithms": [LayerGradientShap],
"model": ReLULinearModel(),
"layer": "l3",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"baselines": (2 * torch.randn(2, 3), 6 * torch.randn(2, 3)),
},
"baseline_distr": True,
},
{
"name": "basic_layer_multi_output_grad_shap",
"algorithms": [LayerGradientShap],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": "multi_relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": torch.randn(2, 3),
"target": 0,
},
"baseline_distr": True,
},
{
"name": "basic_layer_multi_inp_multi_target_grad_shap",
"algorithms": [LayerGradientShap],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (torch.randn(6, 3), torch.randn(6, 3)),
"baselines": (torch.randn(2, 3), torch.randn(2, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
"n_samples": 1000,
},
"baseline_distr": True,
"target_delta": 0.6,
},
# Neuron Attribution Method Configs
{
"name": "basic_neuron",
"algorithms": [
NeuronGradient,
NeuronIntegratedGradients,
NeuronGuidedBackprop,
NeuronDeconvolution,
NeuronDeepLift,
NeuronFeatureAblation,
],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {"inputs": torch.randn(4, 3), "neuron_selector": 3},
},
{
"name": "conv_neuron",
"algorithms": [
NeuronGradient,
NeuronIntegratedGradients,
NeuronGuidedBackprop,
NeuronDeconvolution,
NeuronDeepLift,
NeuronFeatureAblation,
],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {
"inputs": 100 * torch.randn(4, 1, 10, 10),
"neuron_selector": (0, 1, 0),
},
},
{
"name": "basic_neuron_multi_input",
"algorithms": [
NeuronGradient,
NeuronIntegratedGradients,
NeuronGuidedBackprop,
NeuronDeconvolution,
NeuronDeepLift,
NeuronFeatureAblation,
],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"neuron_selector": (3,),
},
},
# Neuron Conductance (with target)
{
"name": "basic_neuron_single_target",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": 1,
"neuron_selector": 3,
},
},
{
"name": "basic_neuron_multiple_target",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [0, 1, 1, 0],
"neuron_selector": 3,
},
},
{
"name": "conv_neuron_single_target",
"algorithms": [NeuronConductance],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {
"inputs": 100 * torch.randn(4, 1, 10, 10),
"target": 1,
"neuron_selector": (0, 1, 0),
},
},
{
"name": "basic_neuron_multi_input_multi_target",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
"neuron_selector": 3,
},
},
{
"name": "basic_neuron_tensor_multiple_target",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": torch.tensor([0, 1, 1, 0]),
"neuron_selector": 3,
},
},
{
"name": "basic_neuron_multiple_tuple_target",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"neuron_selector": 3,
},
},
# Neuron Conductance with Internal Batching
{
"name": "basic_neuron_multiple_tuple_target_with_internal_batching",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
"additional_forward_args": (None, True),
"internal_batch_size": 2,
"neuron_selector": 3,
},
},
{
"name": "basic_neuron_multi_input_multi_target_with_internal_batching",
"algorithms": [NeuronConductance],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"target": [0, 1, 1, 0, 0, 1],
"internal_batch_size": 2,
"neuron_selector": 3,
},
},
# Neuron Gradient SHAP
{
"name": "basic_neuron_grad_shap",
"algorithms": [NeuronGradientShap],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": torch.randn(1, 3),
"neuron_selector": 3,
},
"target_delta": 0.6,
"baseline_distr": True,
},
{
"name": "basic_neuron_multi_inp_grad_shap",
"algorithms": [NeuronGradientShap],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(6, 3), 5 * torch.randn(6, 3)),
"baselines": (10 * torch.randn(1, 3), 5 * torch.randn(1, 3)),
"additional_forward_args": (2 * torch.randn(6, 3), 5),
"neuron_selector": 3,
},
"target_delta": 0.6,
"baseline_distr": True,
},
# Neuron DeepLift SHAP
{
"name": "basic_neuron_dl_shap",
"algorithms": [NeuronDeepLiftShap],
"model": BasicModel_MultiLayer(),
"layer": "relu",
"attribute_args": {
"inputs": torch.randn(4, 3),
"baselines": 0.5 * torch.randn(6, 3),
"neuron_selector": (3,),
},
"baseline_distr": True,
},
{
"name": "basic_neuron_multi_input_dl_shap",
"algorithms": [NeuronDeepLiftShap],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"baselines": (torch.randn(4, 3), torch.randn(4, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"neuron_selector": 3,
},
"baseline_distr": True,
},
# Neuron Feature Ablation
{
"name": "conv_neuron_with_perturbations_per_eval",
"algorithms": [NeuronFeatureAblation],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {
"inputs": torch.arange(400).view(4, 1, 10, 10).float(),
"perturbations_per_eval": 20,
"neuron_selector": (0, 1, 0),
},
},
{
"name": "basic_neuron_multiple_input_with_baselines_and_perturbations_per_eval",
"algorithms": [NeuronFeatureAblation],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"baselines": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"neuron_selector": (3,),
"perturbations_per_eval": 2,
},
},
# Neuron Attribution with Functional Selector
{
"name": "basic_neuron_multi_input_function_selector",
"algorithms": [
NeuronGradient,
NeuronIntegratedGradients,
NeuronGuidedBackprop,
NeuronDeconvolution,
NeuronDeepLift,
NeuronFeatureAblation,
],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.relu",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"neuron_selector": lambda x: torch.sum(x, 1),
},
},
# Neuron Attribution with slice Selector
{
"name": "conv_neuron_slice_selector",
"algorithms": [
NeuronGradient,
NeuronIntegratedGradients,
NeuronGuidedBackprop,
NeuronDeconvolution,
NeuronDeepLift,
NeuronFeatureAblation,
],
"model": BasicModel_ConvNet(),
"layer": "conv2",
"attribute_args": {
"inputs": 100 * torch.randn(4, 1, 10, 10),
"neuron_selector": (slice(0, 2, 1), 1, slice(0, 2, 1)),
},
},
# Layer Attribution with Multiple Layers
{
"name": "basic_activation_multi_layer_multi_output",
"algorithms": [LayerActivation],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": ["multi_relu", "linear1", "linear0"],
"attribute_args": {"inputs": torch.randn(4, 3)},
},
{
"name": "basic_gradient_multi_layer_multi_output",
"algorithms": [LayerGradientXActivation],
"model": BasicModel_MultiLayer(multi_input_module=True),
"layer": ["multi_relu", "linear1", "linear0"],
"attribute_args": {"inputs": torch.randn(4, 3), "target": 0},
},
{
"name": "basic_layer_ig_multi_layer_multi_output",
"algorithms": [LayerIntegratedGradients],
"model": BasicModel_MultiLayer_TrueMultiInput(),
"layer": ["m1", "m234"],
"attribute_args": {
"inputs": (
torch.randn(5, 3),
torch.randn(5, 3),
torch.randn(5, 3),
torch.randn(5, 3),
),
"target": 0,
},
},
{
"name": "basic_layer_ig_multi_layer_multi_output_with_input_wrapper",
"algorithms": [LayerIntegratedGradients],
"model": ModelInputWrapper(BasicModel_MultiLayer_TrueMultiInput()),
"layer": ["module.m1", "module.m234"],
"attribute_args": {
"inputs": (
torch.randn(5, 3),
torch.randn(5, 3),
torch.randn(5, 3),
torch.randn(5, 3),
),
"target": 0,
},
},
# Layer LRP
{
"name": "basic_layer_lrp",
"algorithms": [
LayerLRP,
],
"model": BasicModel_MultiLayer(),
"layer": "linear2",
"attribute_args": {"inputs": torch.randn(4, 3), "target": 0},
},
{
"name": "basic_layer_lrp_multi_input",
"algorithms": [
LayerLRP,
],
"model": BasicModel_MultiLayer_MultiInput(),
"layer": "model.linear1",
"attribute_args": {
"inputs": (10 * torch.randn(12, 3), 5 * torch.randn(12, 3)),
"additional_forward_args": (2 * torch.randn(12, 3), 5),
"target": 0,
},
"dp_delta": 0.0002,
},
]
|
#!/usr/bin/env python3
import typing
from typing import Any, cast, Dict, List, Tuple, Type, Union
from captum.attr._core.lime import Lime
from captum.attr._models.base import _get_deep_layer_name
from captum.attr._utils.attribution import Attribution
from torch.nn import Module
def gen_test_name(
prefix: str, test_name: str, algorithm: Type[Attribution], noise_tunnel: bool
) -> str:
# Generates test name for dynamically generated tests
return (
prefix
+ "_"
+ test_name
+ "_"
+ algorithm.__name__
+ ("NoiseTunnel" if noise_tunnel else "")
)
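# For example (illustrative arguments, using the imported Lime class):
#   gen_test_name("test_dp", "basic_single_target", Lime, True)
# returns "test_dp_basic_single_target_LimeNoiseTunnel".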
def parse_test_config(
test_config: Dict,
) -> Tuple[List[Type[Attribution]], Module, Dict[str, Any], Module, bool, bool]:
algorithms = cast(List[Type[Attribution]], test_config["algorithms"])
model = test_config["model"]
args = cast(Dict[str, Any], test_config["attribute_args"])
layer = test_config["layer"] if "layer" in test_config else None
noise_tunnel = (
test_config["noise_tunnel"] if "noise_tunnel" in test_config else False
)
baseline_distr = (
test_config["baseline_distr"] if "baseline_distr" in test_config else False
)
return algorithms, model, args, layer, noise_tunnel, baseline_distr
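# Typical usage unpacks all six fields at once, e.g.:
#   algorithms, model, args, layer, noise_tunnel, baseline_distr = \
#       parse_test_config(test_config)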
def should_create_generated_test(algorithm: Type[Attribution]) -> bool:
if issubclass(algorithm, Lime):
try:
import sklearn # noqa: F401
            assert sklearn.__version__ >= "0.23.0", (
                "Must have sklearn version 0.23.0 or higher to use "
                "sample_weight in Lasso regression."
            )
return True
except (ImportError, AssertionError):
return False
return True
@typing.overload
def get_target_layer(model: Module, layer_name: str) -> Module:
...
@typing.overload
def get_target_layer(model: Module, layer_name: List[str]) -> List[Module]:
...
def get_target_layer(
model: Module, layer_name: Union[str, List[str]]
) -> Union[Module, List[Module]]:
if isinstance(layer_name, str):
return _get_deep_layer_name(model, layer_name)
else:
return [
_get_deep_layer_name(model, single_layer_name)
for single_layer_name in layer_name
]
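# Usage sketch: get_target_layer(model, "model.relu") resolves the dotted
# layer name via _get_deep_layer_name, while a list of names returns the
# corresponding modules in the same order.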
|
#!/usr/bin/env python3
import numpy as np
import torch
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum.attr._utils.approximation_methods import approximation_parameters
from captum.attr._utils.attribution import LayerAttribution
from captum.attr._utils.common import _reshape_and_sum
"""
Note: This implementation of conductance follows the procedure described in the original
paper exactly (https://arxiv.org/abs/1805.12233), computing gradients of output with
respect to hidden neurons and each hidden neuron with respect to the input and summing
appropriately. Computing the gradient of each neuron with respect to the input is
not necessary to just compute the conductance of a given layer, so the main
implementation of conductance does not use this approach in order to compute layer
conductance more efficiently (https://arxiv.org/pdf/1807.09946.pdf).
This implementation is used only for testing to verify that the output matches
that of the main implementation.
"""
class ConductanceReference(LayerAttribution):
def __init__(self, forward_func, layer) -> None:
r"""
        Args:
forward_func: The forward function of the model or any modification of it
layer: Layer for which output attributions are computed.
Output size of attribute matches that of layer output.
"""
super().__init__(forward_func, layer)
def _conductance_grads(self, forward_fn, input, target_ind=None):
with torch.autograd.set_grad_enabled(True):
# Set a forward hook on specified module and run forward pass to
# get output tensor size.
saved_tensor = None
def forward_hook(module, inp, out):
nonlocal saved_tensor
saved_tensor = out
hook = self.layer.register_forward_hook(forward_hook)
output = forward_fn(input)
# Compute layer output tensor dimensions and total number of units.
# The hidden layer tensor is assumed to have dimension (num_hidden, ...)
# where the product of the dimensions >= 1 correspond to the total
# number of hidden neurons in the layer.
layer_size = tuple(saved_tensor.size())[1:]
layer_units = int(np.prod(layer_size))
# Remove unnecessary forward hook.
hook.remove()
# Backward hook function to override gradients in order to obtain
# just the gradient of each hidden unit with respect to input.
saved_grads = None
def backward_hook(grads):
nonlocal saved_grads
saved_grads = grads
zero_mat = torch.zeros((1,) + layer_size)
scatter_indices = torch.arange(0, layer_units).view_as(zero_mat)
# Creates matrix with each layer containing a single unit with
# value 1 and remaining zeros, which will provide gradients
# with respect to each unit independently.
to_return = torch.zeros((layer_units,) + layer_size).scatter(
0, scatter_indices, 1
)
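                # For example, with layer_size == (2,) this yields the identity
                # matrix [[1., 0.], [0., 1.]]: one one-hot mask per hidden unit.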
to_repeat = [1] * len(to_return.shape)
to_repeat[0] = grads.shape[0] // to_return.shape[0]
expanded = to_return.repeat(to_repeat)
return expanded
# Create a forward hook in order to attach backward hook to appropriate
# tensor. Save backward hook in order to remove hook appropriately.
back_hook = None
def forward_hook_register_back(module, inp, out):
nonlocal back_hook
back_hook = out.register_hook(backward_hook)
hook = self.layer.register_forward_hook(forward_hook_register_back)
# Expand input to include layer_units copies of each input.
# This allows obtaining gradient with respect to each hidden unit
# in one pass.
expanded_input = torch.repeat_interleave(input, layer_units, dim=0)
output = forward_fn(expanded_input)
hook.remove()
output = output[:, target_ind] if target_ind is not None else output
input_grads = torch.autograd.grad(torch.unbind(output), expanded_input)
# Remove backwards hook
back_hook.remove()
# Remove duplicates in gradient with respect to hidden layer,
# choose one for each layer_units indices.
output_mid_grads = torch.index_select(
saved_grads,
0,
torch.tensor(range(0, input_grads[0].shape[0], layer_units)),
)
return input_grads[0], output_mid_grads, layer_units
def attribute(
self,
inputs,
baselines=None,
target=None,
n_steps=500,
method="riemann_trapezoid",
):
r"""
Computes conductance using gradients along the path, applying
        a Riemann approximation or Gauss-Legendre quadrature.
The details of the approach can be found here:
https://arxiv.org/abs/1805.12233
        Args:
inputs: A single high dimensional input tensor, in which
dimension 0 corresponds to number of examples.
baselines: A single high dimensional baseline tensor,
which has the same shape as the input
target: Predicted class index. This is necessary only for
classification use cases
n_steps: The number of steps used by the approximation method
method: Method for integral approximation, one of `riemann_right`,
`riemann_middle`, `riemann_trapezoid` or `gausslegendre`
        Returns:
attributions: Total conductance with respect to each neuron in
output of given layer
"""
if baselines is None:
baselines = 0
gradient_mask = apply_gradient_requirements((inputs,))
# retrieve step size and scaling factor for specified approximation method
step_sizes_func, alphas_func = approximation_parameters(method)
step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)
# compute scaled inputs from baseline to final input.
scaled_features = torch.cat(
[baselines + alpha * (inputs - baselines) for alpha in alphas], dim=0
)
# Conductance Gradients - Returns gradient of output with respect to
# hidden layer, gradient of hidden layer with respect to input,
# and number of hidden units.
input_gradients, mid_layer_gradients, hidden_units = self._conductance_grads(
self.forward_func, scaled_features, target
)
# Multiply gradient of hidden layer with respect to input by input - baseline
scaled_input_gradients = torch.repeat_interleave(
inputs - baselines, hidden_units, dim=0
)
scaled_input_gradients = input_gradients * scaled_input_gradients.repeat(
*([len(alphas)] + [1] * (len(scaled_input_gradients.shape) - 1))
)
# Sum gradients for each input neuron in order to have total
# for each hidden unit and reshape to match hidden layer shape
summed_input_grads = torch.sum(
scaled_input_gradients, tuple(range(1, len(scaled_input_gradients.shape)))
).view_as(mid_layer_gradients)
        # Rescale gradients of hidden layer by step size.
scaled_grads = mid_layer_gradients.contiguous().view(
n_steps, -1
) * torch.tensor(step_sizes).view(n_steps, 1).to(mid_layer_gradients.device)
undo_gradient_requirements((inputs,), gradient_mask)
        # Element-wise multiply gradient of output with respect to hidden layer
# and summed gradients with respect to input (chain rule) and sum across
# stepped inputs.
return _reshape_and_sum(
scaled_grads.view(mid_layer_gradients.shape) * summed_input_grads,
n_steps,
inputs.shape[0],
mid_layer_gradients.shape[1:],
)
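# Usage sketch (names are illustrative; `model` must expose the chosen layer):
#   cond_ref = ConductanceReference(model, model.relu)
#   attributions = cond_ref.attribute(inputs, target=0, n_steps=500)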
|
#!/usr/bin/env python3
from typing import cast, Iterable
import torch
from captum.concept._core.concept import Concept
from captum.concept._utils.data_iterator import dataset_to_dataloader
from tests.helpers.basic import BaseTest
from torch.utils.data import IterableDataset
class CustomIterableDataset(IterableDataset):
r"""
An auxiliary class for iterating through an image dataset.
"""
def __init__(self, get_tensor_from_filename_func, path) -> None:
r"""
Args:
path (str): Path to dataset files
"""
self.path = path
self.file_itr = ["x"] * 2
self.get_tensor_from_filename_func = get_tensor_from_filename_func
def get_tensor_from_filename(self, filename):
return self.get_tensor_from_filename_func(filename)
def __iter__(self):
mapped_itr = map(self.get_tensor_from_filename, self.file_itr)
return mapped_itr
class Test(BaseTest):
def test_create_concepts_from_images(self) -> None:
def get_tensor_from_filename(filename):
return torch.rand(3, 224, 224)
# Striped
concepts_path = "./dummy/concepts/striped/"
dataset = CustomIterableDataset(get_tensor_from_filename, concepts_path)
striped_iter = dataset_to_dataloader(dataset)
self.assertEqual(
len(cast(CustomIterableDataset, striped_iter.dataset).file_itr), 2
)
concept = Concept(id=0, name="striped", data_iter=striped_iter)
for data in cast(Iterable, concept.data_iter):
self.assertEqual(data.shape[1:], torch.Size([3, 224, 224]))
# Random
concepts_path = "./dummy/concepts/random/"
dataset = CustomIterableDataset(get_tensor_from_filename, concepts_path)
random_iter = dataset_to_dataloader(dataset)
self.assertEqual(
len(cast(CustomIterableDataset, random_iter.dataset).file_itr), 2
)
concept = Concept(id=1, name="random", data_iter=random_iter)
for data in cast(Iterable, concept.data_iter):
self.assertEqual(data.shape[1:], torch.Size([3, 224, 224]))
|
#!/usr/bin/env python3
import glob
import os
import tempfile
import unittest
from collections import defaultdict, OrderedDict
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
Iterator,
List,
Set,
Tuple,
Union,
)
import torch
from captum._utils.av import AV
from captum._utils.common import _get_module_from_name
from captum.concept._core.concept import Concept
from captum.concept._core.tcav import TCAV
from captum.concept._utils.classifier import Classifier
from captum.concept._utils.common import concepts_to_str
from captum.concept._utils.data_iterator import dataset_to_dataloader
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel_ConvNet
from torch import Tensor
from torch.utils.data import DataLoader, IterableDataset
class CustomClassifier(Classifier):
r"""
    Wraps a custom linear classifier that is necessary for the
    implementation of Testing with Concept Activation Vectors (TCAV), as described
in the paper:
https://arxiv.org/pdf/1711.11279.pdf
This class simulates the output of a Linear Classifier such as
sklearn without actually using it.
"""
def __init__(self) -> None:
Classifier.__init__(self)
def train_and_eval(
self, dataloader: DataLoader, **kwargs: Any
) -> Union[Dict, None]:
inputs = []
labels = []
for input, label in dataloader:
inputs.append(input)
labels.append(label)
inputs = torch.cat(inputs)
labels = torch.cat(labels)
# update concept ids aka classes
self._classes = list(OrderedDict.fromkeys([label.item() for label in labels]))
        # Training is skipped for performance reasons and to stay independent of sklearn
_, x_test, _, y_test = train_test_split(inputs, labels)
# A tensor with dimensions n_inputs x (1 - test_split) x n_concepts
# should be returned here.
        # Assemble a list of size x_test.shape[0], divided into 4 quarters:
        # [0, 0, 0, ... | 1, 1, 1, ... | 0, 0, 0, ... | 1, 1, 1, ... ]
        pred = [0] * x_test.shape[0]
        # Store the size of one quarter of x_test.shape[0] (sh_4) and use it
sh_4 = x_test.shape[0] / 4
for i in range(1, 4, 2):
from_ = round(i * sh_4)
to_ = round((i + 1) * sh_4)
pred[from_:to_] = [1] * (round((i + 1) * sh_4) - round(i * sh_4))
y_pred = torch.tensor(pred)
score = y_pred == y_test
accs = score.float().mean()
        # A hack to mock weights for two different layers
self.num_features = input.shape[1]
return {"accs": accs}
def weights(self) -> Tensor:
if self.num_features != 16:
return torch.randn(2, self.num_features)
return torch.tensor(
[
[
-0.2167,
-0.0809,
-0.1235,
-0.2450,
0.2954,
0.5409,
-0.2587,
-0.3428,
0.2486,
-0.0123,
0.2737,
0.4876,
-0.1133,
0.1616,
-0.2016,
-0.0413,
],
[
-0.2167,
-0.0809,
-0.1235,
-0.2450,
0.2954,
0.5409,
-0.2587,
-0.3428,
0.2486,
-0.0123,
0.2737,
0.4876,
-0.1133,
0.2616,
-0.2016,
-0.0413,
],
],
dtype=torch.float64,
)
def classes(self) -> List[int]:
return self._classes
class CustomClassifier_WO_Returning_Metrics(CustomClassifier):
def __init__(self) -> None:
CustomClassifier.__init__(self)
def train_and_eval(
self, dataloader: DataLoader, **kwargs: Any
) -> Union[Dict, None]:
CustomClassifier.train_and_eval(self, dataloader)
return None
class CustomClassifier_W_Flipped_Class_Id(CustomClassifier):
def __init__(self) -> None:
CustomClassifier.__init__(self)
def weights(self) -> Tensor:
_weights = CustomClassifier.weights(self)
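        # Row 0 must be cloned before the swap: the row-1 view is copied into
        # row 0 first, so without the clone row 1 would receive the already
        # overwritten values rather than the original row 0.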
_weights[0], _weights[1] = _weights[1], _weights[0].clone()
return _weights
def classes(self) -> List[int]:
_classes = CustomClassifier.classes(self)
_classes[0], _classes[1] = _classes[1], _classes[0]
return _classes
class CustomIterableDataset(IterableDataset):
r"""
Auxiliary class for iterating through an image dataset.
"""
def __init__(
self, get_tensor_from_filename_func: Callable, path: str, num_samples=100
) -> None:
r"""
Args:
path (str): Path to dataset files
"""
self.path = path
self.file_itr = ["x"] * num_samples
self.get_tensor_from_filename_func = get_tensor_from_filename_func
def get_tensor_from_filename(self, filename: str) -> Tensor:
return self.get_tensor_from_filename_func(filename)
def __iter__(self) -> Iterator:
mapped_itr = map(self.get_tensor_from_filename, self.file_itr)
return mapped_itr
def train_test_split(
x_list: Tensor, y_list: Union[Tensor, List[int]], test_split: float = 0.33
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
z_list = list(zip(x_list, y_list))
# Split
test_size = int(test_split * len(z_list))
z_test, z_train = z_list[:test_size], z_list[test_size:]
x_test, y_test = zip(*z_test)
x_train, y_train = zip(*z_train)
x_train = torch.stack(x_train)
x_test = torch.stack(x_test)
y_train = torch.stack(y_train)
y_test = torch.stack(y_test)
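    # Deterministically relabel both splits so the first half of each belongs
    # to class 0 and the second half to class 1.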
y_train[: len(y_train) // 2] = 0
y_train[len(y_train) // 2 :] = 1
y_test[: len(y_test) // 2] = 0
y_test[len(y_test) // 2 :] = 1
return x_train, x_test, y_train, y_test
def get_tensor_from_filename(filename: str) -> Tensor:
file_tensor = (
torch.tensor(
[
[
[
0.4963,
0.7682,
0.0885,
0.1320,
0.3074,
0.6341,
0.4901,
0.8964,
0.4556,
0.6323,
],
[
0.3489,
0.4017,
0.0223,
0.1689,
0.2939,
0.5185,
0.6977,
0.8000,
0.1610,
0.2823,
],
[
0.6816,
0.9152,
0.3971,
0.8742,
0.4194,
0.5529,
0.9527,
0.0362,
0.1852,
0.3734,
],
[
0.3051,
0.9320,
0.1759,
0.2698,
0.1507,
0.0317,
0.2081,
0.9298,
0.7231,
0.7423,
],
[
0.5263,
0.2437,
0.5846,
0.0332,
0.1387,
0.2422,
0.8155,
0.7932,
0.2783,
0.4820,
],
[
0.8198,
0.9971,
0.6984,
0.5675,
0.8352,
0.2056,
0.5932,
0.1123,
0.1535,
0.2417,
],
[
0.7262,
0.7011,
0.2038,
0.6511,
0.7745,
0.4369,
0.5191,
0.6159,
0.8102,
0.9801,
],
[
0.1147,
0.3168,
0.6965,
0.9143,
0.9351,
0.9412,
0.5995,
0.0652,
0.5460,
0.1872,
],
[
0.0340,
0.9442,
0.8802,
0.0012,
0.5936,
0.4158,
0.4177,
0.2711,
0.6923,
0.2038,
],
[
0.6833,
0.7529,
0.8579,
0.6870,
0.0051,
0.1757,
0.7497,
0.6047,
0.1100,
0.2121,
],
]
]
)
* 100
)
return file_tensor
def get_inputs_tensor() -> Tensor:
input_tensor = torch.tensor(
[
[
[
[
-1.1258e00,
-1.1524e00,
-2.5058e-01,
-4.3388e-01,
8.4871e-01,
6.9201e-01,
-3.1601e-01,
-2.1152e00,
3.2227e-01,
-1.2633e00,
],
[
3.4998e-01,
3.0813e-01,
1.1984e-01,
1.2377e00,
1.1168e00,
-2.4728e-01,
-1.3527e00,
-1.6959e00,
5.6665e-01,
7.9351e-01,
],
[
5.9884e-01,
-1.5551e00,
-3.4136e-01,
1.8530e00,
7.5019e-01,
-5.8550e-01,
-1.7340e-01,
1.8348e-01,
1.3894e00,
1.5863e00,
],
[
9.4630e-01,
-8.4368e-01,
-6.1358e-01,
3.1593e-02,
-4.9268e-01,
2.4841e-01,
4.3970e-01,
1.1241e-01,
6.4079e-01,
4.4116e-01,
],
[
-1.0231e-01,
7.9244e-01,
-2.8967e-01,
5.2507e-02,
5.2286e-01,
2.3022e00,
-1.4689e00,
-1.5867e00,
-6.7309e-01,
8.7283e-01,
],
[
1.0554e00,
1.7784e-01,
-2.3034e-01,
-3.9175e-01,
5.4329e-01,
-3.9516e-01,
-4.4622e-01,
7.4402e-01,
1.5210e00,
3.4105e00,
],
[
-1.5312e00,
-1.2341e00,
1.8197e00,
-5.5153e-01,
-5.6925e-01,
9.1997e-01,
1.1108e00,
1.2899e00,
-1.4782e00,
2.5672e00,
],
[
-4.7312e-01,
3.3555e-01,
-1.6293e00,
-5.4974e-01,
-4.7983e-01,
-4.9968e-01,
-1.0670e00,
1.1149e00,
-1.4067e-01,
8.0575e-01,
],
[
-9.3348e-02,
6.8705e-01,
-8.3832e-01,
8.9182e-04,
8.4189e-01,
-4.0003e-01,
1.0395e00,
3.5815e-01,
-2.4600e-01,
2.3025e00,
],
[
-1.8817e00,
-4.9727e-02,
-1.0450e00,
-9.5650e-01,
3.3532e-02,
7.1009e-01,
1.6459e00,
-1.3602e00,
3.4457e-01,
5.1987e-01,
],
]
],
[
[
[
-2.6133e00,
-1.6965e00,
-2.2824e-01,
2.7995e-01,
2.4693e-01,
7.6887e-02,
3.3801e-01,
4.5440e-01,
4.5694e-01,
-8.6537e-01,
],
[
7.8131e-01,
-9.2679e-01,
-2.1883e-01,
-2.4351e00,
-7.2915e-02,
-3.3986e-02,
9.6252e-01,
3.4917e-01,
-9.2146e-01,
-5.6195e-02,
],
[
-6.2270e-01,
-4.6372e-01,
1.9218e00,
-4.0255e-01,
1.2390e-01,
1.1648e00,
9.2337e-01,
1.3873e00,
-8.8338e-01,
-4.1891e-01,
],
[
-8.0483e-01,
5.6561e-01,
6.1036e-01,
4.6688e-01,
1.9507e00,
-1.0631e00,
-7.7326e-02,
1.1640e-01,
-5.9399e-01,
-1.2439e00,
],
[
-1.0209e-01,
-1.0335e00,
-3.1264e-01,
2.4579e-01,
-2.5964e-01,
1.1834e-01,
2.4396e-01,
1.1646e00,
2.8858e-01,
3.8660e-01,
],
[
-2.0106e-01,
-1.1793e-01,
1.9220e-01,
-7.7216e-01,
-1.9003e00,
1.3068e-01,
-7.0429e-01,
3.1472e-01,
1.5739e-01,
3.8536e-01,
],
[
9.6715e-01,
-9.9108e-01,
3.0161e-01,
-1.0732e-01,
9.9846e-01,
-4.9871e-01,
7.6111e-01,
6.1830e-01,
3.1405e-01,
2.1333e-01,
],
[
-1.2005e-01,
3.6046e-01,
-3.1403e-01,
-1.0787e00,
2.4081e-01,
-1.3962e00,
-6.6144e-02,
-3.5836e-01,
-1.5616e00,
-3.5464e-01,
],
[
1.0811e00,
1.3148e-01,
1.5735e00,
7.8143e-01,
-5.1107e-01,
-1.7137e00,
-5.1006e-01,
-4.7489e-01,
-6.3340e-01,
-1.4677e00,
],
[
-8.7848e-01,
-2.0784e00,
-1.1005e00,
-7.2013e-01,
1.1931e-02,
3.3977e-01,
-2.6345e-01,
1.2805e00,
1.9395e-02,
-8.8080e-01,
],
]
],
],
requires_grad=True,
)
return input_tensor
def create_concept(concept_name: str, concept_id: int) -> Concept:
concepts_path = "./dummy/concepts/" + concept_name + "/"
dataset = CustomIterableDataset(get_tensor_from_filename, concepts_path)
concept_iter = dataset_to_dataloader(dataset)
concept = Concept(id=concept_id, name=concept_name, data_iter=concept_iter)
return concept
def create_concepts() -> Dict[str, Concept]:
# Function to create concept objects from a pre-set concept name list.
concept_names = ["striped", "ceo", "random", "dotted"]
concept_dict: Dict[str, Concept] = defaultdict()
for c, concept_name in enumerate(concept_names):
concept = create_concept(concept_name, c)
concept_dict[concept_name] = concept
return concept_dict
def find_concept_by_id(concepts: Set[Concept], id: int) -> Union[Concept, None]:
for concept in concepts:
if concept.id == id:
return concept
return None
def create_TCAV(
save_path: str,
classifier: Classifier,
layers: Union[str, List[str]],
attribute_to_layer_input: bool = False,
) -> TCAV:
model = BasicModel_ConvNet()
tcav = TCAV(
model,
layers,
classifier=classifier,
save_path=save_path,
attribute_to_layer_input=attribute_to_layer_input,
)
return tcav
def init_TCAV(
save_path: str,
classifier: Classifier,
layers: Union[str, List[str]],
attribute_to_layer_input: bool = False,
) -> Tuple[TCAV, Dict[str, Concept]]:
# Create Concepts
concepts_dict = create_concepts()
tcav = create_TCAV(
save_path, classifier, layers, attribute_to_layer_input=attribute_to_layer_input
)
return tcav, concepts_dict
def remove_pkls(path: str) -> None:
pkl_files = glob.glob(os.path.join(path, "*.pkl"))
for pkl_file in pkl_files:
os.remove(pkl_file)
class Test(BaseTest):
r"""
Class for testing the TCAV class through a sequence of operations:
- Create the Concepts (random tensor generation simulation)
- Create the TCAV class
- Generate Activations
- Compute the CAVs
- Interpret (the images - simulated with random tensors)
"""
def test_compute_cav_repeating_concept_ids(self) -> None:
with tempfile.TemporaryDirectory() as tmpdirname:
tcav = create_TCAV(tmpdirname, CustomClassifier(), "conv1")
experimental_sets = [
[create_concept("striped", 0), create_concept("random", 1)],
[create_concept("ceo", 2), create_concept("striped2", 0)],
]
with self.assertRaises(AssertionError):
tcav.compute_cavs(experimental_sets)
def test_compute_cav_repeating_concept_names(self) -> None:
with tempfile.TemporaryDirectory() as tmpdirname:
tcav = create_TCAV(tmpdirname, CustomClassifier(), "conv1")
experimental_sets = [
[create_concept("striped", 0), create_concept("random", 1)],
[create_concept("ceo", 2), create_concept("striped", 3)],
]
cavs = tcav.compute_cavs(experimental_sets)
self.assertTrue("0-1" in cavs.keys())
self.assertTrue("2-3" in cavs.keys())
self.assertEqual(cavs["0-1"]["conv1"].layer, "conv1")
self.assertEqual(cavs["2-3"]["conv1"].layer, "conv1")
self.assertEqual(cavs["0-1"]["conv1"].concepts[0].id, 0)
self.assertEqual(cavs["0-1"]["conv1"].concepts[0].name, "striped")
self.assertEqual(cavs["0-1"]["conv1"].concepts[1].id, 1)
self.assertEqual(cavs["0-1"]["conv1"].concepts[1].name, "random")
self.assertEqual(cavs["0-1"]["conv1"].stats["classes"], [0, 1])
self.assertAlmostEqual(
cavs["0-1"]["conv1"].stats["accs"].item(), 0.4848, delta=0.001
)
self.assertEqual(
list(cavs["0-1"]["conv1"].stats["weights"].shape), [2, 128]
)
self.assertEqual(cavs["2-3"]["conv1"].concepts[0].id, 2)
self.assertEqual(cavs["2-3"]["conv1"].concepts[0].name, "ceo")
self.assertEqual(cavs["2-3"]["conv1"].concepts[1].id, 3)
self.assertEqual(cavs["2-3"]["conv1"].concepts[1].name, "striped")
self.assertEqual(cavs["2-3"]["conv1"].stats["classes"], [2, 3])
self.assertAlmostEqual(
cavs["2-3"]["conv1"].stats["accs"].item(), 0.4848, delta=0.001
)
self.assertEqual(
list(cavs["2-3"]["conv1"].stats["weights"].shape), [2, 128]
)
def compute_cavs_interpret(
self,
experimental_sets: List[List[str]],
force_train: bool,
accs: float,
sign_count: float,
magnitude: float,
processes: int = 1,
remove_activation: bool = False,
layers: Union[str, List[str]] = "conv2",
attribute_to_layer_input: bool = False,
) -> None:
classifier = CustomClassifier()
self._compute_cavs_interpret(
experimental_sets,
force_train,
accs,
sign_count,
magnitude,
classifier,
processes=processes,
remove_activation=remove_activation,
layers=layers,
attribute_to_layer_input=attribute_to_layer_input,
)
def _compute_cavs_interpret(
self,
experimental_set_list: List[List[str]],
force_train: bool,
accs: Union[float, List[float]],
sign_count: Union[float, List[float]],
magnitude: Union[float, List[float]],
classifier: Classifier,
processes: int = 1,
remove_activation: bool = False,
layers: Union[str, List[str]] = "conv2",
attribute_to_layer_input: bool = False,
) -> None:
def wrap_in_list_if_not_already(input):
return (
input
if isinstance(input, list)
else [
input,
]
)
with tempfile.TemporaryDirectory() as tmpdirname:
tcav, concept_dict = init_TCAV(
tmpdirname,
classifier,
layers,
attribute_to_layer_input=attribute_to_layer_input,
)
experimental_sets = self._create_experimental_sets(
experimental_set_list, concept_dict
)
# Compute CAVs
tcav.compute_cavs(
experimental_sets,
force_train=force_train,
processes=processes,
)
concepts_key = concepts_to_str(experimental_sets[0])
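            # concepts_to_str joins the concept ids into a key such as "0-1"
            # for a set whose concepts have ids 0 and 1.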
_layers: List[str] = wrap_in_list_if_not_already(layers)
_accs: List[float] = wrap_in_list_if_not_already(accs)
_sign_counts: List[float] = wrap_in_list_if_not_already(sign_count)
_magnitudes: List[float] = wrap_in_list_if_not_already(magnitude)
for layer, acc, sign_count, magnitude in zip(
_layers, _accs, _sign_counts, _magnitudes
):
stats = cast(Dict[str, Tensor], tcav.cavs[concepts_key][layer].stats)
self.assertEqual(
stats["weights"].shape,
torch.Size([2, 16]),
)
if not isinstance(classifier, CustomClassifier_WO_Returning_Metrics):
self.assertAlmostEqual(
stats["accs"].item(),
acc,
delta=0.0001,
)
# Provoking a CAV absence by deleting the .pkl files and one
# activation
if remove_activation:
remove_pkls(tmpdirname)
for fl in glob.glob(tmpdirname + "/av/" + layer + "/random-*-*"):
os.remove(fl)
# Interpret
inputs = 100 * get_inputs_tensor()
scores = tcav.interpret(
inputs=inputs,
experimental_sets=experimental_sets,
target=0,
processes=processes,
)
self.assertAlmostEqual(
cast(float, scores[concepts_key][layer]["sign_count"][0].item()),
sign_count,
delta=0.0001,
)
self.assertAlmostEqual(
cast(float, scores[concepts_key][layer]["magnitude"][0].item()),
magnitude,
delta=0.0001,
)
def _create_experimental_sets(
self, experimental_set_list: List[List[str]], concept_dict: Dict[str, Concept]
) -> List[List[Concept]]:
experimental_sets = []
for concept_set in experimental_set_list:
concepts = []
for concept in concept_set:
self.assertTrue(concept in concept_dict)
concepts.append(concept_dict[concept])
experimental_sets.append(concepts)
return experimental_sets
# Init - Generate Activations
def test_TCAV_1(self) -> None:
# Create Concepts
concepts_dict = create_concepts()
for concept in concepts_dict.values():
self.assertTrue(concept.data_iter is not None)
data_iter = cast(DataLoader, concept.data_iter)
self.assertEqual(
len(cast(CustomIterableDataset, data_iter.dataset).file_itr), 100
)
self.assertTrue(concept.data_iter is not None)
total_batches = 0
for data in cast(Iterable, concept.data_iter):
total_batches += data.shape[0]
self.assertEqual(data.shape[1:], torch.Size([1, 10, 10]))
self.assertEqual(total_batches, 100)
def test_TCAV_generate_all_activations(self) -> None:
def forward_hook_wrapper(expected_act: Tensor):
def forward_hook(module, inp, out=None):
out = torch.reshape(out, (out.shape[0], -1))
self.assertEqual(out.detach().shape[1:], expected_act.shape[1:])
return forward_hook
with tempfile.TemporaryDirectory() as tmpdirname:
layers = ["conv1", "conv2", "fc1", "fc2"]
tcav, concept_dict = init_TCAV(
tmpdirname, CustomClassifier(), layers=layers
)
tcav.concepts = set(concept_dict.values())
# generating all activations for given layers and concepts
tcav.generate_all_activations()
# verify that all activations exist and have correct shapes
for layer in layers:
for _, concept in concept_dict.items():
self.assertTrue(
AV.exists(
tmpdirname, "default_model_id", concept.identifier, layer
)
)
concept_meta: Dict[int, int] = defaultdict(int)
for _, concept in concept_dict.items():
activations = AV.load(
tmpdirname, "default_model_id", concept.identifier, layer
)
def batch_collate(batch):
return torch.cat(batch)
self.assertTrue(concept.data_iter is not None)
                    assert activations is not None
for activation in cast(
Iterable, DataLoader(activations, collate_fn=batch_collate)
):
concept_meta[concept.id] += activation.shape[0]
layer_module = _get_module_from_name(tcav.model, layer)
for data in cast(Iterable, concept.data_iter):
hook = layer_module.register_forward_hook(
forward_hook_wrapper(activation)
)
tcav.model(data)
hook.remove()
# asserting the length of entire dataset for each concept
for concept_meta_i in concept_meta.values():
self.assertEqual(concept_meta_i, 100)
def test_TCAV_multi_layer(self) -> None:
concepts = [["striped", "random"], ["ceo", "random"]]
layers = ["conv1", "conv2"]
classifier = CustomClassifier()
with tempfile.TemporaryDirectory() as tmpdirname:
tcav, concept_dict = init_TCAV(tmpdirname, classifier, layers)
experimental_sets = self._create_experimental_sets(concepts, concept_dict)
# Interpret
inputs = 100 * get_inputs_tensor()
scores = tcav.interpret(
inputs=inputs,
experimental_sets=experimental_sets,
target=0,
processes=3,
)
self.assertEqual(len(scores.keys()), len(experimental_sets))
for _, tcavs in scores.items():
for _, tcav_i in tcavs.items():
self.assertEqual(tcav_i["sign_count"].shape[0], 2)
self.assertEqual(tcav_i["magnitude"].shape[0], 2)
# Force Train
def test_TCAV_1_1_a(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=5,
)
def test_TCAV_1_1_a_wo_acc_metric(self) -> None:
self._compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
True,
-1.0, # acc is not defined, this field will not be asserted
0.5000,
8.185208066890937e-09,
CustomClassifier_WO_Returning_Metrics(),
processes=2,
)
def test_TCAV_1_1_b(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"]], True, 0.4848, 0.5000, 8.185208066890937e-09
)
def test_TCAV_1_1_c(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"], ["striped", "ceo"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=6,
)
# Non-existing concept in the experimental set ("dotted")
def test_TCAV_1_1_d(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["dotted", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=4,
)
# Force Train
def test_TCAV_0_1(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=2,
)
def test_TCAV_0_1_attr_to_inputs(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=2,
layers="relu2",
attribute_to_layer_input=True,
)
# Do not Force Train
def test_TCAV_0_0(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=2,
)
# Non-existing concept in the experimental set ("dotted"), do Not Force Train
def test_TCAV_1_0_b(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["dotted", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=5,
)
# Do not Force Train, Missing Activation
def test_TCAV_1_0_1(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=5,
remove_activation=True,
)
# Do not run parallel:
# Force Train
def test_TCAV_x_1_1_a(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
def test_TCAV_x_1_1_b(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
def test_TCAV_x_1_1_c(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"], ["striped", "ceo"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
def test_TCAV_x_1_1_c_concept_order_changed(self) -> None:
self.compute_cavs_interpret(
[["random", "striped"], ["random", "ceo"], ["ceo", "striped"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
# Non-existing concept in the experimental set ("dotted")
def test_TCAV_x_1_1_d(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["dotted", "random"]],
True,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
# Do not Force Train
def test_TCAV_x_1_0_a(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
def test_TCAV_x_1_0_1_attr_to_inputs(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
remove_activation=True,
layers="relu2",
attribute_to_layer_input=True,
)
# Non-existing concept in the experimental set ("dotted"), do Not Force Train
def test_TCAV_x_1_0_b(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["dotted", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
)
# Do not Force Train, Missing Activation
def test_TCAV_x_1_0_1(self) -> None:
self.compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
processes=1,
remove_activation=True,
)
def test_TCAV_x_1_0_1_w_flipped_class_id(self) -> None:
self._compute_cavs_interpret(
[["striped", "random"], ["ceo", "random"]],
False,
0.4848,
0.5000,
8.185208066890937e-09,
CustomClassifier_W_Flipped_Class_Id(),
processes=1,
)
# Testing TCAV with default classifier and experimental sets of varying lengths
    def test_exp_sets_with_different_lengths(self) -> None:
try:
import sklearn
import sklearn.linear_model
import sklearn.svm # noqa: F401
except ImportError:
raise unittest.SkipTest("sklearn is not available.")
# Create Concepts
concepts_dict = create_concepts()
        # defining experimental sets of different lengths
experimental_set_list = [["striped", "random"], ["ceo", "striped", "random"]]
experimental_sets_diff_length = self._create_experimental_sets(
experimental_set_list, concepts_dict
)
exp_sets_striped_random = self._create_experimental_sets(
[["striped", "random"]], concepts_dict
)
exp_sets_ceo_striped_random = self._create_experimental_sets(
[["ceo", "striped", "random"]], concepts_dict
)
striped_random_str = concepts_to_str(exp_sets_striped_random[0])
ceo_striped_random_str = concepts_to_str(exp_sets_ceo_striped_random[0])
model = BasicModel_ConvNet()
model.eval()
layers = ["conv1", "conv2", "fc1", "fc2"]
inputs = torch.randn(5, 1, 10, 10)
with tempfile.TemporaryDirectory() as tmpdirname:
tcav_diff_length = TCAV(
model,
layers,
save_path=tmpdirname,
)
# computing tcav scores for `striped and random` set and
# `ceo, striped and random` set at once using one `interpret`
# call.
interpret_diff_lengths = tcav_diff_length.interpret(
inputs, experimental_sets=experimental_sets_diff_length, target=0
)
# computing tcav scores for striped and random
interpret_striped_random = tcav_diff_length.interpret(
inputs, experimental_sets=exp_sets_striped_random, target=0
)
# computing tcav scores for ceo, striped and random
interpret_ceo_striped_random = tcav_diff_length.interpret(
inputs, experimental_sets=exp_sets_ceo_striped_random, target=0
)
for combined, separate in zip(
interpret_diff_lengths[striped_random_str].items(),
interpret_striped_random[striped_random_str].items(),
):
self.assertEqual(combined[0], separate[0])
for c_tcav, s_tcav in zip(combined[1].items(), separate[1].items()):
self.assertEqual(c_tcav[0], s_tcav[0])
assertTensorAlmostEqual(self, c_tcav[1], s_tcav[1])
for combined, separate in zip(
interpret_diff_lengths[ceo_striped_random_str].items(),
interpret_ceo_striped_random[ceo_striped_random_str].items(),
):
self.assertEqual(combined[0], separate[0])
for c_tcav, s_tcav in zip(combined[1].items(), separate[1].items()):
self.assertEqual(c_tcav[0], s_tcav[0])
assertTensorAlmostEqual(self, c_tcav[1], s_tcav[1])
def test_model_ids_in_tcav(
self,
) -> None:
# creating concepts and mapping between concepts and their names
concepts_dict = create_concepts()
        # defining two experimental sets
experimental_set_list = [["striped", "random"], ["dotted", "random"]]
experimental_sets = self._create_experimental_sets(
experimental_set_list, concepts_dict
)
model = BasicModel_ConvNet()
model.eval()
layer = "conv2"
inputs = 100 * get_inputs_tensor()
with tempfile.TemporaryDirectory() as tmpdirname:
tcav1 = TCAV(
model,
layer,
model_id="my_basic_model1",
classifier=CustomClassifier(),
save_path=tmpdirname,
)
interpret1 = tcav1.interpret(
inputs, experimental_sets=experimental_sets, target=0
)
tcav2 = TCAV(
model,
layer,
model_id="my_basic_model2",
classifier=CustomClassifier(),
save_path=tmpdirname,
)
interpret2 = tcav2.interpret(
inputs, experimental_sets=experimental_sets, target=0
)
# testing that different folders were created for two different
# ids of the model
self.assertTrue(
AV.exists(
tmpdirname,
"my_basic_model1",
concepts_dict["striped"].identifier,
layer,
)
)
self.assertTrue(
AV.exists(
tmpdirname,
"my_basic_model2",
concepts_dict["striped"].identifier,
layer,
)
)
for interpret1_elem, interpret2_elem in zip(interpret1, interpret2):
for interpret1_sub_elem, interpret2_sub_elem in zip(
interpret1[interpret1_elem], interpret2[interpret2_elem]
):
assertTensorAlmostEqual(
self,
interpret1[interpret1_elem][interpret1_sub_elem]["sign_count"],
interpret2[interpret2_elem][interpret2_sub_elem]["sign_count"],
0.0,
)
assertTensorAlmostEqual(
self,
interpret1[interpret1_elem][interpret1_sub_elem]["magnitude"],
interpret2[interpret2_elem][interpret2_sub_elem]["magnitude"],
0.0,
)
self.assertEqual(interpret1_sub_elem, interpret2_sub_elem)
self.assertEqual(interpret1_elem, interpret2_elem)
|
#!/usr/bin/env python3
import torch
import torch.nn as nn
class SigmoidModel(nn.Module):
"""
Model architecture from:
    https://medium.com/coinmonks/create-a-neural-network-in-pytorch-and-make-your-life-simpler-ec5367895199
"""
def __init__(self, num_in, num_hidden, num_out) -> None:
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden)
self.lin2 = nn.Linear(num_hidden, num_out)
self.relu1 = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input):
lin1 = self.lin1(input)
lin2 = self.lin2(self.relu1(lin1))
return self.sigmoid(lin2)
class SoftmaxModel(nn.Module):
"""
Model architecture from:
https://adventuresinmachinelearning.com/pytorch-tutorial-deep-learning/
"""
def __init__(self, num_in, num_hidden, num_out, inplace=False) -> None:
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden)
self.lin2 = nn.Linear(num_hidden, num_hidden)
self.lin3 = nn.Linear(num_hidden, num_out)
self.relu1 = nn.ReLU(inplace=inplace)
self.relu2 = nn.ReLU(inplace=inplace)
self.softmax = nn.Softmax(dim=1)
def forward(self, input):
lin1 = self.relu1(self.lin1(input))
lin2 = self.relu2(self.lin2(lin1))
lin3 = self.lin3(lin2)
return self.softmax(lin3)
class SigmoidDeepLiftModel(nn.Module):
"""
Model architecture from:
    https://medium.com/coinmonks/create-a-neural-network-in-pytorch-and-make-your-life-simpler-ec5367895199
"""
def __init__(self, num_in, num_hidden, num_out) -> None:
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden, bias=False)
self.lin2 = nn.Linear(num_hidden, num_out, bias=False)
self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in))
self.lin2.weight = nn.Parameter(torch.ones(num_out, num_hidden))
self.relu1 = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input):
lin1 = self.lin1(input)
lin2 = self.lin2(self.relu1(lin1))
return self.sigmoid(lin2)
class SoftmaxDeepLiftModel(nn.Module):
"""
Model architecture from:
https://adventuresinmachinelearning.com/pytorch-tutorial-deep-learning/
"""
def __init__(self, num_in, num_hidden, num_out) -> None:
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden)
self.lin2 = nn.Linear(num_hidden, num_hidden)
self.lin3 = nn.Linear(num_hidden, num_out)
self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in))
self.lin2.weight = nn.Parameter(torch.ones(num_hidden, num_hidden))
self.lin3.weight = nn.Parameter(torch.ones(num_out, num_hidden))
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, input):
lin1 = self.relu1(self.lin1(input))
lin2 = self.relu2(self.lin2(lin1))
lin3 = self.lin3(lin2)
return self.softmax(lin3)
|
#!/usr/bin/env python3
from typing import no_type_check, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
"""
@no_type_check annotation is applied to type-hinted models to avoid errors
related to mismatch with parent (nn.Module) signature. # type_ignore is not
possible here, since it causes errors in JIT scripting code which parses
the relevant type hints.
"""
class BasicLinearReLULinear(nn.Module):
def __init__(self, in_features, out_features=5, bias=False) -> None:
super().__init__()
self.fc1 = nn.Linear(in_features, out_features, bias=bias)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(out_features, 1, bias=bias)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
return x
class MixedKwargsAndArgsModule(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y=None):
if y is not None:
return x + y
return x
class BasicModel(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, input):
input = 1 - F.relu(1 - input)
return input
class BasicModel2(nn.Module):
"""
Example model one from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1) - 1 - ReLU(x2))
"""
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2):
relu_out1 = F.relu(input1)
relu_out2 = F.relu(input2)
return F.relu(relu_out1 - 1 - relu_out2)
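# Worked example for the formula above (values are illustrative, not from
# the original file): with x1 = 3 and x2 = 1,
#   f(3, 1) = ReLU(ReLU(3) - 1 - ReLU(1)) = ReLU(3 - 1 - 1) = ReLU(1) = 1,
# so BasicModel2()(torch.tensor([3.0]), torch.tensor([1.0])) returns tensor([1.]).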
class BasicModel3(nn.Module):
"""
Example model two from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2))
"""
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2):
relu_out1 = F.relu(input1 - 1)
relu_out2 = F.relu(input2)
return F.relu(relu_out1 - relu_out2)
class BasicModel4_MultiArgs(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2) / x3)
"""
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2, additional_input1, additional_input2=0):
relu_out1 = F.relu(input1 - 1)
relu_out2 = F.relu(input2)
relu_out2 = relu_out2.div(additional_input1)
return F.relu(relu_out1 - relu_out2)[:, additional_input2]
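# Worked example for the formula above (illustrative values): with
# input1 = [[3., 2.]], input2 = [[1., 1.]], additional_input1 = [[2., 2.]]
# and the default additional_input2 = 0:
#   ReLU(input1 - 1) = [[2., 1.]], ReLU(input2) / additional_input1 = [[0.5, 0.5]]
#   ReLU([[1.5, 0.5]])[:, 0] = tensor([1.5])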
class BasicModel5_MultiArgs(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) * x3[0] - ReLU(x2) * x3[1])
"""
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2, additional_input1, additional_input2=0):
relu_out1 = F.relu(input1 - 1) * additional_input1[0]
relu_out2 = F.relu(input2)
relu_out2 = relu_out2 * additional_input1[1]
return F.relu(relu_out1 - relu_out2)[:, additional_input2]
class BasicModel6_MultiTensor(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2):
input = input1 + input2
return 1 - F.relu(1 - input)[:, 1]
class BasicLinearModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(7, 1)
def forward(self, x1, x2):
return self.linear(torch.cat((x1, x2), dim=-1))
class BasicLinearModel2(nn.Module):
def __init__(self, in_features, out_features) -> None:
super().__init__()
self.linear = nn.Linear(in_features, out_features, bias=False)
def forward(self, input):
return self.linear(input)
class BasicLinearModel_Multilayer(nn.Module):
def __init__(self, in_features, hidden_nodes, out_features) -> None:
super().__init__()
self.linear1 = nn.Linear(in_features, hidden_nodes, bias=False)
self.linear2 = nn.Linear(hidden_nodes, out_features, bias=False)
def forward(self, input):
x = self.linear1(input)
return self.linear2(x)
class ReLUDeepLiftModel(nn.Module):
r"""
https://www.youtube.com/watch?v=f_iAM0NPwnM
"""
def __init__(self) -> None:
super().__init__()
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
def forward(self, x1, x2, x3=2):
return 2 * self.relu1(x1) + x3 * self.relu2(x2 - 1.5)
class LinearMaxPoolLinearModel(nn.Module):
def __init__(self) -> None:
super().__init__()
# kernel size -> 4
self.lin1 = nn.Linear(4, 4, bias=False)
self.lin1.weight = nn.Parameter(torch.eye(4, 4))
self.pool1 = nn.MaxPool1d(4)
self.lin2 = nn.Linear(1, 1, bias=False)
self.lin2.weight = nn.Parameter(torch.ones(1, 1))
def forward(self, x):
x = x.unsqueeze(1)
return self.lin2(self.pool1(self.lin1(x))[:, 0, :])
class BasicModelWithReusedModules(nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin1 = nn.Linear(3, 2)
self.relu = nn.ReLU()
self.lin2 = nn.Linear(2, 2)
def forward(self, inputs):
return self.relu(self.lin2(self.relu(self.lin1(inputs))))
class BasicModelWithReusedLinear(nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin1 = nn.Linear(3, 3)
self.relu = nn.ReLU()
def forward(self, inputs):
return self.relu(self.lin1(self.relu(self.lin1(inputs))))
class BasicModelWithSparseInputs(nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin1 = nn.Linear(3, 1)
self.lin1.weight = nn.Parameter(torch.tensor([[3.0, 1.0, 2.0]]))
self.lin1.bias = nn.Parameter(torch.zeros(1))
def forward(self, inputs, sparse_list):
return (
self.lin1(inputs) + (sparse_list[0] if torch.numel(sparse_list) > 0 else 0)
).sum()
class BasicModel_MaxPool_ReLU(nn.Module):
def __init__(self, inplace=False) -> None:
super().__init__()
self.maxpool = nn.MaxPool1d(3)
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
return self.relu(self.maxpool(x)).sum(dim=1)
class TanhDeepLiftModel(nn.Module):
r"""
Same as the ReLUDeepLiftModel, but with activations
that can have negative outputs
"""
def __init__(self) -> None:
super().__init__()
self.tanh1 = nn.Tanh()
self.tanh2 = nn.Tanh()
def forward(self, x1, x2):
return 2 * self.tanh1(x1) + 2 * self.tanh2(x2 - 1.5)
class ReLULinearModel(nn.Module):
r"""
Simple architecture similar to:
https://github.com/marcoancona/DeepExplain/blob/master/deepexplain/tests/test_tensorflow.py#L65
"""
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.l1 = nn.Linear(3, 1, bias=False)
self.l2 = nn.Linear(3, 1, bias=False)
self.l1.weight = nn.Parameter(torch.tensor([[3.0, 1.0, 0.0]])) # type: ignore
self.l2.weight = nn.Parameter(torch.tensor([[2.0, 3.0, 0.0]])) # type: ignore
self.relu = nn.ReLU(inplace=inplace)
self.l3 = nn.Linear(2, 1, bias=False)
self.l3.weight = nn.Parameter(torch.tensor([[1.0, 1.0]])) # type: ignore
@no_type_check
def forward(self, x1: Tensor, x2: Tensor, x3: int = 1) -> Tensor:
return self.l3(self.relu(torch.cat([self.l1(x1), x3 * self.l2(x2)], dim=1)))
class SimpleLRPModel(nn.Module):
def __init__(self, inplace) -> None:
super().__init__()
self.linear = nn.Linear(3, 3, bias=False)
self.linear.weight.data.fill_(2.0)
self.relu = torch.nn.ReLU(inplace=inplace)
self.linear2 = nn.Linear(3, 1, bias=False)
self.linear2.weight.data.fill_(3.0)
self.dropout = torch.nn.Dropout(p=0.01)
def forward(self, x):
return self.dropout(self.linear2(self.relu(self.linear(x))))
class Conv1dSeqModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.seq = nn.Sequential(nn.Conv1d(4, 2, 1), nn.ReLU(), nn.Linear(1000, 1))
def forward(self, inputs):
return self.seq(inputs)
class TextModule(nn.Module):
r"""Basic model that has inner embedding layer. This layer can be pluged
into a larger network such as `BasicEmbeddingModel` and help us to test
nested embedding layers
"""
def __init__(self, num_embeddings, embedding_dim, second_embedding=False) -> None:
super().__init__()
self.inner_embedding = nn.Embedding(num_embeddings, embedding_dim)
self.second_embedding = second_embedding
if self.second_embedding:
self.inner_embedding2 = nn.Embedding(num_embeddings, embedding_dim)
    def forward(self, input=None, another_input=None):
        assert input is not None, "The inputs to embedding module must be specified"
        embedding = self.inner_embedding(input)
        if self.second_embedding:
            another_embedding = self.inner_embedding2(
                input if another_input is None else another_input
            )
        # NOTE: passing `another_input` assumes `second_embedding=True`;
        # otherwise `another_embedding` below is undefined.
        return embedding if another_input is None else embedding + another_embedding
class BasicEmbeddingModel(nn.Module):
r"""
    Implements a basic model with an nn.Embedding layer. This simple model
    helps us test nested InterpretableEmbedding layers.
    The model has the following structure:
BasicEmbeddingModel(
(embedding1): Embedding(30, 100)
(embedding2): TextModule(
(inner_embedding): Embedding(30, 100)
)
(linear1): Linear(in_features=100, out_features=256, bias=True)
(relu): ReLU()
(linear2): Linear(in_features=256, out_features=1, bias=True)
)
"""
def __init__(
self,
num_embeddings=30,
embedding_dim=100,
hidden_dim=256,
output_dim=1,
nested_second_embedding=False,
) -> None:
super().__init__()
self.embedding1 = nn.Embedding(num_embeddings, embedding_dim)
self.embedding2 = TextModule(
num_embeddings, embedding_dim, nested_second_embedding
)
self.linear1 = nn.Linear(embedding_dim, hidden_dim, bias=False)
self.linear1.weight = nn.Parameter(torch.ones(hidden_dim, embedding_dim))
self.relu = nn.ReLU()
self.linear2 = nn.Linear(hidden_dim, output_dim)
self.linear2.weight = nn.Parameter(torch.ones(output_dim, hidden_dim))
def forward(self, input1, input2, input3=None):
embedding1 = self.embedding1(input1)
embedding2 = self.embedding2(input2, input3)
embeddings = embedding1 + embedding2
return self.linear2(self.relu(self.linear1(embeddings))).sum(1)
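# Hedged usage sketch for BasicEmbeddingModel (the helper name is
# hypothetical and not part of the original file); the shapes follow from
# the defaults num_embeddings=30, embedding_dim=100, output_dim=1 above.
def _demo_basic_embedding_model() -> None:
    model = BasicEmbeddingModel()
    token_ids1 = torch.randint(0, 30, (4, 7))  # batch of 4 sequences of length 7
    token_ids2 = torch.randint(0, 30, (4, 7))
    out = model(token_ids1, token_ids2)
    # linear2 maps to output_dim=1 and .sum(1) reduces the sequence axis
    assert out.shape == (4, 1)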
class MultiRelu(nn.Module):
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.relu1 = nn.ReLU(inplace=inplace)
self.relu2 = nn.ReLU(inplace=inplace)
@no_type_check
def forward(self, arg1: Tensor, arg2: Tensor) -> Tuple[Tensor, Tensor]:
return (self.relu1(arg1), self.relu2(arg2))
class BasicModel_MultiLayer(nn.Module):
def __init__(self, inplace=False, multi_input_module=False) -> None:
super().__init__()
# Linear 0 is simply identity transform
self.multi_input_module = multi_input_module
self.linear0 = nn.Linear(3, 3)
self.linear0.weight = nn.Parameter(torch.eye(3))
self.linear0.bias = nn.Parameter(torch.zeros(3))
self.linear1 = nn.Linear(3, 4)
self.linear1.weight = nn.Parameter(torch.ones(4, 3))
self.linear1.bias = nn.Parameter(torch.tensor([-10.0, 1.0, 1.0, 1.0]))
self.linear1_alt = nn.Linear(3, 4)
self.linear1_alt.weight = nn.Parameter(torch.ones(4, 3))
self.linear1_alt.bias = nn.Parameter(torch.tensor([-10.0, 1.0, 1.0, 1.0]))
self.multi_relu = MultiRelu(inplace=inplace)
self.relu = nn.ReLU(inplace=inplace)
self.linear2 = nn.Linear(4, 2)
self.linear2.weight = nn.Parameter(torch.ones(2, 4))
self.linear2.bias = nn.Parameter(torch.tensor([-1.0, 1.0]))
@no_type_check
def forward(
self,
x: Tensor,
add_input: Optional[Tensor] = None,
multidim_output: bool = False,
):
input = x if add_input is None else x + add_input
lin0_out = self.linear0(input)
lin1_out = self.linear1(lin0_out)
if self.multi_input_module:
relu_out1, relu_out2 = self.multi_relu(lin1_out, self.linear1_alt(input))
relu_out = relu_out1 + relu_out2
else:
relu_out = self.relu(lin1_out)
lin2_out = self.linear2(relu_out)
if multidim_output:
stack_mid = torch.stack((lin2_out, 2 * lin2_out), dim=2)
return torch.stack((stack_mid, 4 * stack_mid), dim=3)
else:
return lin2_out
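# Illustrative shape sketch for BasicModel_MultiLayer (assuming a batch of
# two 3-feature inputs; not part of the original file):
#   model = BasicModel_MultiLayer()
#   model(torch.ones(2, 3)).shape                        # torch.Size([2, 2])
#   model(torch.ones(2, 3), multidim_output=True).shape  # torch.Size([2, 2, 2, 2])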
class BasicModelBoolInput(nn.Module):
def __init__(self) -> None:
super().__init__()
self.mod = BasicModel_MultiLayer()
def forward(
self,
x: Tensor,
add_input: Optional[Tensor] = None,
mult: float = 10.0,
):
assert x.dtype is torch.bool, "Input must be boolean"
return self.mod(x.float() * mult, add_input)
class BasicModel_MultiLayer_MultiInput(nn.Module):
def __init__(self) -> None:
super().__init__()
self.model = BasicModel_MultiLayer()
@no_type_check
def forward(self, x1: Tensor, x2: Tensor, x3: Tensor, scale: int):
return self.model(scale * (x1 + x2 + x3))
class BasicModel_MultiLayer_TrueMultiInput(nn.Module):
def __init__(self) -> None:
super().__init__()
self.m1 = BasicModel_MultiLayer()
self.m234 = BasicModel_MultiLayer_MultiInput()
@no_type_check
def forward(
self, x1: Tensor, x2: Tensor, x3: Tensor, x4: Optional[Tensor] = None
) -> Tensor:
a = self.m1(x1)
if x4 is None:
b = self.m234(x2, x3, x1, scale=1)
else:
b = self.m234(x2, x3, x4, scale=1)
return a + b
class BasicModel_ConvNet_One_Conv(nn.Module):
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU(inplace=inplace)
self.fc1 = nn.Linear(8, 4)
self.conv1.weight = nn.Parameter(torch.ones(2, 1, 3, 3)) # type: ignore
self.conv1.bias = nn.Parameter(torch.tensor([-50.0, -75.0])) # type: ignore
self.fc1.weight = nn.Parameter( # type: ignore
torch.cat([torch.ones(4, 5), -1 * torch.ones(4, 3)], dim=1)
)
self.fc1.bias = nn.Parameter(torch.zeros(4)) # type: ignore
self.relu2 = nn.ReLU(inplace=inplace)
@no_type_check
def forward(self, x: Tensor, x2: Optional[Tensor] = None):
if x2 is not None:
x = x + x2
x = self.relu1(self.conv1(x))
x = x.view(-1, 8)
return self.relu2(self.fc1(x))
class BasicModel_ConvNetWithPaddingDilation(nn.Module):
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, padding=3, stride=2, dilation=2)
self.relu1 = nn.ReLU(inplace=inplace)
self.fc1 = nn.Linear(16, 4)
@no_type_check
def forward(self, x: Tensor):
bsz = x.shape[0]
x = self.relu1(self.conv1(x))
x = x.reshape(bsz, 2, -1)
return self.fc1(x).reshape(bsz, -1)
class BasicModel_ConvNet(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(2, 4, 3, 1)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
@no_type_check
def forward(self, x: Tensor) -> Tensor:
x = self.relu1(self.conv1(x))
x = self.pool1(x)
x = self.relu2(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, 4)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
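# Illustrative shape trace for BasicModel_ConvNet, assuming the 10x10
# single-channel inputs used elsewhere in these tests (the model itself
# does not enforce this size):
#   (N, 1, 10, 10) -conv1-> (N, 2, 8, 8) -pool1-> (N, 2, 4, 4)
#   -conv2-> (N, 4, 2, 2) -pool2-> (N, 4, 1, 1) -view-> (N, 4)
#   -fc1-> (N, 8) -fc2-> (N, 10) -softmax-> (N, 10)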
class BasicModel_ConvNet_MaxPool1d(nn.Module):
"""Same as above, but with the MaxPool2d replaced
with a MaxPool1d. This is useful because the MaxPool modules
behave differently to other modules from the perspective
of the DeepLift Attributions
"""
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv1d(1, 2, 3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool1d(2)
self.conv2 = nn.Conv1d(2, 4, 3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool1d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
@no_type_check
def forward(self, x: Tensor) -> Tensor:
x = self.relu1(self.conv1(x))
x = self.pool1(x)
x = self.relu2(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, 4)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
class BasicModel_ConvNet_MaxPool3d(nn.Module):
"""Same as above, but with the MaxPool1d replaced
with a MaxPool3d. This is useful because the MaxPool modules
behave differently to other modules from the perspective
of the DeepLift Attributions
"""
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv3d(1, 2, 3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool3d(2)
self.conv2 = nn.Conv3d(2, 4, 3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool3d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
def forward(self, x):
x = self.relu1(self.conv1(x))
x = self.pool1(x)
x = self.relu2(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, 4)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
|