#!/usr/bin/env python3
from captum.robust._core.fgsm import FGSM # noqa
from captum.robust._core.metrics.attack_comparator import AttackComparator # noqa
from captum.robust._core.metrics.min_param_perturbation import ( # noqa
MinParamPerturbation,
)
from captum.robust._core.perturbation import Perturbation # noqa
from captum.robust._core.pgd import PGD # noqa
#!/usr/bin/env python3
from typing import Any, Callable, Optional, Tuple, Union
import torch
from captum._utils.common import (
_format_additional_forward_args,
_format_output,
_format_tensor_into_tuples,
_is_tuple,
_select_targets,
)
from captum._utils.gradient import (
apply_gradient_requirements,
compute_gradients,
undo_gradient_requirements,
)
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.log import log_usage
from captum.robust._core.perturbation import Perturbation
from torch import Tensor
class FGSM(Perturbation):
r"""
Fast Gradient Sign Method is a one-step method that can generate
adversarial examples.
For non-targeted attack, the formulation is::
x' = x + epsilon * sign(gradient of L(theta, x, y))
For targeted attack on t, the formulation is::
x' = x - epsilon * sign(gradient of L(theta, x, t))
``L(theta, x, y)`` is the model's loss function with respect to model
parameters, inputs and labels.
More details on Fast Gradient Sign Method can be found in the original
paper: https://arxiv.org/abs/1412.6572
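For illustration, the non-targeted update corresponds roughly to the
following sketch (``model``, ``x``, ``y`` and ``loss_fn`` are placeholder
names, not part of this module)::

    x.requires_grad_(True)
    loss = loss_fn(model(x), y)
    grad = torch.autograd.grad(loss, x)[0]
    x_adv = x + epsilon * grad.sign()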
"""
def __init__(
self,
forward_func: Callable,
loss_func: Optional[Callable] = None,
lower_bound: float = float("-inf"),
upper_bound: float = float("inf"),
) -> None:
r"""
Args:
forward_func (Callable): The pytorch model for which the attack is
computed.
loss_func (Callable, optional): Loss function whose gradient is
computed. The loss function should take in outputs of the
model and labels, and return a loss tensor.
The default loss function is the negative log of the model
output for the given target.
lower_bound (float, optional): Lower bound of input values.
Default: ``float("-inf")``
upper_bound (float, optional): Upper bound of input values.
e.g. image pixels must be in the range 0-255
Default: ``float("inf")``
Attributes:
bound (Callable): A function that bounds the input values based on
given lower_bound and upper_bound. Can be overwritten for
custom use cases if necessary.
zero_thresh (float): The threshold below which gradient will be treated
as zero. Can be modified for custom use cases if necessary.
"""
super().__init__()
self.forward_func = forward_func
self.loss_func = loss_func
self.bound = lambda x: torch.clamp(x, min=lower_bound, max=upper_bound)
self.zero_thresh = 10**-6
@log_usage()
def perturb(
self,
inputs: TensorOrTupleOfTensorsGeneric,
epsilon: float,
target: Any,
additional_forward_args: Any = None,
targeted: bool = False,
mask: Optional[TensorOrTupleOfTensorsGeneric] = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
This method computes and returns the perturbed input for each input tensor.
It supports both targeted and non-targeted attacks.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which adversarial
attack is computed. It can be provided as a single
tensor or a tuple of multiple tensors. If multiple
input tensors are provided, the batch sizes must be
aligned across all tensors.
epsilon (float): Step size of perturbation.
target (Any): True labels of inputs if non-targeted attack is
desired. Target class of inputs if targeted attack
is desired. Target will be passed to the loss function
to compute loss, so the type needs to match the
argument type of the loss function.
If using the default negative log as loss function,
labels should be of type int, tuple, tensor or list.
For general 2D outputs, labels can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the label for the corresponding example.
For outputs with > 2 dimensions, labels can be either:
- A single tuple, which contains #output_dims - 1
elements. This label index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
label for the corresponding example.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which perturbations should not be computed, this argument
can be provided. These arguments are provided to
forward_func in order following the arguments in inputs.
Default: None.
targeted (bool, optional): If attack should be targeted.
Default: False.
mask (Tensor or tuple[Tensor, ...], optional): mask of zeroes and ones
that defines which elements within the input tensor(s) are
perturbed. This mask must have the same shape and
dimensionality as the inputs. If this argument is not
provided, all elements will be perturbed.
Default: None.
Returns:
- **perturbed inputs** (*Tensor* or *tuple[Tensor, ...]*):
Perturbed input for each
input tensor. The perturbed inputs have the same shape and
dimensionality as the inputs.
If a single tensor is provided as inputs, a single tensor
is returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
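Examples::

    >>> # A minimal usage sketch; ``net`` is assumed to be a model returning
    >>> # per-class probabilities (required by the default negative log loss),
    >>> # and ``images`` / ``labels`` are a hypothetical input batch and
    >>> # integer labels.
    >>> fgsm = FGSM(net, lower_bound=0.0, upper_bound=1.0)
    >>> perturbed_images = fgsm.perturb(images, epsilon=0.03, target=labels)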
"""
is_inputs_tuple = _is_tuple(inputs)
inputs: Tuple[Tensor, ...] = _format_tensor_into_tuples(inputs)
masks: Union[Tuple[int, ...], Tuple[Tensor, ...]] = (
_format_tensor_into_tuples(mask)
if (mask is not None)
else (1,) * len(inputs)
)
gradient_mask = apply_gradient_requirements(inputs)
def _forward_with_loss() -> Tensor:
additional_inputs = _format_additional_forward_args(additional_forward_args)
outputs = self.forward_func( # type: ignore
*(*inputs, *additional_inputs) # type: ignore
if additional_inputs is not None
else inputs
)
if self.loss_func is not None:
return self.loss_func(outputs, target)
else:
loss = -torch.log(outputs)
return _select_targets(loss, target)
grads = compute_gradients(_forward_with_loss, inputs)
undo_gradient_requirements(inputs, gradient_mask)
perturbed_inputs = self._perturb(inputs, grads, epsilon, targeted, masks)
perturbed_inputs = tuple(
self.bound(perturbed_inputs[i]) for i in range(len(perturbed_inputs))
)
return _format_output(is_inputs_tuple, perturbed_inputs)
def _perturb(
self,
inputs: Tuple,
grads: Tuple,
epsilon: float,
targeted: bool,
masks: Tuple,
) -> Tuple:
r"""
A helper function to calculate the perturbed inputs given original
inputs, gradient of loss function and epsilon. The calculation is
different for targeted v.s. non-targeted as described above.
"""
multiplier = -1 if targeted else 1
inputs = tuple(
torch.where(
torch.abs(grad) > self.zero_thresh,
inp + multiplier * epsilon * torch.sign(grad) * mask,
inp,
)
for grad, inp, mask in zip(grads, inputs, masks)
)
return inputs
#!/usr/bin/env python3
from typing import Any, Callable, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from captum._utils.common import _format_output, _format_tensor_into_tuples, _is_tuple
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.log import log_usage
from captum.robust._core.fgsm import FGSM
from captum.robust._core.perturbation import Perturbation
from torch import Tensor
class PGD(Perturbation):
r"""
Projected Gradient Descent is an iterative version of the one-step attack
FGSM that can generate adversarial examples. It takes multiple gradient
steps to search for an adversarial perturbation within the desired
neighbor ball around the original inputs. In a non-targeted attack, the
formulation is::
x_0 = x
x_(t+1) = Clip_r(x_t + alpha * sign(gradient of L(theta, x, y)))
where Clip denotes the function that projects its argument to the r-neighbor
ball around x so that the perturbation will be bounded. Alpha is the step
size. L(theta, x, y) is the model's loss function with respect to model
parameters, inputs and targets.
In a targeted attack, the formulation is similar::
x_0 = x
x_(t+1) = Clip_r(x_t - alpha * sign(gradient of L(theta, x, t)))
More details on Projected Gradient Descent can be found in the original
paper: https://arxiv.org/abs/1706.06083
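For illustration, a single non-targeted Linf step corresponds roughly to
the following sketch (``model``, ``loss_fn``, ``x_orig``, ``x_t``, ``y``,
``alpha`` and ``radius`` are placeholder names)::

    x_t = x_t.detach().requires_grad_(True)
    loss = loss_fn(model(x_t), y)
    grad = torch.autograd.grad(loss, x_t)[0]
    x_next = x_t + alpha * grad.sign()
    x_next = x_orig + torch.clamp(x_next - x_orig, -radius, radius)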
"""
def __init__(
self,
forward_func: Callable,
loss_func: Optional[Callable] = None,
lower_bound: float = float("-inf"),
upper_bound: float = float("inf"),
) -> None:
r"""
Args:
forward_func (Callable): The pytorch model for which the attack is
computed.
loss_func (Callable, optional): Loss function whose gradient is
computed. The loss function should take in outputs of the
model and labels, and return the loss for each input tensor.
The default loss function is the negative log of the model
output for the given target.
lower_bound (float, optional): Lower bound of input values.
Default: ``float("-inf")``
upper_bound (float, optional): Upper bound of input values.
e.g. image pixels must be in the range 0-255
Default: ``float("inf")``
Attributes:
bound (Callable): A function that bounds the input values based on
given lower_bound and upper_bound. Can be overwritten for
custom use cases if necessary.
"""
super().__init__()
self.forward_func = forward_func
self.fgsm = FGSM(forward_func, loss_func)
self.bound = lambda x: torch.clamp(x, min=lower_bound, max=upper_bound)
@log_usage()
def perturb(
self,
inputs: TensorOrTupleOfTensorsGeneric,
radius: float,
step_size: float,
step_num: int,
target: Any,
additional_forward_args: Any = None,
targeted: bool = False,
random_start: bool = False,
norm: str = "Linf",
mask: Optional[TensorOrTupleOfTensorsGeneric] = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
This method computes and returns the perturbed input for each input tensor.
It supports both targeted and non-targeted attacks.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which adversarial
attack is computed. It can be provided as a single
tensor or a tuple of multiple tensors. If multiple
input tensors are provided, the batch sizes must be
aligned across all tensors.
radius (float): Radius of the neighbor ball centered around inputs.
The perturbation should be within this range.
step_size (float): Step size of each gradient step.
step_num (int): Number of gradient steps to take. A sufficiently large
value usually allows the perturbation to reach the border of the
neighbor ball.
target (Any): True labels of inputs if non-targeted attack is
desired. Target class of inputs if targeted attack
is desired. Target will be passed to the loss function
to compute loss, so the type needs to match the
argument type of the loss function.
If using the default negative log as loss function,
labels should be of type int, tuple, tensor or list.
For general 2D outputs, labels can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the label for the corresponding example.
For outputs with > 2 dimensions, labels can be either:
- A single tuple, which contains #output_dims - 1
elements. This label index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
label for the corresponding example.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which perturbations should not be computed, this argument
can be provided. These arguments are provided to
forward_func in order following the arguments in inputs.
Default: ``None``
targeted (bool, optional): If attack should be targeted.
Default: ``False``
random_start (bool, optional): If a random initialization is added to
inputs. Default: ``False``
norm (str, optional): Specifies the norm to calculate distance from
original inputs: ``Linf`` | ``L2``.
Default: ``Linf``
mask (Tensor or tuple[Tensor, ...], optional): mask of zeroes and ones
that defines which elements within the input tensor(s) are
perturbed. This mask must have the same shape and
dimensionality as the inputs. If this argument is not
provided, all elements are perturbed.
Default: None.
Returns:
- **perturbed inputs** (*Tensor* or *tuple[Tensor, ...]*):
Perturbed input for each
input tensor. The perturbed inputs have the same shape and
dimensionality as the inputs.
If a single tensor is provided as inputs, a single tensor
is returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
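Examples::

    >>> # A minimal usage sketch; ``net`` is assumed to return per-class
    >>> # probabilities (required by the default negative log loss), and
    >>> # ``images`` / ``labels`` are a hypothetical input batch and labels.
    >>> pgd = PGD(net, lower_bound=0.0, upper_bound=1.0)
    >>> perturbed_images = pgd.perturb(images, radius=0.1, step_size=0.02,
    >>>                                step_num=7, target=labels,
    >>>                                random_start=True)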
"""
def _clip(inputs: Tensor, outputs: Tensor) -> Tensor:
diff = outputs - inputs
if norm == "Linf":
return inputs + torch.clamp(diff, -radius, radius)
elif norm == "L2":
return inputs + torch.renorm(diff, 2, 0, radius)
else:
raise AssertionError("Norm constraint must be L2 or Linf.")
is_inputs_tuple = _is_tuple(inputs)
formatted_inputs = _format_tensor_into_tuples(inputs)
formatted_masks: Union[Tuple[int, ...], Tuple[Tensor, ...]] = (
_format_tensor_into_tuples(mask)
if (mask is not None)
else (1,) * len(formatted_inputs)
)
perturbed_inputs = formatted_inputs
if random_start:
perturbed_inputs = tuple(
self.bound(
self._random_point(
formatted_inputs[i], radius, norm, formatted_masks[i]
)
)
for i in range(len(formatted_inputs))
)
for _i in range(step_num):
perturbed_inputs = self.fgsm.perturb(
perturbed_inputs,
step_size,
target,
additional_forward_args,
targeted,
formatted_masks,
)
perturbed_inputs = tuple(
_clip(formatted_inputs[j], perturbed_inputs[j])
for j in range(len(perturbed_inputs))
)
# Detaching inputs to avoid dependency of gradient between steps
perturbed_inputs = tuple(
self.bound(perturbed_inputs[j]).detach()
for j in range(len(perturbed_inputs))
)
return _format_output(is_inputs_tuple, perturbed_inputs)
def _random_point(
self, center: Tensor, radius: float, norm: str, mask: Union[Tensor, int]
) -> Tensor:
r"""
A helper function that returns a uniform random point within the ball
with the given center and radius. Norm should be either L2 or Linf.
"""
if norm == "L2":
u = torch.randn_like(center)
unit_u = F.normalize(u.view(u.size(0), -1)).view(u.size())
# Sample a radius proportional to U^(1/d) so points are uniform within the
# d-dimensional ball, then add trailing singleton dims so r broadcasts
# against the (batch, ...) shaped direction tensor unit_u.
d = torch.numel(center[0])
r = (torch.rand(u.size(0)) ** (1.0 / d)) * radius
r = r[(...,) + (None,) * (u.dim() - 1)]
x = r * unit_u
return center + (x * mask)
elif norm == "Linf":
x = torch.rand_like(center) * radius * 2 - radius
return center + (x * mask)
else:
raise AssertionError("Norm constraint must be L2 or Linf.")
#!/usr/bin/env python3
from typing import Callable
class Perturbation:
r"""
All perturbation and attack algorithms extend this class. It requires
child classes to override the core ``perturb`` method.
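A minimal sketch of a custom perturbation (hypothetical ``GaussianNoise``;
``inputs`` is a placeholder tensor and ``torch`` is assumed imported)::

    >>> class GaussianNoise(Perturbation):
    >>>     def perturb(self, inputs, std):
    >>>         return inputs + std * torch.randn_like(inputs)
    >>> noised = GaussianNoise()(inputs, std=0.1)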
"""
perturb: Callable
r"""
This method computes and returns the perturbed input for each input tensor.
Deriving classes are responsible for implementing its logic accordingly.
Specific adversarial attack algorithms that extend this class take relevant
arguments.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which adversarial attack
is computed. It can be provided as a single tensor or
a tuple of multiple tensors. If multiple input tensors
are provided, the batch sizes must be aligned across all
tensors.
Returns:
- **perturbed inputs** (*Tensor* or *tuple[Tensor, ...]*):
Perturbed input for each
input tensor. The perturbed inputs have the same shape and
dimensionality as the inputs.
If a single tensor is provided as inputs, a single tensor
is returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
"""
def __call__(self, *args, **kwargs):
return self.perturb(*args, **kwargs)
#!/usr/bin/env python3
import warnings
from collections import namedtuple
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
List,
NamedTuple,
Optional,
Tuple,
TypeVar,
Union,
)
from captum._utils.common import (
_expand_additional_forward_args,
_format_additional_forward_args,
_reduce_list,
)
from captum.attr import Max, Mean, Min, Summarizer
from captum.log import log_usage
from captum.robust._core.perturbation import Perturbation
from torch import Tensor
ORIGINAL_KEY = "Original"
MetricResultType = TypeVar(
"MetricResultType", float, Tensor, Tuple[Union[float, Tensor], ...]
)
class AttackInfo(NamedTuple):
attack_fn: Union[Perturbation, Callable]
name: str
num_attempts: int
apply_before_preproc: bool
attack_kwargs: Dict[str, Any]
additional_args: List[str]
def agg_metric(inp):
if isinstance(inp, Tensor):
return inp.mean(dim=0)
elif isinstance(inp, tuple):
return tuple(agg_metric(elem) for elem in inp)
return inp
class AttackComparator(Generic[MetricResultType]):
r"""
Allows measuring model robustness for a given attack or set of attacks. This class
can be used with any metric(s) as well as any set of attacks, either based on
attacks / perturbations from captum.robust such as FGSM or PGD or external
augmentation methods or perturbations such as torchvision transforms.
"""
def __init__(
self,
forward_func: Callable,
metric: Callable[..., MetricResultType],
preproc_fn: Optional[Callable] = None,
) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of a model's forward
function.
metric (Callable): This function is applied to the model output in
order to compute the desired performance metric or metrics.
This function should have the following signature::
>>> def model_metric(model_out: Tensor, **kwargs: Any)
>>>     -> Union[float, Tensor, Tuple[Union[float, Tensor], ...]]:
All kwargs provided to evaluate are provided to the metric function,
following the model output. A single metric can be returned as
a float or tensor, and multiple metrics should be returned as either
a tuple or named tuple of floats or tensors. For a tensor metric,
the first dimension should match the batch size, corresponding to
metrics for each example. Tensor metrics are averaged over the first
dimension when aggregating multiple batch results.
If tensor metrics represent results for the full batch, the size of the
first dimension should be 1.
preproc_fn (Callable, optional): Optional method applied to inputs. Output
of preproc_fn is then provided as input to model, in addition to
additional_forward_args provided to evaluate.
Default: ``None``
"""
self.forward_func = forward_func
self.metric: Callable = metric
self.preproc_fn = preproc_fn
self.attacks: Dict[str, AttackInfo] = {}
self.summary_results: Dict[str, Summarizer] = {}
self.metric_aggregator = agg_metric
self.batch_stats = [Mean, Min, Max]
self.aggregate_stats = [Mean]
self.summary_results = {}
self.out_format = None
def add_attack(
self,
attack: Union[Perturbation, Callable],
name: Optional[str] = None,
num_attempts: int = 1,
apply_before_preproc: bool = True,
attack_kwargs: Optional[Dict[str, Any]] = None,
additional_attack_arg_names: Optional[List[str]] = None,
) -> None:
r"""
Adds attack to be evaluated when calling evaluate.
Args:
attack (Perturbation or Callable): This can either be an instance
of a Captum Perturbation / Attack
or any other perturbation or attack function such
as a torchvision transform.
name (str, optional): Name or identifier for attack, used as key for
attack results. This defaults to attack.__class__.__name__
if not provided and must be unique for all added attacks.
Default: ``None``
num_attempts (int, optional): Number of attempts that attack should be
repeated. This should only be set to > 1 for non-deterministic
attacks. The minimum, maximum, and average (best, worst, and
average case) are tracked for attack attempts.
Default: ``1``
apply_before_preproc (bool, optional): Defines whether attack should be
applied before or after preproc function.
Default: ``True``
attack_kwargs (dict, optional): Additional arguments to be provided to
given attack. This should be provided as a dictionary of keyword
arguments.
Default: ``None``
additional_attack_arg_names (list[str], optional): Any additional
arguments for the attack which are specific to the particular input
example or batch. An example of this is target, which is necessary
for some attacks such as FGSM or PGD. These arguments are included
if provided as a kwarg to evaluate.
Default: ``None``
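Example (a hedged sketch; ``comparator`` is an ``AttackComparator`` built
around a hypothetical model ``net``, and ``target`` is assumed to be passed
as a kwarg to ``evaluate``)::

    >>> comparator.add_attack(FGSM(net), name="FGSM 0.1",
    >>>                       attack_kwargs={"epsilon": 0.1},
    >>>                       additional_attack_arg_names=["target"])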
"""
if name is None:
name = attack.__class__.__name__
if attack_kwargs is None:
attack_kwargs = {}
if additional_attack_arg_names is None:
additional_attack_arg_names = []
if name in self.attacks:
raise RuntimeError(
"Cannot add attack with same name as existing attack {}".format(name)
)
self.attacks[name] = AttackInfo(
attack_fn=attack,
name=name,
num_attempts=num_attempts,
apply_before_preproc=apply_before_preproc,
attack_kwargs=attack_kwargs,
additional_args=additional_attack_arg_names,
)
def _format_summary(
self, summary: Union[Dict, List[Dict]]
) -> Dict[str, MetricResultType]:
r"""
This method reformats a given summary; particularly for tuples,
the Summarizer's summary format is a list of dictionaries,
each containing the summary for the corresponding elements.
We reformat this to return a dictionary with tuples containing
the summary results.
"""
if isinstance(summary, dict):
return summary
else:
summary_dict: Dict[str, Tuple] = {}
for key in summary[0]:
summary_dict[key] = tuple(s[key] for s in summary)
if self.out_format:
summary_dict[key] = self.out_format(*summary_dict[key])
return summary_dict # type: ignore
def _update_out_format(
self, out_metric: Union[float, Tensor, Tuple[Union[float, Tensor], ...]]
) -> None:
if (
not self.out_format
and isinstance(out_metric, tuple)
and hasattr(out_metric, "_fields")
):
self.out_format = namedtuple( # type: ignore
type(out_metric).__name__, cast(NamedTuple, out_metric)._fields
)
def _evaluate_batch(
self,
input_list: List[Any],
additional_forward_args: Optional[Tuple],
key_list: List[str],
batch_summarizers: Dict[str, Summarizer],
metric_kwargs: Dict[str, Any],
) -> None:
if additional_forward_args is None:
additional_forward_args = ()
if len(input_list) == 1:
model_out = self.forward_func(input_list[0], *additional_forward_args)
out_metric = self.metric(model_out, **metric_kwargs)
self._update_out_format(out_metric)
batch_summarizers[key_list[0]].update(out_metric)
else:
batched_inps = _reduce_list(input_list)
model_out = self.forward_func(batched_inps, *additional_forward_args)
current_count = 0
for i in range(len(input_list)):
batch_size = (
input_list[i].shape[0]
if isinstance(input_list[i], Tensor)
else input_list[i][0].shape[0]
)
out_metric = self.metric(
model_out[current_count : current_count + batch_size],
**metric_kwargs,
)
self._update_out_format(out_metric)
batch_summarizers[key_list[i]].update(out_metric)
current_count += batch_size
@log_usage()
def evaluate(
self,
inputs: Any,
additional_forward_args: Any = None,
perturbations_per_eval: int = 1,
**kwargs,
) -> Dict[str, Union[MetricResultType, Dict[str, MetricResultType]]]:
r"""
Evaluate model and attack performance on provided inputs
Args:
inputs (Any): Input for which attack metrics
are computed. It can be provided as a tensor, tuple of tensors,
or any raw input type (e.g. PIL image or text string).
This input is provided directly as input to preproc function as well
as any attack applied before preprocessing. If no pre-processing
function is provided, this input is provided directly to the main
model and all attacks.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the preprocessing
outputs (or inputs if preproc_fn is None), this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Default: ``None``
perturbations_per_eval (int, optional): Allows perturbations of multiple
attacks to be grouped and evaluated in one call of forward_fn
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
In order to apply this functionality, the output of preproc_fn
(or inputs itself if no preproc_fn is provided) must be a tensor
or tuple of tensors.
Default: ``1``
kwargs (Any, optional): Additional keyword arguments provided to metric
function as well as selected attacks based on chosen additional_args.
Default: ``None``
Returns:
- **attack results** Dict: str -> Dict[str, Union[Tensor, Tuple[Tensor, ...]]]:
Dictionary containing attack results for provided batch.
Maps attack name to dictionary,
containing best-case, worst-case and average-case results for attack.
Dictionary contains keys "mean", "max" and "min" when num_attempts > 1
and only "mean" for num_attempts = 1, which contains the (single) metric
result for the attack attempt.
An additional key of 'Original' is included with metric results
without any perturbations.
Examples::
>>> def accuracy_metric(model_out: Tensor, target: Tensor):
>>>     return (torch.argmax(model_out, dim=1) == target).float()
>>> attack_metric = AttackComparator(forward_func=resnet18,
>>>                                  metric=accuracy_metric,
>>>                                  preproc_fn=normalize)
>>> random_rotation = transforms.RandomRotation(degrees=30)
>>> jitter = transforms.ColorJitter()
>>> attack_metric.add_attack(random_rotation, "Random Rotation",
>>>                          num_attempts=5)
>>> attack_metric.add_attack(jitter, "Jitter", num_attempts=1)
>>> attack_metric.add_attack(FGSM(resnet18), "FGSM 0.1", num_attempts=1,
>>>                          apply_before_preproc=False,
>>>                          attack_kwargs={"epsilon": 0.1},
>>>                          additional_attack_arg_names=["target"])
>>> for images, labels in dataloader:
>>>     batch_results = attack_metric.evaluate(inputs=images, target=labels)
"""
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
expanded_additional_args = (
_expand_additional_forward_args(
additional_forward_args, perturbations_per_eval
)
if perturbations_per_eval > 1
else additional_forward_args
)
preproc_input = None
if self.preproc_fn is not None:
preproc_input = self.preproc_fn(inputs)
else:
preproc_input = inputs
input_list = [preproc_input]
key_list = [ORIGINAL_KEY]
batch_summarizers = {ORIGINAL_KEY: Summarizer([Mean()])}
if ORIGINAL_KEY not in self.summary_results:
self.summary_results[ORIGINAL_KEY] = Summarizer(
[stat() for stat in self.aggregate_stats]
)
def _check_and_evaluate(input_list, key_list):
if len(input_list) == perturbations_per_eval:
self._evaluate_batch(
input_list,
expanded_additional_args,
key_list,
batch_summarizers,
kwargs,
)
return [], []
return input_list, key_list
input_list, key_list = _check_and_evaluate(input_list, key_list)
for attack_key in self.attacks:
attack = self.attacks[attack_key]
if attack.num_attempts > 1:
stats = [stat() for stat in self.batch_stats]
else:
stats = [Mean()]
batch_summarizers[attack.name] = Summarizer(stats)
additional_attack_args = {}
for key in attack.additional_args:
if key not in kwargs:
warnings.warn(
f"Additional sample arg {key} not provided for {attack_key}"
)
else:
additional_attack_args[key] = kwargs[key]
for _ in range(attack.num_attempts):
if attack.apply_before_preproc:
attacked_inp = attack.attack_fn(
inputs, **additional_attack_args, **attack.attack_kwargs
)
preproc_attacked_inp = (
self.preproc_fn(attacked_inp)
if self.preproc_fn
else attacked_inp
)
else:
preproc_attacked_inp = attack.attack_fn(
preproc_input, **additional_attack_args, **attack.attack_kwargs
)
input_list.append(preproc_attacked_inp)
key_list.append(attack.name)
input_list, key_list = _check_and_evaluate(input_list, key_list)
if len(input_list) > 0:
final_add_args = _expand_additional_forward_args(
additional_forward_args, len(input_list)
)
self._evaluate_batch(
input_list, final_add_args, key_list, batch_summarizers, kwargs
)
return self._parse_and_update_results(batch_summarizers)
def _parse_and_update_results(
self, batch_summarizers: Dict[str, Summarizer]
) -> Dict[str, Union[MetricResultType, Dict[str, MetricResultType]]]:
results: Dict[str, Union[MetricResultType, Dict[str, MetricResultType]]] = {
ORIGINAL_KEY: self._format_summary(
cast(Union[Dict, List], batch_summarizers[ORIGINAL_KEY].summary)
)["mean"]
}
self.summary_results[ORIGINAL_KEY].update(
self.metric_aggregator(results[ORIGINAL_KEY])
)
for attack_key in self.attacks:
attack = self.attacks[attack_key]
attack_results = self._format_summary(
cast(Union[Dict, List], batch_summarizers[attack.name].summary)
)
results[attack.name] = attack_results
if len(attack_results) == 1:
key = next(iter(attack_results))
if attack.name not in self.summary_results:
self.summary_results[attack.name] = Summarizer(
[stat() for stat in self.aggregate_stats]
)
self.summary_results[attack.name].update(
self.metric_aggregator(attack_results[key])
)
else:
for key in attack_results:
summary_key = f"{attack.name} {key.title()} Attempt"
if summary_key not in self.summary_results:
self.summary_results[summary_key] = Summarizer(
[stat() for stat in self.aggregate_stats]
)
self.summary_results[summary_key].update(
self.metric_aggregator(attack_results[key])
)
return results
def summary(self) -> Dict[str, Dict[str, MetricResultType]]:
r"""
Returns average results over all previous batches evaluated.
Returns:
- **summary** Dict: str -> Dict[str, Union[Tensor, Tuple[Tensor, ...]]]:
Dictionary containing summarized average attack results.
Maps attack name (with "Mean Attempt", "Max Attempt" and "Min Attempt"
suffixes if num_attempts > 1) to a dictionary with a "mean" key holding
the summarized results, i.e. the running mean of results over all batches
evaluated since construction or the previous reset call. Tensor metrics
are averaged over dimension 0 for each batch in order to aggregate the
metrics collected per batch.
"""
return {
key: self._format_summary(
cast(Union[Dict, List], self.summary_results[key].summary)
)
for key in self.summary_results
}
def reset(self) -> None:
r"""
Reset stored average summary results for previous batches
"""
self.summary_results = {}
#!/usr/bin/env python3
import math
from enum import Enum
from typing import Any, Callable, cast, Dict, Generator, List, Optional, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_format_additional_forward_args,
_reduce_list,
)
from captum._utils.typing import TargetType
from captum.log import log_usage
from captum.robust._core.perturbation import Perturbation
from torch import Tensor
def drange(
min_val: Union[int, float], max_val: Union[int, float], step_val: Union[int, float]
) -> Generator[Union[int, float], None, None]:
curr = min_val
while curr < max_val:
yield curr
curr += step_val
def default_correct_fn(model_out: Tensor, target: TargetType) -> bool:
assert isinstance(model_out, Tensor) and model_out.ndim == 2, (
    "Model output must be a 2D tensor to use the default correct function; "
    "otherwise a custom correct function must be provided."
)
target_tensor = torch.tensor(target) if not isinstance(target, Tensor) else target
return all(torch.argmax(model_out, dim=1) == target_tensor)
class MinParamPerturbationMode(Enum):
LINEAR = 0
BINARY = 1
class MinParamPerturbation:
def __init__(
self,
forward_func: Callable,
attack: Union[Callable, Perturbation],
arg_name: str,
arg_min: Union[int, float],
arg_max: Union[int, float],
arg_step: Union[int, float],
mode: str = "linear",
num_attempts: int = 1,
preproc_fn: Optional[Callable] = None,
apply_before_preproc: bool = False,
correct_fn: Optional[Callable] = None,
) -> None:
r"""
Identifies minimal perturbation based on target variable which causes
misclassification (or other incorrect prediction) of target input.
More specifically, given a perturbation parametrized by a single value
(e.g. rotation by angle or mask percentage of top features based on
attribution results), MinParamPerturbation helps identify the minimum value
which leads to misclassification (or other model output change) with the
corresponding perturbed input.
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of a model's forward
function.
attack (Perturbation or Callable): This can either be an instance
of a Captum Perturbation / Attack
or any other perturbation or attack function such
as a torchvision transform.
The attack function must accept the argument named by ``arg_name`` as a
keyword argument, which is varied during the minimal perturbation search.
arg_name (str): Name of the argument / variable parametrizing the attack;
must be a kwarg of the attack. Examples are num_dropout or stdevs.
arg_min (int, float): Minimum value of target variable
arg_max (int, float): Maximum value of target variable
(not included in range)
arg_step (int, float): Minimum interval for increase of target variable.
mode (str, optional): Mode for search of minimum attack value;
either ``linear`` for linear search on variable, or ``binary`` for
binary search of variable
Default: ``linear``
num_attempts (int, optional): Number of attempts or trials with
given variable. This should only be set to > 1 for non-deterministic
perturbation / attack functions
Default: ``1``
preproc_fn (Callable, optional): Optional method applied to inputs. Output
of preproc_fn is then provided as input to model, in addition to
additional_forward_args provided to evaluate.
Default: ``None``
apply_before_preproc (bool, optional): Defines whether attack should be
applied before or after preproc function.
Default: ``False``
correct_fn (Callable, optional): This determines whether the perturbed input
leads to a correct or incorrect prediction. By default, this function
is set to the standard classification test for correctness
(comparing argmax of output with target), which requires model output to
be a 2D tensor, returning True if all batch examples are correct and
false otherwise. Setting this method allows
any custom behavior defining whether the perturbation is successful
at fooling the model. For non-classification use cases, a custom
function must be provided which determines correctness.
The first argument to this function must be the model out;
any additional arguments should be provided through
``correct_fn_kwargs``.
This function should have the following signature::
def correct_fn(model_out: Tensor, **kwargs: Any) -> bool
Method should return a boolean if correct (True) and incorrect (False).
Default: ``None`` (applies standard correct_fn for classification)
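Example (a hedged sketch; ``net`` is a hypothetical classifier and
``rotate(inp, angle)`` a hypothetical perturbation that rotates the input
by ``angle`` degrees)::

    >>> min_pert = MinParamPerturbation(forward_func=net,
    >>>                                 attack=rotate,
    >>>                                 arg_name="angle",
    >>>                                 arg_min=0,
    >>>                                 arg_max=45,
    >>>                                 arg_step=1,
    >>>                                 mode="binary")
    >>> rotated_image, min_angle = min_pert.evaluate(image, target=label)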
"""
self.forward_func = forward_func
self.attack = attack
self.arg_name = arg_name
self.arg_min = arg_min
self.arg_max = arg_max
self.arg_step = arg_step
assert self.arg_max > (
    self.arg_min + self.arg_step
), "Step size must be smaller than the range between min and max"
self.num_attempts = num_attempts
self.preproc_fn = preproc_fn
self.apply_before_preproc = apply_before_preproc
self.correct_fn = cast(
Callable, correct_fn if correct_fn is not None else default_correct_fn
)
assert (
mode.upper() in MinParamPerturbationMode.__members__
), f"Provided perturb mode {mode} is not valid - must be linear or binary"
self.mode = MinParamPerturbationMode[mode.upper()]
def _evaluate_batch(
self,
input_list: List,
additional_forward_args: Any,
correct_fn_kwargs: Optional[Dict[str, Any]],
target: TargetType,
) -> Optional[int]:
if additional_forward_args is None:
additional_forward_args = ()
all_kwargs = {}
if target is not None:
all_kwargs["target"] = target
if correct_fn_kwargs is not None:
all_kwargs.update(correct_fn_kwargs)
if len(input_list) == 1:
model_out = self.forward_func(input_list[0], *additional_forward_args)
out_metric = self.correct_fn(model_out, **all_kwargs)
return 0 if not out_metric else None
else:
batched_inps = _reduce_list(input_list)
model_out = self.forward_func(batched_inps, *additional_forward_args)
current_count = 0
for i in range(len(input_list)):
batch_size = (
input_list[i].shape[0]
if isinstance(input_list[i], Tensor)
else input_list[i][0].shape[0]
)
out_metric = self.correct_fn(
model_out[current_count : current_count + batch_size], **all_kwargs
)
if not out_metric:
return i
current_count += batch_size
return None
def _apply_attack(
self,
inputs: Any,
preproc_input: Any,
attack_kwargs: Optional[Dict[str, Any]],
param: Union[int, float],
) -> Tuple[Any, Any]:
if attack_kwargs is None:
attack_kwargs = {}
if self.apply_before_preproc:
attacked_inp = self.attack(
inputs, **attack_kwargs, **{self.arg_name: param}
)
preproc_attacked_inp = (
self.preproc_fn(attacked_inp) if self.preproc_fn else attacked_inp
)
else:
attacked_inp = self.attack(
preproc_input, **attack_kwargs, **{self.arg_name: param}
)
preproc_attacked_inp = attacked_inp
return preproc_attacked_inp, attacked_inp
def _linear_search(
self,
inputs: Any,
preproc_input: Any,
attack_kwargs: Optional[Dict[str, Any]],
additional_forward_args: Any,
expanded_additional_args: Any,
correct_fn_kwargs: Optional[Dict[str, Any]],
target: TargetType,
perturbations_per_eval: int,
) -> Tuple[Any, Optional[Union[int, float]]]:
input_list = []
attack_inp_list = []
param_list = []
for param in drange(self.arg_min, self.arg_max, self.arg_step):
for _ in range(self.num_attempts):
preproc_attacked_inp, attacked_inp = self._apply_attack(
inputs, preproc_input, attack_kwargs, param
)
input_list.append(preproc_attacked_inp)
param_list.append(param)
attack_inp_list.append(attacked_inp)
if len(input_list) == perturbations_per_eval:
successful_ind = self._evaluate_batch(
input_list,
expanded_additional_args,
correct_fn_kwargs,
target,
)
if successful_ind is not None:
return (
attack_inp_list[successful_ind],
param_list[successful_ind],
)
input_list = []
param_list = []
attack_inp_list = []
if len(input_list) > 0:
final_add_args = _expand_additional_forward_args(
additional_forward_args, len(input_list)
)
successful_ind = self._evaluate_batch(
input_list,
final_add_args,
correct_fn_kwargs,
target,
)
if successful_ind is not None:
return (
attack_inp_list[successful_ind],
param_list[successful_ind],
)
return None, None
def _binary_search(
self,
inputs: Any,
preproc_input: Any,
attack_kwargs: Optional[Dict[str, Any]],
additional_forward_args: Any,
expanded_additional_args: Any,
correct_fn_kwargs: Optional[Dict[str, Any]],
target: TargetType,
perturbations_per_eval: int,
) -> Tuple[Any, Optional[Union[int, float]]]:
min_range = self.arg_min
max_range = self.arg_max
min_so_far = None
min_input = None
while max_range > min_range:
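# Evaluate the attack at the midpoint of the current [min_range, max_range)
# interval (in units of arg_step); shrink the interval toward whichever half
# still contains the minimal successful parameter value.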
mid_step = ((max_range - min_range) // self.arg_step) // 2
if mid_step == 0 and min_range + self.arg_step < max_range:
mid_step = 1
mid = min_range + (mid_step * self.arg_step)
input_list = []
param_list = []
attack_inp_list = []
attack_success = False
for i in range(self.num_attempts):
preproc_attacked_inp, attacked_inp = self._apply_attack(
inputs, preproc_input, attack_kwargs, mid
)
input_list.append(preproc_attacked_inp)
param_list.append(mid)
attack_inp_list.append(attacked_inp)
if len(input_list) == perturbations_per_eval or i == (
self.num_attempts - 1
):
additional_args = expanded_additional_args
if len(input_list) != perturbations_per_eval:
additional_args = _expand_additional_forward_args(
additional_forward_args, len(input_list)
)
successful_ind = self._evaluate_batch(
input_list,
additional_args,
correct_fn_kwargs,
target,
)
if successful_ind is not None:
attack_success = True
max_range = mid
if min_so_far is None or min_so_far > mid:
min_so_far = mid
min_input = attack_inp_list[successful_ind]
break
input_list = []
param_list = []
attack_inp_list = []
if math.isclose(min_range, mid):
break
if not attack_success:
min_range = mid
return min_input, min_so_far
@log_usage()
def evaluate(
self,
inputs: Any,
additional_forward_args: Optional[Tuple] = None,
target: TargetType = None,
perturbations_per_eval: int = 1,
attack_kwargs: Optional[Dict[str, Any]] = None,
correct_fn_kwargs: Optional[Dict[str, Any]] = None,
) -> Tuple[Any, Optional[Union[int, float]]]:
r"""
This method evaluates the model at each perturbed input and identifies
the minimum perturbation that leads to an incorrect model prediction.
It is recommended to provide a single input (batch size = 1) when using
this to identify a minimal perturbation for the chosen example. If a
batch of examples is provided, the default correct function identifies
the minimal perturbation for at least 1 example in the batch to be
misclassified. A custom correct_fn can be provided to customize
this behavior and define correctness for the batch.
Args:
inputs (Any): Input for which minimal perturbation
is computed. It can be provided as a tensor, tuple of tensors,
or any raw input type (e.g. PIL image or text string).
This input is provided directly as input to preproc function
as well as any attack applied before preprocessing. If no
pre-processing function is provided,
this input is provided directly to the main model and all attacks.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the preprocessing
outputs (or inputs if preproc_fn is None), this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Default: ``None``
target (TargetType): Target class for classification. This is required if
using the default ``correct_fn``.
perturbations_per_eval (int, optional): Allows perturbations of multiple
attacks to be grouped and evaluated in one call of forward_fn
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
In order to apply this functionality, the output of preproc_fn
(or inputs itself if no preproc_fn is provided) must be a tensor
or tuple of tensors.
Default: ``1``
attack_kwargs (dict, optional): Optional dictionary of keyword
arguments provided to attack function
correct_fn_kwargs (dict, optional): Optional dictionary of keyword
arguments provided to correct function
Returns:
Tuple of (perturbed_inputs, param_val) if successful
else Tuple of (None, None)
- **perturbed inputs** (Any):
Perturbed input (output of attack) which results in incorrect
prediction.
- **param_val** (*int* or *float*):
Parameter value leading to the perturbed input causing misclassification.
Examples::
>>> def gaussian_noise(inp: Tensor, std: float) -> Tensor:
>>> return inp + std*torch.randn_like(inp)
>>> min_pert = MinParamPerturbation(forward_func=resnet18,
attack=gaussian_noise,
arg_name="std",
arg_min=0.0,
arg_max=2.0,
arg_step=0.01,
)
>>> for images, labels in dataloader:
>>> noised_image, min_std = min_pert.evaluate(inputs=images, target=labels)
"""
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
expanded_additional_args = (
_expand_additional_forward_args(
additional_forward_args, perturbations_per_eval
)
if perturbations_per_eval > 1
else additional_forward_args
)
preproc_input = inputs if not self.preproc_fn else self.preproc_fn(inputs)
if self.mode is MinParamPerturbationMode.LINEAR:
search_fn = self._linear_search
elif self.mode is MinParamPerturbationMode.BINARY:
search_fn = self._binary_search
else:
raise NotImplementedError(
"Chosen MinParamPerturbationMode is not supported!"
)
return search_fn(
inputs,
preproc_input,
attack_kwargs,
additional_forward_args,
expanded_additional_args,
correct_fn_kwargs,
target,
perturbations_per_eval,
)
#!/usr/bin/env python3
from captum.influence._core.influence import DataInfluence # noqa
from captum.influence._core.similarity_influence import SimilarityInfluence # noqa
from captum.influence._core.tracincp import TracInCP, TracInCPBase # noqa
from captum.influence._core.tracincp_fast_rand_proj import (
TracInCPFast,
TracInCPFastRandProj,
) # noqa
__all__ = [
"DataInfluence",
"SimilarityInfluence",
"TracInCPBase",
"TracInCP",
"TracInCPFast",
"TracInCPFastRandProj",
]
from abc import ABC, abstractmethod
from typing import Tuple
import torch
from torch import Tensor
class NearestNeighbors(ABC):
r"""
An abstract class to define a nearest neighbors data structure. Classes
implementing this interface are intended for computing proponents / opponents in
certain implementations of `TracInCPBase`. In particular, it is for use in
implementations which compute proponents / opponents of a test instance by
1) storing representations of training instances within a nearest neighbors data
structure, and 2) finding within that structure the nearest neighbor of the
representation of a test instance. The assumption is that the data structure
stores the tensors passed to the `setup` method, which we refer to as the "stored
tensors". If this class is used to find proponents / opponents, the nearest
neighbors of a tensor should be the stored tensors that have the largest
dot-product with the query.
"""
@abstractmethod
def get_nearest_neighbors(
self, query: torch.Tensor, k: int
) -> Tuple[Tensor, Tensor]:
r"""
Given a `query`, a tensor of shape (N, *), returns the nearest neighbors in the
"stored tensors" (see above). `query` represents a batch of N tensors, each
of common but arbitrary shape *. We always assume the 0-th dimension indexes
the batch. In use cases of this class for computing proponents / opponents,
the nearest neighbors of a tensor should be the stored tensors with the largest
dot-product with the tensor, and the tensors in `query` will all be 1D,
so that `query` is 2D.
Args:
query (Tensor): tensor representing the batch of tensors for which k-nearest
neighbors are desired. `query` is of shape (N, *), where N is the
size of the batch, i.e. the 0-th dimension of `query` indexes the
batch. * denotes an arbitrary shape, so that each tensor in the
batch can be of a common, but arbitrary shape.
k (int): The number of nearest neighbors to return.
Returns:
results (tuple): A tuple of `(indices, distances)` is returned. `indices`
is a 2D tensor where `indices[i,j]` is the index (within the
"stored tensors" passed to the `setup` method) of the `j`-th
nearest neighbor of the `i`-th instance in query, and
`distances[i,j]` is the corresponding distance. `indices` should
be of dtype `torch.long` so that it can be used to index torch
tensors.
"""
pass
@abstractmethod
def setup(self, data: torch.Tensor) -> None:
r"""
`data` denotes the "stored tensors". These are the tensors within which we
want to find the nearest neighbors to each tensor in a batch of tensors, via a
call to the `get_nearest_neighbors` method. Before we can call it, however,
we need to first store the stored tensors, by doing processing that indexes
the stored tensors in a form that enables nearest-neighbors computation.
This method does that preprocessing, and is assumed to be called before any
call to `get_nearest_neighbors`. For example, this method might put the
stored tensors in a K-d tree. The tensors in the "stored tensors" can be of a
common, but arbitrary shape, denoted *, so that `data` is of shape (N, *),
where N is the number of tensors in the stored tensors. Therefore, the 0-th
dimension indexes the tensors in the stored tensors.
Args:
data (Tensor): A tensor of shape (N, *) representing the stored tensors.
The 0-th dimension indexes the tensors in the stored tensors,
so that `data[i]` is the tensor with index `i`. The nearest
neighbors of a query will be referred to by their index.
"""
pass
class AnnoyNearestNeighbors(NearestNeighbors):
"""
This is an implementation of `NearestNeighbors` that uses the Annoy module. At a
high level, Annoy finds nearest neighbors by constructing binary trees in which
vectors reside at leaf nodes. Vectors near each other will tend to be in the same
leaf node. See https://tinyurl.com/2p89sb2h and https://github.com/spotify/annoy
for more details. Annoy has 1 key parameter: the number of trees to construct.
Increasing the number of trees leads to more accurate results, but longer time to
create the trees and memory usage. As mentioned in the `NearestNeighbors`
documentation, for the use case of computing proponents / opponents, the nearest
neighbors returned should be those with the largest dot product with the query
vector. The term "vector" is used here because Annoy stores 1D vectors. However
in our wrapper around Annoy, we will allow the stored tensors to be of a common
but arbitrary shape *, and flatten them before storing in the Annoy data structure.
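Example (a minimal sketch with hypothetical data; 1000 stored vectors of
dimension 128 and a query batch of 5)::

    >>> nn_index = AnnoyNearestNeighbors(num_trees=10)
    >>> nn_index.setup(torch.randn(1000, 128))
    >>> indices, distances = nn_index.get_nearest_neighbors(
    ...     torch.randn(5, 128), k=3
    ... )
    >>> indices.shape, distances.shape  # torch.Size([5, 3]), torch.Size([5, 3])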
"""
def __init__(self, num_trees: int = 10) -> None:
"""
Args:
num_trees (int): The number of trees to use. Increasing this number gives
more accurate computation of nearest neighbors, but requires longer
setup time to create the trees, as well as memory.
"""
try:
import annoy # noqa
except ImportError:
raise ValueError(
(
"Using `AnnoyNearestNeighbors` requires installing the annoy "
"module. If pip is installed, this can be done with "
"`pip install --user annoy`."
)
)
self.num_trees = num_trees
def setup(self, data: torch.Tensor) -> None:
"""
`data` denotes the "stored tensors". These are the tensors within which we
want to find the nearest neighbors to a query tensor, via a call to the
`get_nearest_neighbors` method. Before we can call `get_nearest_neighbors`,
we need to first store the stored tensors, by doing processing that indexes
the stored tensors in a form that enables nearest-neighbors computation.
This method does that preprocessing, and is assumed to be called before any
call to `get_nearest_neighbors`. In particular, it creates the trees used to
index the stored tensors. This index is built to enable computation of
vectors that have the largest dot-product with the query tensors. The tensors
in the "stored tensors" can be of a common, but arbitrary shape, denoted *, so
that `data` is of shape (N, *), where N is the number of tensors in the stored
tensors. Therefore, the 0-th dimension indexes the tensors in the stored
tensors.
Args:
data (Tensor): A tensor of shape (N, *) representing the stored tensors.
The 0-th dimension indexes the tensors in the stored tensors,
so that `data[i]` is the tensor with index `i`. The nearest
neighbors of a query will be referred to by their index.
"""
import annoy
data = data.view((len(data), -1))
projection_dim = data.shape[1]
self.knn_index = annoy.AnnoyIndex(projection_dim, "dot")
for (i, projection) in enumerate(data):
self.knn_index.add_item(i, projection)
self.knn_index.build(self.num_trees)
def get_nearest_neighbors(
self, query: torch.Tensor, k: int
) -> Tuple[Tensor, Tensor]:
r"""
Given a `query`, a tensor of shape (N, *), returns the nearest neighbors in the
"stored tensors" (see above). `query` represents a batch of N tensors, each
of common but arbitrary shape *. We always assume the 0-th dimension indexes
the batch. In use cases of this class for computing proponents / opponents,
the nearest neighbors of a tensor should be the stored tensors with the largest
dot-product with the tensor, and the tensors in `query` will all be 1D,
so that `query` is 2D. This implementation returns the stored tensors
that have the largest dot-product with the query tensor, and does not constrain
the tensors in `query` or in the stored tensors to be 1D. If tensors are of
dimension greater than 1D, their dot-product will be defined to be the
dot-product of the flattened version of tensors.
Args:
query (Tensor): tensor representing the batch of tensors for which k-nearest
neighbors are desired. `query` is of shape (N, *), where N is the
size of the batch, i.e. the 0-th dimension of `query` indexes the
batch. * denotes an arbitrary shape, so that each tensor in the
batch can be of a common, but arbitrary shape.
k (int): The number of nearest neighbors to return.
Returns:
results (tuple): A tuple of `(indices, distances)` is returned. `indices`
is a 2D tensor where `indices[i,j]` is the index (within the
"stored tensors" passed to the `setup` method) of the `j`-th
nearest neighbor of the `i`-th instance in query, and
`distances[i,j]` is the corresponding distance. `indices` should
be of dtype `torch.long` so that it can be used to index torch
tensors.
"""
query = query.view((len(query), -1))
indices_and_distances = [
self.knn_index.get_nns_by_vector(instance, k, include_distances=True)
for instance in query
]
indices, distances = zip(*indices_and_distances)
indices = torch.Tensor(indices).type(torch.long)
distances = torch.Tensor(distances)
return indices, distances
#!/usr/bin/env python3
import warnings
from typing import Any, Callable, List, Optional, Tuple, TYPE_CHECKING, Union
import torch
import torch.nn as nn
from captum._utils.common import _parse_version
from captum._utils.progress import progress
if TYPE_CHECKING:
from captum.influence._core.tracincp import TracInCPBase
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
def _tensor_batch_dot(t1: Tensor, t2: Tensor) -> Tensor:
r"""
Computes pairwise dot product between two tensors
Args:
Tensors t1 and t2 are feature vectors with dimension (batch_size_1, *) and
(batch_size_2, *). The * dimensions must match in total number of elements.
Returns:
Tensor with shape (batch_size_1, batch_size_2) containing the pairwise dot
products. For example, Tensor[i][j] would be the dot product between
t1[i] and t2[j].
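Example (a hedged sketch with illustrative shapes; both inputs flatten to
12 features per example)::

    >>> t1 = torch.randn(2, 3, 4)
    >>> t2 = torch.randn(5, 12)
    >>> _tensor_batch_dot(t1, t2).shape
    torch.Size([2, 5])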
"""
msg = (
"Please ensure each batch member has the same feature dimension. "
f"First input has {torch.numel(t1) / t1.shape[0]} features, and "
f"second input has {torch.numel(t2) / t2.shape[0]} features."
)
assert torch.numel(t1) / t1.shape[0] == torch.numel(t2) / t2.shape[0], msg
return torch.mm(
t1.view(t1.shape[0], -1),
t2.view(t2.shape[0], -1).T,
)
def _gradient_dot_product(
input_grads: Tuple[Tensor], src_grads: Tuple[Tensor]
) -> Tensor:
r"""
Computes the dot product between the gradient vector for a model on an input batch
and src batch, for each pairwise batch member. Gradients are passed in as a tuple
corresponding to the trainable parameters returned by model.parameters(). Output
corresponds to a tensor of size (inputs_batch_size, src_batch_size) with all
pairwise dot products.
"""
assert len(input_grads) == len(src_grads), "Mismatching gradient parameters."
iterator = zip(input_grads, src_grads)
total = _tensor_batch_dot(*next(iterator))
for input_grad, src_grad in iterator:
total += _tensor_batch_dot(input_grad, src_grad)
return total
def _jacobian_loss_wrt_inputs(
loss_fn: Union[Module, Callable],
out: Tensor,
targets: Tensor,
vectorize: bool,
reduction_type: str,
) -> Tensor:
r"""
Often, we have a loss function that computes a per-sample loss given a 1D tensor
input, and we want to calculate the jacobian of the loss w.r.t. that input. For
example, the input could be a length K tensor specifying the probability a given
sample belongs to each of K possible classes, and the loss function could be
cross-entropy loss. This function performs that calculation, but does so for a
*batch* of inputs. We create this helper function for two reasons: 1) to handle
differences between PyTorch versions for vectorized jacobian calculations, and
2) this function does not accept the aforementioned per-sample loss function.
Instead, it accepts a "reduction" loss function that *reduces* the per-sample loss
for a batch into a single loss. Using a "reduction" loss improves speed.
We will allow this reduction to either be the mean or sum of the per-sample losses,
and this function provides a uniform way to handle different possible reductions,
and also check if the reduction used is valid. Regardless of the reduction used,
this function returns the jacobian for the per-sample loss (for each sample in the
batch).
Args:
loss_fn (torch.nn.Module, Callable, or None): The loss function. If a library
defined loss function is provided, it would be expected to be a
torch.nn.Module. If a custom loss is provided, it can be either type,
but must behave as a library loss function would if `reduction='sum'`
or `reduction='mean'`.
out (Tensor): This is a tensor that represents the batch of inputs to
`loss_fn`. In practice, this will be the output of a model; this is
why this argument is named `out`. `out` is a 2D tensor of shape
(batch size, model output dimensionality). We will call `loss_fn` via
`loss_fn(out, targets)`.
targets (Tensor): The labels for the batch of inputs.
vectorize (bool): Flag to use experimental vectorize functionality for
`torch.autograd.functional.jacobian`.
reduction_type (str): The type of reduction used by `loss_fn`. If `loss_fn`
has the "reduction" attribute, we will check that they match. Can
only be "mean" or "sum".
Returns:
jacobians (Tensor): Returns the jacobian of the per-sample loss (implicitly
defined by `loss_fn` and `reduction_type`) w.r.t each sample
in the batch represented by `out`. This is a 2D tensor, where the
first dimension is the batch dimension.
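    A minimal sketch (the loss function, shapes, and labels below are illustrative
    only)::
        >>> loss_fn = torch.nn.CrossEntropyLoss(reduction="sum")
        >>> out = torch.randn(4, 3)  # batch of 4 examples, 3 classes
        >>> targets = torch.tensor([0, 2, 1, 0])
        >>> jac = _jacobian_loss_wrt_inputs(loss_fn, out, targets, False, "sum")
        >>> jac.shape  # jacobian of per-sample loss w.r.t. each sample's output
        torch.Size([4, 3])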
"""
# TODO: allow loss_fn to be Callable
if isinstance(loss_fn, Module) and hasattr(loss_fn, "reduction"):
msg0 = "Please ensure that loss_fn.reduction is set to `sum` or `mean`"
assert loss_fn.reduction != "none", msg0
msg1 = (
f"loss_fn.reduction ({loss_fn.reduction}) does not match"
f"reduction type ({reduction_type}). Please ensure they are"
" matching."
)
assert loss_fn.reduction == reduction_type, msg1
if reduction_type != "sum" and reduction_type != "mean":
raise ValueError(
f"{reduction_type} is not a valid value for reduction_type. "
"Must be either 'sum' or 'mean'."
)
if _parse_version(torch.__version__) >= (1, 8, 0):
input_jacobians = torch.autograd.functional.jacobian(
lambda out: loss_fn(out, targets), out, vectorize=vectorize
)
else:
input_jacobians = torch.autograd.functional.jacobian(
lambda out: loss_fn(out, targets), out
)
if reduction_type == "mean":
input_jacobians = input_jacobians * len(input_jacobians)
return input_jacobians
def _load_flexible_state_dict(model: Module, path: str) -> float:
r"""
Helper to load pytorch models. This function attempts to find compatibility for
loading models that were trained on different devices / with DataParallel but are
being loaded in a different environment.
Assumes that the model has been saved as a state_dict in some capacity. This can
    either be a single state dict, or a nested dictionary which contains the model
state_dict and other information.
Args:
model (torch.nn.Module): The model for which to load a checkpoint
path (str): The filepath to the checkpoint
    Returns:
        learning_rate (float): The learning rate stored in the checkpoint, or 1.0
            if none was saved. The module state_dict is modified in-place.
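    A minimal sketch, assuming the checkpoint was saved as a plain state_dict
    (the filename below is hypothetical)::
        >>> model = torch.nn.Linear(10, 2)
        >>> torch.save(model.state_dict(), "checkpoint-1.pt")
        >>> _load_flexible_state_dict(model, "checkpoint-1.pt")
        1.0
    The returned value is 1.0 because no learning rate was stored in this
    checkpoint.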
"""
checkpoint = torch.load(path)
learning_rate = checkpoint.get("learning_rate", 1.0)
# can get learning rate from optimizer state_dict?
if "module." in next(iter(checkpoint)):
if isinstance(model, nn.DataParallel):
model.load_state_dict(checkpoint)
else:
model = nn.DataParallel(model)
model.load_state_dict(checkpoint)
model = model.module
else:
if isinstance(model, nn.DataParallel):
model = model.module
model.load_state_dict(checkpoint)
model = nn.DataParallel(model)
else:
model.load_state_dict(checkpoint)
return learning_rate
def _get_k_most_influential_helper(
influence_src_dataloader: DataLoader,
influence_batch_fn: Callable,
inputs: Tuple[Any, ...],
k: int = 5,
proponents: bool = True,
show_progress: bool = False,
desc: Optional[str] = None,
) -> Tuple[Tensor, Tensor]:
r"""
Helper function that computes the quantities returned by
`TracInCPBase._get_k_most_influential`, using a specific implementation that is
constant memory.
Args:
influence_src_dataloader (DataLoader): The DataLoader, representing training
data, for which we want to compute proponents / opponents.
influence_batch_fn (Callable): A callable that will be called via
`influence_batch_fn(inputs, batch)`, where `batch` is a batch
in the `influence_src_dataloader` argument.
inputs (tuple[Any, ...]): This argument represents the test batch, and is a
single tuple of any, where the last element is assumed to be the labels
for the batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any.
k (int, optional): The number of proponents or opponents to return per test
instance.
Default: 5
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`)
Default: True
show_progress (bool, optional): To compute the proponents (or opponents)
for the batch of examples, we perform computation for each batch in
            training dataset `influence_src_dataloader`. If `show_progress` is
true, the progress of this computation will be displayed. In
particular, the number of batches for which the computation has
been performed will be displayed. It will try to use tqdm if
available for advanced features (e.g. time estimation). Otherwise,
it will fallback to a simple output of progress.
Default: False
desc (str, optional): If `show_progress` is true, this is the description to
show when displaying progress. If `desc` is none, no description is
shown.
Default: None
Returns:
(indices, influence_scores): `indices` is a torch.long Tensor that contains the
indices of the proponents (or opponents) for each test example. Its
dimension is `(inputs_batch_size, k)`, where `inputs_batch_size` is the
number of examples in `inputs`. For example, if `proponents==True`,
`indices[i][j]` is the index of the example in training dataset
            `influence_src_dataloader` with the `j`-th highest influence score for
            the `i`-th example in `inputs`. `indices` is a `torch.long` tensor so that
it can directly be used to index other tensors. Each row of
`influence_scores` contains the influence scores for a different test
example, in sorted order. In particular, `influence_scores[i][j]` is
the influence score of example `indices[i][j]` in training dataset
`influence_src_dataloader` on example `i` in the test batch represented
            by `inputs`.
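    A minimal, self-contained sketch using a toy dot-product `influence_batch_fn`
    (all names, shapes, and data below are illustrative only)::
        >>> from torch.utils.data import DataLoader, TensorDataset
        >>> train_x, train_y = torch.randn(20, 3), torch.randint(0, 2, (20,))
        >>> train_loader = DataLoader(TensorDataset(train_x, train_y), batch_size=4)
        >>> test_batch = (torch.randn(2, 3), torch.randint(0, 2, (2,)))
        >>> def toy_influence_batch_fn(inputs, batch):
        ...     # pairwise dot products: (test_batch_size, train_batch_size)
        ...     return inputs[0] @ batch[0].T
        >>> indices, scores = _get_k_most_influential_helper(
        ...     train_loader, toy_influence_batch_fn, test_batch, k=3
        ... )
        >>> indices.shape, scores.shape
        (torch.Size([2, 3]), torch.Size([2, 3]))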
"""
# For each test instance, maintain the best indices and corresponding distances
# initially, these will be empty
topk_indices = torch.Tensor().long()
topk_tracin_scores = torch.Tensor()
multiplier = 1.0 if proponents else -1.0
    # needed to map from relative index in a batch to index within entire `dataloader`
num_instances_processed = 0
# if show_progress, create progress bar
total: Optional[int] = None
if show_progress:
try:
total = len(influence_src_dataloader)
except AttributeError:
pass
influence_src_dataloader = progress(
influence_src_dataloader,
desc=desc,
total=total,
)
for batch in influence_src_dataloader:
# calculate tracin_scores for the batch
batch_tracin_scores = influence_batch_fn(inputs, batch)
batch_tracin_scores *= multiplier
# get the top-k indices and tracin_scores for the batch
batch_size = batch_tracin_scores.shape[1]
batch_topk_tracin_scores, batch_topk_indices = torch.topk(
batch_tracin_scores, min(batch_size, k), dim=1
)
batch_topk_indices = batch_topk_indices + num_instances_processed
num_instances_processed += batch_size
# combine the top-k for the batch with those for previously seen batches
topk_indices = torch.cat(
[topk_indices.to(batch_topk_indices.device), batch_topk_indices], dim=1
)
topk_tracin_scores = torch.cat(
[
topk_tracin_scores.to(batch_topk_tracin_scores.device),
batch_topk_tracin_scores,
],
dim=1,
)
# retain only the top-k in terms of tracin_scores
topk_tracin_scores, topk_argsort = torch.topk(
topk_tracin_scores, min(k, topk_indices.shape[1]), dim=1
)
topk_indices = torch.gather(topk_indices, dim=1, index=topk_argsort)
# if seeking opponents, we were actually keeping track of negative tracin_scores
topk_tracin_scores *= multiplier
return topk_indices, topk_tracin_scores
class _DatasetFromList(Dataset):
def __init__(self, _l: List[Any]) -> None:
self._l = _l
def __getitem__(self, i: int) -> Any:
return self._l[i]
def __len__(self) -> int:
return len(self._l)
def _format_inputs_dataset(
    inputs_dataset: Union[Tuple[Any, ...], DataLoader]
) -> DataLoader:
# if `inputs_dataset` is not a `DataLoader`, turn it into one.
# `_DatasetFromList` turns a list into a `Dataset` where `__getitem__`
# returns an element in the list, and using it to construct a `DataLoader`
# with `batch_size=None` gives a `DataLoader` that yields a single batch.
if not isinstance(inputs_dataset, DataLoader):
inputs_dataset = DataLoader(
_DatasetFromList([inputs_dataset]), shuffle=False, batch_size=None
)
return inputs_dataset
def _self_influence_by_batches_helper(
self_influence_batch_fn: Callable,
instance_name: str,
inputs_dataset: Union[Tuple[Any, ...], DataLoader],
show_progress: bool = False,
) -> Tensor:
"""
Computes self influence scores for the examples in `inputs_dataset`, which is
either a single batch or a Pytorch `DataLoader` that yields batches. The self
influence scores for a single batch are computed using the
`self_influence_batch_fn` input. Note that if `inputs_dataset` is a single batch,
this will call `model` on that single batch, where `model` is the model used to
compute self influence scores by `self_influence_batch_fn`, and if `inputs_dataset`
yields batches, this will call `model` on each batch that is yielded. Therefore,
please ensure that for both cases, the batch(es) that `model` is called
with are not too large, so that there will not be an out-of-memory error. This
implementation performs an outer iteration over all batches that
    `inputs_dataset` represents, and an inner iteration over checkpoints. The
    advantage of this implementation is that showing the progress of the
    computation is straightforward.
Args:
self_influence_batch_fn (Callable): This is the function that computes self
influence scores for a single batch.
instance_name (str): This is the name of the implementation class that
`self_influence_batch_fn` is a method of. This is used for displaying
warning messages.
        inputs_dataset (tuple or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`,
and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset`. Please see documentation for the
`train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs_dataset` represents many examples. If
            `show_progress` is true, the progress of this computation will be
displayed. In particular, the number of batches for which self
influence scores have been computed will be displayed. It will try
to use tqdm if available for advanced features (e.g. time
estimation). Otherwise, it will fallback to a simple output of
progress.
Default: False
Returns:
self_influence_scores (Tensor): This is a 1D tensor containing the self
influence scores of all examples in `inputs_dataset`, regardless of
whether it represents a single batch or a `DataLoader` that yields
batches.
"""
# If `inputs_dataset` is not a `DataLoader`, turn it into one.
inputs_dataset = _format_inputs_dataset(inputs_dataset)
# If `show_progress` is true, create a progress bar that keeps track of how
# many batches have been processed
if show_progress:
# First, try to determine length of progress bar if possible, with a
# default of `None`
inputs_dataset_len = None
try:
inputs_dataset_len = len(inputs_dataset)
except TypeError:
warnings.warn(
"Unable to determine the number of batches in `inputs_dataset`. "
"Therefore, if showing the progress of the computation of self "
"influence scores, only the number of batches processed can be "
"displayed, and not the percentage completion of the computation, "
"nor any time estimates."
)
# then create the progress bar
inputs_dataset = progress(
inputs_dataset,
desc=f"Using {instance_name} to compute self influence. Processing batch",
total=inputs_dataset_len,
)
# To compute self influence scores for each batch, we use
# `_self_influence_by_checkpoints`, which can accept a tuple representing a
# single batch as the `inputs_dataset` argument (as well as a DataLoader).
# Because we are already displaying progress in terms of number of batches
# processed in this method, we will not show progress for the call to
# `_self_influence_by_checkpoints`.
return torch.cat(
[
self_influence_batch_fn(batch, show_progress=False)
for batch in inputs_dataset
]
)
def _check_loss_fn(
influence_instance: "TracInCPBase",
loss_fn: Optional[Union[Module, Callable]],
loss_fn_name: str,
sample_wise_grads_per_batch: Optional[bool] = None,
) -> str:
"""
This checks whether `loss_fn` satisfies the requirements assumed of all
implementations of `TracInCPBase`. It works regardless of whether the
implementation has the `sample_wise_grads_per_batch` attribute.
It returns the reduction type of the loss_fn. If `sample_wise_grads_per_batch`
    is not provided, we assume the implementation does not have that attribute.
"""
# if `loss_fn` is `None`, there is nothing to check. then, the reduction type is
# only used by `_compute_jacobian_wrt_params_with_sample_wise_trick`, where
# reduction type should be "sum" if `loss_fn` is `None`.
if loss_fn is None:
return "sum"
# perhaps since `Module` is an implementation of `Callable`, this has redundancy
assert isinstance(loss_fn, Module) or callable(loss_fn)
reduction_type = "none"
# If we are able to access the reduction used by `loss_fn`, we check whether
# the reduction is compatible with `sample_wise_grads_per_batch`, if it has the
# attribute.
if hasattr(loss_fn, "reduction"):
reduction = loss_fn.reduction # type: ignore
if sample_wise_grads_per_batch is None:
assert reduction in [
"sum",
"mean",
], 'reduction for `loss_fn` must be "sum" or "mean"'
reduction_type = str(reduction)
elif sample_wise_grads_per_batch:
assert reduction in ["sum", "mean"], (
'reduction for `loss_fn` must be "sum" or "mean" when '
"`sample_wise_grads_per_batch` is True"
)
reduction_type = str(reduction)
else:
assert reduction == "none", (
'reduction for `loss_fn` must be "none" when '
"`sample_wise_grads_per_batch` is False"
)
else:
# if we are unable to access the reduction used by `loss_fn`, we warn
# the user about the assumptions we are making regarding the reduction
# used by `loss_fn`
if sample_wise_grads_per_batch is None:
warnings.warn(
f'Since `{loss_fn_name}` has no "reduction" attribute, the '
f'implementation assumes that `{loss_fn_name}` is a "reduction" loss '
"function that reduces the per-example losses by taking their *sum*. "
f"If `{loss_fn_name}` instead reduces the per-example losses by "
f"taking their mean, please set the reduction attribute of "
f'`{loss_fn_name}` to "mean", i.e. '
f'`{loss_fn_name}.reduction = "mean"`.'
)
reduction_type = "sum"
elif sample_wise_grads_per_batch:
warnings.warn(
f"Since `{loss_fn_name}`` has no 'reduction' attribute, and "
"`sample_wise_grads_per_batch` is True, the implementation assumes "
f"that `{loss_fn_name}` is a 'reduction' loss function that reduces "
f"the per-example losses by taking their *sum*. If `{loss_fn_name}` "
"instead reduces the per-example losses by taking their mean, "
f'please set the reduction attribute of `{loss_fn_name}` to "mean", '
f'i.e. `{loss_fn_name}.reduction = "mean"`. Note that if '
"`sample_wise_grads_per_batch` is True, the implementation "
"assumes the reduction is either a sum or mean reduction."
)
reduction_type = "sum"
else:
warnings.warn(
f'Since `{loss_fn_name}` has no "reduction" attribute, and '
"`sample_wise_grads_per_batch` is False, the implementation "
f'assumes that `{loss_fn_name}` is a "per-example" loss function (see '
f"documentation for `{loss_fn_name}` for details). Please ensure "
"that this is the case."
)
return reduction_type
|
#!/usr/bin/env python3
import glob
import warnings
from abc import abstractmethod
from os.path import join
from typing import (
Any,
Callable,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
Type,
Union,
)
import torch
from captum._utils.av import AV
from captum._utils.common import _get_module_from_name, _parse_version
from captum._utils.gradient import (
_compute_jacobian_wrt_params,
_compute_jacobian_wrt_params_with_sample_wise_trick,
)
from captum._utils.progress import NullProgress, progress
from captum.influence._core.influence import DataInfluence
from captum.influence._utils.common import (
_check_loss_fn,
_format_inputs_dataset,
_get_k_most_influential_helper,
_gradient_dot_product,
_load_flexible_state_dict,
_self_influence_by_batches_helper,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
r"""
Note: methods starting with "_" are protected, not private, and can be overridden in
child classes. They are not part of the API.
Implements abstract DataInfluence class and provides implementation details for
influence computation based on the logic provided in TracIn paper
(https://arxiv.org/abs/2002.08484).
The TracIn paper proposes an idealized notion of influence which can be represented by
the total amount a training example reduces loss for a test example via a training
process such as stochastic gradient descent. As this idealized notion of influence is
impractical to compute, the TracIn paper proposes instead to compute an influence
score, which uses a first-order approximation for the change in loss for a test example
by a training example, which is accumulated across saved model checkpoints. This
influence score is accumulated via a summed dot-product of gradient vectors for the
scores/loss of a test and training example.
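In symbols, for a training example z, a test example z', and saved checkpoints
w_1, ..., w_C with associated learning rates eta_1, ..., eta_C, the influence
score accumulated here is (following the notation of the paper)::
    TracInCP(z, z') = sum_{i=1..C} eta_i * grad(loss(w_i, z)) . grad(loss(w_i, z'))
where the gradients are taken w.r.t. model parameters and "." denotes the
dot product.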
"""
"""
TODO: Support for checkpoint type. Currently only supports model parameters as saved
checkpoints. Can use enum or string.
Potential implementation from design doc:
checkpoint_type (Enum = [Parameters | Loss_Grad]): For performance,
saved / loaded checkpoints can be either model parameters, or
gradient of the loss function on an input w.r.t parameters.
"""
class KMostInfluentialResults(NamedTuple):
"""
This namedtuple stores the results of using the `influence` method. This method
is implemented by all subclasses of `TracInCPBase` to calculate
proponents / opponents. The `indices` field stores the indices of the
proponents / opponents for each example in the test dataset. For example, if
finding opponents, `indices[i][j]` stores the index in the training data of the
example with the `j`-th highest influence score on the `i`-th example in the test
dataset. Similarly, the `influence_scores` field stores the actual influence scores,
so that `influence_scores[i][j]` is the influence score of example `indices[i][j]`
in the training data on example `i` of the test dataset. Please see
`TracInCPBase.influence` for more details.
"""
indices: Tensor
influence_scores: Tensor
class TracInCPBase(DataInfluence):
"""
To implement the `influence` method, classes inheriting from `TracInCPBase` will
separately implement the private `_self_influence`, `_get_k_most_influential`,
and `_influence` methods. The public `influence` method is a wrapper for these
private methods.
"""
def __init__(
self,
model: Module,
train_dataset: Union[Dataset, DataLoader],
checkpoints: Union[str, List[str], Iterator],
checkpoints_load_func: Callable = _load_flexible_state_dict,
loss_fn: Optional[Union[Module, Callable]] = None,
batch_size: Union[int, None] = 1,
test_loss_fn: Optional[Union[Module, Callable]] = None,
) -> None:
r"""
Args:
model (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
train_dataset (torch.utils.data.Dataset or torch.utils.data.DataLoader):
In the `influence` method, we compute the influence score of
training examples on examples in a test batch.
This argument represents the training dataset containing those
training examples. In order to compute those influence scores, we
will create a Pytorch DataLoader yielding batches of training
examples that is then used for processing. If this argument is
already a Pytorch Dataloader, that DataLoader can be directly
used for processing. If it is instead a Pytorch Dataset, we will
create a DataLoader using it, with batch size specified by
`batch_size`. For efficiency purposes, the batch size of the
DataLoader used for processing should be as large as possible, but
not too large, so that certain intermediate quantities created
from a batch still fit in memory. Therefore, if
`train_dataset` is a Dataset, `batch_size` should be large.
If `train_dataset` was already a DataLoader to begin with,
it should have been constructed to have a large batch size. It is
assumed that the Dataloader (regardless of whether it is created
from a Pytorch Dataset or not) yields tuples. For a `batch` that is
yielded, of length `L`, it is assumed that the forward function of
`model` accepts `L-1` arguments, and the last element of `batch` is
the label. In other words, `model(*batch[:-1])` gives the output of
`model`, and `batch[-1]` are the labels for the batch.
            checkpoints (str, list[str], or Iterator): Either the path to a
                    directory containing the model checkpoints, a list of
filepaths with checkpoints from which to load, or an iterator which
returns objects from which to load checkpoints.
checkpoints_load_func (Callable, optional): The function to load a saved
checkpoint into a model to update its parameters, and get the
learning rate if it is saved. By default uses a utility to load a
model saved as a state dict.
Default: _load_flexible_state_dict
loss_fn (Callable, optional): The loss function applied to model.
Default: None
batch_size (int or None, optional): Batch size of the DataLoader created to
iterate through `train_dataset`, if it is a Dataset.
`batch_size` should be chosen as large as possible so that certain
intermediate quantities created from a batch still fit in memory.
Specific implementations of `TracInCPBase` will detail the size of
the intermediate quantities. `batch_size` must be an int if
`train_dataset` is a Dataset. If `train_dataset`
is a DataLoader, then `batch_size` is ignored as an argument.
Default: 1
test_loss_fn (Callable, optional): In some cases, one may want to use a
separate loss functions for training examples, i.e. those in
`train_dataset`, and for test examples, i.e. those
represented by the `inputs` and `targets` arguments to the
`influence` method. For example, if one wants to calculate the
influence score of a training example on a test example's
prediction for a fixed class, `test_loss_fn` could map from the
logits for all classes to the logits for a fixed class.
`test_loss_fn` needs to satisfy the same constraints as `loss_fn`.
If not provided, the loss function for test examples is assumed to
be the same as the loss function for training examples, i.e.
`loss_fn`.
Default: None
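        To illustrate the batch structure assumed for `train_dataset` (the toy
        tensors below are placeholders only)::
            >>> import torch
            >>> from torch.utils.data import DataLoader, TensorDataset
            >>> features = torch.randn(100, 5)
            >>> labels = torch.randint(0, 2, (100,))
            >>> train_dataset = TensorDataset(features, labels)
            >>> batch = next(iter(DataLoader(train_dataset, batch_size=10)))
            >>> # the last element is the labels; the rest are passed to the model
            >>> len(batch), batch[0].shape, batch[-1].shape
            (2, torch.Size([10, 5]), torch.Size([10]))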
"""
self.model = model
if isinstance(checkpoints, str):
self.checkpoints = AV.sort_files(glob.glob(join(checkpoints, "*")))
elif isinstance(checkpoints, List) and isinstance(checkpoints[0], str):
self.checkpoints = AV.sort_files(checkpoints)
else:
self.checkpoints = list(checkpoints) # cast to avoid mypy error
if isinstance(self.checkpoints, List):
assert len(self.checkpoints) > 0, "No checkpoints saved!"
self.checkpoints_load_func = checkpoints_load_func
self.loss_fn = loss_fn
# If test_loss_fn not provided, it's assumed to be same as loss_fn
self.test_loss_fn = loss_fn if test_loss_fn is None else test_loss_fn
self.batch_size = batch_size
if not isinstance(train_dataset, DataLoader):
assert isinstance(batch_size, int), (
"since the `train_dataset` argument was a `Dataset`, "
"`batch_size` must be an int."
)
self.train_dataloader = DataLoader(train_dataset, batch_size, shuffle=False)
else:
self.train_dataloader = train_dataset
self.train_dataloader_len: Optional[int] = None
try:
# since we will calculate the number of batches in
# `self.train_dataloader` whenever we use progress bar, calculate
# it once in initialization, for re-use.
self.train_dataloader_len = len(self.train_dataloader)
except TypeError:
warnings.warn(
"Unable to determine the number of batches in training dataset "
"`train_dataset`. Therefore, if showing the progress of computations, "
"only the number of batches processed can be displayed, and not the "
"percentage completion of the computation, nor any time estimates."
)
@abstractmethod
def self_influence(
self,
inputs: Optional[Union[Tuple[Any, ...], DataLoader]] = None,
show_progress: bool = False,
) -> Tensor:
"""
        If `inputs` is not specified, calculates the self influence
scores for the training dataset `train_dataset`. Otherwise, computes
self influence scores for the examples in `inputs`,
which is either a single batch or a Pytorch `DataLoader` that yields
batches. Therefore, in this case, the computed self influence scores
are *not* for the examples in training dataset `train_dataset`.
Note that if `inputs` is a single batch, this
will call `model` on that single batch, and if `inputs` yields
batches, this will call `model` on each batch that is yielded. Therefore,
please ensure that for both cases, the batch(es) that `model` is called
with are not too large, so that there will not be an out-of-memory error.
Args:
inputs (tuple or DataLoader, optional): This specifies the
dataset for which self influence scores will be computed.
Either a single tuple of any, or a `DataLoader`, where each
batch yielded is a tuple of type any. In either case, the tuple
represents a single batch, where the last element is assumed to
be the labels for the batch. That is, `model(*batch[0:-1])`
produces the output for `model`, and `batch[-1]` are the labels,
if any. This is the same assumption made for each batch yielded
by training dataset `train_dataset`. Please see documentation for
the `train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch. If not provided
or `None`, self influence scores will be computed for training
dataset `train_dataset`, which yields batches satisfying the
above assumptions.
Default: None.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In more detail, this computation will iterate over all
checkpoints (provided as the `checkpoints` initialization argument)
in an outer loop, and iterate over all batches that
`inputs` represents in an inner loop. Therefore, the
total number of (checkpoint, batch) combinations that need to be
iterated over is
(# of checkpoints x # of batches that `inputs` represents).
If `show_progress` is True, the total progress of both the outer
iteration over checkpoints and the inner iteration over batches is
displayed. It will try to use tqdm if available for advanced
features (e.g. time estimation). Otherwise, it will fallback to a
simple output of progress.
Default: False
Returns:
self_influence_scores (Tensor): This is a 1D tensor containing the self
influence scores of all examples in `inputs`, regardless of
whether it represents a single batch or a `DataLoader` that yields
batches.
"""
pass
@abstractmethod
def _get_k_most_influential(
self,
inputs: Tuple[Any, ...],
k: int = 5,
proponents: bool = True,
show_progress: bool = False,
) -> KMostInfluentialResults:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): The number of proponents or opponents to return per test
example.
Default: 5
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`)
Default: True
show_progress (bool, optional): To compute the proponents (or opponents)
for the batch of examples, we perform computation for each batch in
                    training dataset `train_dataset`. If `show_progress` is
true, the progress of this computation will be displayed. In
particular, the number of batches for which the computation has
been performed will be displayed. It will try to use tqdm if
available for advanced features (e.g. time estimation). Otherwise,
it will fallback to a simple output of progress.
Default: False
Returns:
(indices, influence_scores) (namedtuple): `indices` is a torch.long Tensor
that contains the indices of the proponents (or opponents) for each
test example. Its dimension is `(inputs_batch_size, k)`, where
`inputs_batch_size` is the number of examples in `inputs`. For
example, if `proponents==True`, `indices[i][j]` is the index of the
example in training dataset `train_dataset` with the
                    `j`-th highest influence score for the `i`-th example in `inputs`.
`indices` is a `torch.long` tensor so that it can directly be used
to index other tensors. Each row of `influence_scores` contains the
influence scores for a different test example, in sorted order. In
particular, `influence_scores[i][j]` is the influence score of
example `indices[i][j]` in training dataset `train_dataset`
on example `i` in the test batch represented by `inputs`.
"""
pass
@abstractmethod
def _influence(
self,
inputs: Tuple[Any, ...],
show_progress: bool = False,
) -> Tensor:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): To compute the influence of examples in
training dataset `train_dataset`, we compute the influence
of each batch. If `show_progress` is true, the progress of this
computation will be displayed. In particular, the number of batches
for which influence has been computed will be displayed. It will
try to use tqdm if available for advanced features (e.g. time
estimation). Otherwise, it will fallback to a simple output of
progress.
Default: False
Returns:
influence_scores (Tensor): Influence scores over the entire
training dataset `train_dataset`. Dimensionality is
(inputs_batch_size, src_dataset_size). For example:
influence_scores[i][j] = the influence score for the j-th training
example to the i-th example in the test batch.
"""
pass
@abstractmethod
def influence( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
k: Optional[int] = None,
proponents: bool = True,
unpack_inputs: bool = True,
show_progress: bool = False,
) -> Union[Tensor, KMostInfluentialResults]:
r"""
This is the key method of this class, and can be run in 2 different modes,
where the mode that is run depends on the arguments passed to this method:
- influence score mode: This mode is used if `k` is None. This mode computes
the influence score of every example in training dataset `train_dataset`
on every example in the test batch represented by `inputs`.
- k-most influential mode: This mode is used if `k` is not None, and an int.
This mode computes the proponents or opponents of every example in the
test batch represented by `inputs`. In particular, for each test example in
the test batch, this mode computes its proponents (resp. opponents),
which are the indices in the training dataset `train_dataset` of the
training examples with the `k` highest (resp. lowest) influence scores on the
test example. Proponents are computed if `proponents` is True. Otherwise,
opponents are computed. For each test example, this method also returns the
actual influence score of each proponent (resp. opponent) on the test
example.
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): If not provided or `None`, the influence score mode will
be run. Otherwise, the k-most influential mode will be run,
and `k` is the number of proponents / opponents to return per
example in the test batch.
Default: None
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`), if running in k-most influential
mode.
Default: True
show_progress (bool, optional): For all modes, computation of results
requires "training dataset computations": computations for each
batch in the training dataset `train_dataset`, which may
take a long time. If `show_progress` is true, the progress of
"training dataset computations" will be displayed. In particular,
the number of batches for which computations have been performed
will be displayed. It will try to use tqdm if available for
advanced features (e.g. time estimation). Otherwise, it will
fallback to a simple output of progress.
Default: False
Returns:
The return value of this method depends on which mode is run.
- influence score mode: if this mode is run (`k` is None), returns a 2D
tensor `influence_scores` of shape `(input_size, train_dataset_size)`,
where `input_size` is the number of examples in the test dataset, and
`train_dataset_size` is the number of examples in training dataset
`train_dataset`. In other words, `influence_scores[i][j]` is the
influence score of the `j`-th example in `train_dataset` on the `i`-th
example in the test batch.
- k-most influential mode: if this mode is run (`k` is an int), returns
a namedtuple `(indices, influence_scores)`. `indices` is a 2D tensor of
shape `(input_size, k)`, where `input_size` is the number of examples in
the test batch. If computing proponents (resp. opponents),
`indices[i][j]` is the index in training dataset `train_dataset` of the
example with the `j`-th highest (resp. lowest) influence score (out of
the examples in `train_dataset`) on the `i`-th example in the test
dataset. `influence_scores` contains the corresponding influence scores.
In particular, `influence_scores[i][j]` is the influence score of example
`indices[i][j]` in `train_dataset` on example `i` in the test batch
represented by `inputs`.
"""
pass
@classmethod
def get_name(cls: Type["TracInCPBase"]) -> str:
r"""
Create readable class name. Due to the nature of the names of `TracInCPBase`
        subclasses, simply returns the class name. For example, for a class called
TracInCP, we return the string TracInCP.
Returns:
name (str): a readable class name
"""
return cls.__name__
def _influence_route_to_helpers(
influence_instance: TracInCPBase,
inputs: Tuple[Any, ...],
k: Optional[int] = None,
proponents: bool = True,
**kwargs,
) -> Union[Tensor, KMostInfluentialResults]:
"""
This is a helper function called by `TracInCP.influence` and
`TracInCPFast.influence`. Those methods share a common logic in that they assume
    an instance of their respective classes implements 2 private methods
    (`_influence`, `_get_k_most_influential`), and the logic of
which private method to call is common, as described in the documentation of the
`influence` method. The arguments and return values of this function are the exact
same as the `influence` method. Note that `influence_instance` refers to the
instance for which the `influence` method was called.
"""
if k is None:
return influence_instance._influence(inputs, **kwargs)
else:
return influence_instance._get_k_most_influential(
inputs,
k,
proponents,
**kwargs,
)
class TracInCP(TracInCPBase):
def __init__(
self,
model: Module,
train_dataset: Union[Dataset, DataLoader],
checkpoints: Union[str, List[str], Iterator],
checkpoints_load_func: Callable = _load_flexible_state_dict,
layers: Optional[List[str]] = None,
loss_fn: Optional[Union[Module, Callable]] = None,
batch_size: Union[int, None] = 1,
test_loss_fn: Optional[Union[Module, Callable]] = None,
sample_wise_grads_per_batch: bool = False,
) -> None:
r"""
Args:
model (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
train_dataset (torch.utils.data.Dataset or torch.utils.data.DataLoader):
In the `influence` method, we compute the influence score of
training examples on examples in a test batch.
This argument represents the training dataset containing those
training examples. In order to compute those influence scores, we
will create a Pytorch DataLoader yielding batches of training
examples that is then used for processing. If this argument is
already a Pytorch Dataloader, that DataLoader can be directly
used for processing. If it is instead a Pytorch Dataset, we will
create a DataLoader using it, with batch size specified by
`batch_size`. For efficiency purposes, the batch size of the
DataLoader used for processing should be as large as possible, but
not too large, so that certain intermediate quantities created
from a batch still fit in memory. Therefore, if
`train_dataset` is a Dataset, `batch_size` should be large.
If `train_dataset` was already a DataLoader to begin with,
it should have been constructed to have a large batch size. It is
assumed that the Dataloader (regardless of whether it is created
from a Pytorch Dataset or not) yields tuples. For a `batch` that is
yielded, of length `L`, it is assumed that the forward function of
`model` accepts `L-1` arguments, and the last element of `batch` is
the label. In other words, `model(*batch[:-1])` gives the output of
`model`, and `batch[-1]` are the labels for the batch.
            checkpoints (str, list[str], or Iterator): Either the path to a
                    directory containing the model checkpoints, a list of
filepaths with checkpoints from which to load, or an iterator which
returns objects from which to load checkpoints.
checkpoints_load_func (Callable, optional): The function to load a saved
checkpoint into a model to update its parameters, and get the
learning rate if it is saved. By default uses a utility to load a
model saved as a state dict.
Default: _load_flexible_state_dict
layers (list[str] or None, optional): A list of layer names for which
gradients should be computed. If `layers` is None, gradients will
be computed for all layers. Otherwise, they will only be computed
for the layers specified in `layers`.
Default: None
loss_fn (Callable, optional): The loss function applied to model. There
are two options for the return type of `loss_fn`. First, `loss_fn`
can be a "per-example" loss function - returns a 1D Tensor of
losses for each example in a batch. `nn.BCELoss(reduction="none")`
would be an "per-example" loss function. Second, `loss_fn` can be
a "reduction" loss function that reduces the per-example losses,
in a batch, and returns a single scalar Tensor. For this option,
the reduction must be the *sum* or the *mean* of the per-example
losses. For instance, `nn.BCELoss(reduction="sum")` is acceptable.
Note for the first option, the `sample_wise_grads_per_batch`
argument must be False, and for the second option,
`sample_wise_grads_per_batch` must be True. Also note that for
the second option, if `loss_fn` has no "reduction" attribute,
the implementation assumes that the reduction is the *sum* of the
per-example losses. If this is not the case, i.e. the reduction
is the *mean*, please set the "reduction" attribute of `loss_fn`
to "mean", i.e. `loss_fn.reduction = "mean"`.
Default: None
batch_size (int or None, optional): Batch size of the DataLoader created to
iterate through `train_dataset`, if it is a Dataset.
`batch_size` should be chosen as large as possible so that certain
intermediate quantities created from a batch still fit in memory.
Specific implementations of `TracInCPBase` will detail the size of
the intermediate quantities. `batch_size` must be an int if
`train_dataset` is a Dataset. If `train_dataset`
is a DataLoader, then `batch_size` is ignored as an argument.
Default: 1
test_loss_fn (Callable, optional): In some cases, one may want to use a
separate loss functions for training examples, i.e. those in
`train_dataset`, and for test examples, i.e. those
represented by the `inputs` and `targets` arguments to the
`influence` method. For example, if one wants to calculate the
influence score of a training example on a test example's
prediction for a fixed class, `test_loss_fn` could map from the
logits for all classes to the logits for a fixed class.
                    `test_loss_fn` needs to satisfy the same constraints as `loss_fn`.
Thus, the same checks that we apply to `loss_fn` are also applied
to `test_loss_fn`, if the latter is provided. Note that the
constraints on both `loss_fn` and `test_loss_fn` both depend on
`sample_wise_grads_per_batch`. This means `loss_fn` and
`test_loss_fn` must either both be "per-example" loss functions,
or both be "reduction" loss functions. If not provided, the loss
function for test examples is assumed to be the same as the loss
function for training examples, i.e. `loss_fn`.
Default: None
sample_wise_grads_per_batch (bool, optional): PyTorch's native gradient
computations w.r.t. model parameters aggregates the results for a
batch and does not allow to access sample-wise gradients w.r.t.
model parameters. This forces us to iterate over each sample in
the batch if we want sample-wise gradients which is computationally
inefficient. We offer an implementation of batch-wise gradient
computations w.r.t. to model parameters which is computationally
more efficient. This implementation can be enabled by setting the
                    `sample_wise_grads_per_batch` argument to `True`, and should be
enabled if and only if the `loss_fn` argument is a "reduction" loss
function. For example, `nn.BCELoss(reduction="sum")` would be a
valid `loss_fn` if this implementation is enabled (see
documentation for `loss_fn` for more details). Note that our
current implementation enables batch-wise gradient computations
only for a limited number of PyTorch nn.Modules: Conv2D and Linear.
This list will be expanded in the near future. Therefore, please
do not enable this implementation if gradients will be computed
for other kinds of layers.
Default: False
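        A minimal sketch of constructing an instance (the model, dataset, and
        checkpoint directory below are hypothetical placeholders)::
            >>> import torch
            >>> import torch.nn as nn
            >>> from torch.utils.data import TensorDataset
            >>> from captum.influence import TracInCP
            >>> net = nn.Linear(5, 2)
            >>> train_dataset = TensorDataset(
            ...     torch.randn(100, 5), torch.randint(0, 2, (100,))
            ... )
            >>> tracin = TracInCP(
            ...     net,
            ...     train_dataset,
            ...     checkpoints="checkpoints_dir/",  # directory of saved checkpoints
            ...     loss_fn=nn.CrossEntropyLoss(reduction="sum"),
            ...     batch_size=10,
            ...     sample_wise_grads_per_batch=True,
            ... )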
"""
TracInCPBase.__init__(
self,
model,
train_dataset,
checkpoints,
checkpoints_load_func,
loss_fn,
batch_size,
test_loss_fn,
)
self.sample_wise_grads_per_batch = sample_wise_grads_per_batch
# check `loss_fn`
self.reduction_type = _check_loss_fn(
self, loss_fn, "loss_fn", sample_wise_grads_per_batch
)
# check `test_loss_fn` if it was provided
self.test_reduction_type = (
self.reduction_type
if test_loss_fn is None
else _check_loss_fn(
self, test_loss_fn, "test_loss_fn", sample_wise_grads_per_batch
)
)
r"""
TODO: Either restore model state after done (would have to place functionality
within influence to restore after every influence call)? or make a copy so that
        changes to requires_grad aren't persistent after using TracIn.
"""
self.layer_modules = None
if layers is not None:
assert isinstance(layers, List), "`layers` should be a list!"
assert len(layers) > 0, "`layers` cannot be empty!"
assert isinstance(
layers[0], str
), "`layers` should contain str layer names."
self.layer_modules = [
_get_module_from_name(self.model, layer) for layer in layers
]
for layer, layer_module in zip(layers, self.layer_modules):
for name, param in layer_module.named_parameters():
if not param.requires_grad:
warnings.warn(
"Setting required grads for layer: {}, name: {}".format(
".".join(layer), name
)
)
param.requires_grad = True
@log_usage()
def influence( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
k: Optional[int] = None,
proponents: bool = True,
show_progress: bool = False,
) -> Union[Tensor, KMostInfluentialResults]:
r"""
This is the key method of this class, and can be run in 2 different modes,
where the mode that is run depends on the arguments passed to this method:
- influence score mode: This mode is used if `k` is None. This mode computes
the influence score of every example in training dataset `train_dataset`
on every example in the test batch represented by `inputs`.
- k-most influential mode: This mode is used if `k` is not None, and an int.
This mode computes the proponents or opponents of every example in the
test batch represented by `inputs`. In particular, for each test example in
the test batch, this mode computes its proponents (resp. opponents),
which are the indices in the training dataset `train_dataset` of the
training examples with the `k` highest (resp. lowest) influence scores on the
test example. Proponents are computed if `proponents` is True. Otherwise,
opponents are computed. For each test example, this method also returns the
actual influence score of each proponent (resp. opponent) on the test
example.
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): If not provided or `None`, the influence score mode will
be run. Otherwise, the k-most influential mode will be run,
and `k` is the number of proponents / opponents to return per
example in the test batch.
Default: None
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`), if running in k-most influential
mode.
Default: True
show_progress (bool, optional): For all modes, computation of results
requires "training dataset computations": computations for each
batch in the training dataset `train_dataset`, which may
take a long time. If `show_progress` is true, the progress of
"training dataset computations" will be displayed. In particular,
the number of batches for which computations have been performed
will be displayed. It will try to use tqdm if available for
advanced features (e.g. time estimation). Otherwise, it will
fallback to a simple output of progress.
Default: False
Returns:
The return value of this method depends on which mode is run.
- influence score mode: if this mode is run (`k` is None), returns a 2D
tensor `influence_scores` of shape `(input_size, train_dataset_size)`,
where `input_size` is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in training dataset
`train_dataset`. In other words, `influence_scores[i][j]` is the
influence score of the `j`-th example in `train_dataset` on the `i`-th
example in the test batch.
- k-most influential mode: if this mode is run (`k` is an int), returns
a namedtuple `(indices, influence_scores)`. `indices` is a 2D tensor of
shape `(input_size, k)`, where `input_size` is the number of examples in
the test batch. If computing proponents (resp. opponents),
`indices[i][j]` is the index in training dataset `train_dataset` of the
example with the `j`-th highest (resp. lowest) influence score (out of
the examples in `train_dataset`) on the `i`-th example in the test
batch. `influence_scores` contains the corresponding influence scores.
In particular, `influence_scores[i][j]` is the influence score of example
`indices[i][j]` in `train_dataset` on example `i` in the test batch
represented by `inputs`.
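        A minimal sketch of both modes, assuming a hypothetical `TracInCP`
        instance `tracin` and test tensors `test_features`, `test_labels`::
            >>> test_batch = (test_features, test_labels)
            >>> # influence score mode: one score per (test example, training example)
            >>> scores = tracin.influence(test_batch)
            >>> # k-most influential mode: top-3 proponents per test example
            >>> indices, influence_scores = tracin.influence(test_batch, k=3)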
"""
assert inputs is not None, (
"`inputs` argument is required."
"If you wish to calculate self influence scores,"
" please use the `self_influence` method instead."
)
return _influence_route_to_helpers(
self,
inputs,
k,
proponents,
show_progress=show_progress,
)
def _sum_jacobians(
self,
inputs: DataLoader,
loss_fn: Optional[Union[Module, Callable]] = None,
reduction_type: Optional[str] = None,
):
"""
sums the jacobians of all examples in `inputs`. result is of the
same format as layer_jacobians, but the batch dimension has size 1
"""
inputs_iter = iter(inputs)
inputs_batch = next(inputs_iter)
def get_batch_contribution(inputs_batch):
_input_jacobians = self._basic_computation_tracincp(
inputs_batch[0:-1],
inputs_batch[-1],
loss_fn,
reduction_type,
)
return tuple(
torch.sum(jacobian, dim=0).unsqueeze(0) for jacobian in _input_jacobians
)
inputs_jacobians = get_batch_contribution(inputs_batch)
for inputs_batch in inputs_iter:
inputs_batch_jacobians = get_batch_contribution(inputs_batch)
inputs_jacobians = tuple(
[
inputs_jacobian + inputs_batch_jacobian
for (inputs_jacobian, inputs_batch_jacobian) in zip(
inputs_jacobians, inputs_batch_jacobians
)
]
)
return inputs_jacobians
def _concat_jacobians(
self,
inputs: DataLoader,
loss_fn: Optional[Union[Module, Callable]] = None,
reduction_type: Optional[str] = None,
):
all_inputs_batch_jacobians = [
self._basic_computation_tracincp(
inputs_batch[0:-1],
inputs_batch[-1],
loss_fn,
reduction_type,
)
for inputs_batch in inputs
]
return tuple(
torch.cat(all_inputs_batch_jacobian, dim=0)
for all_inputs_batch_jacobian in zip(*all_inputs_batch_jacobians)
)
@log_usage()
def compute_intermediate_quantities(
self,
inputs: Union[Tuple[Any, ...], DataLoader],
aggregate: bool = False,
) -> Tensor:
"""
Computes "embedding" vectors for all examples in a single batch, or a
`Dataloader` that yields batches. These embedding vectors are constructed so
that the influence score of a training example on a test example is simply the
dot-product of their corresponding vectors. Allowing a `DataLoader`
yielding batches to be passed in (as opposed to a single batch) gives the
potential to improve efficiency, because we load each checkpoint only once in
this method call. Thus if a `DataLoader` yielding batches is passed in, this
reduces the total number of times each checkpoint is loaded for a dataset,
compared to if a single batch is passed in. The reason we do not just increase
the batch size is that for large models, large batches do not fit in memory.
If `aggregate` is True, the *sum* of the vectors for all examples is returned,
instead of the vectors for each example. This can be useful for computing the
influence of a given training example on the total loss over a validation
dataset, because due to properties of the dot-product, this influence is the
dot-product of the training example's vector with the sum of the vectors in the
validation dataset. Also, by doing the sum aggregation within this method as
opposed to outside of it (by computing all vectors for the validation dataset,
then taking the sum) allows memory usage to be reduced.
Args:
inputs (Tuple, or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`, and
                    `batch[-1]` are the labels, if any. Here, `model` is the model
provided in initialization. This is the same assumption made for
each batch yielded by training dataset `train_dataset`.
aggregate (bool): Whether to return the sum of the vectors for all
examples, as opposed to vectors for each example.
Returns:
intermediate_quantities (Tensor): A tensor of dimension
(N, D * C). Here, N is the total number of examples in
`inputs` if `aggregate` is False, and 1, otherwise (so that
a 2D tensor is always returned). C is the number of checkpoints
passed as the `checkpoints` argument of `TracInCP.__init__`, and
each row represents the vector for an example. Regarding D: Let I
be the dimension of the output of the last fully-connected layer
times the dimension of the input of the last fully-connected layer.
If `self.projection_dim` is specified in initialization,
D = min(I * C, `self.projection_dim` * C). Otherwise, D = I * C.
In summary, if `self.projection_dim` is None, the dimension of each
vector will be determined by the size of the input and output of
the last fully-connected layer of `model`. Otherwise,
`self.projection_dim` must be an int, and random projection will be
performed to ensure that the vector is of dimension no more than
`self.projection_dim` * C. `self.projection_dim` corresponds to
the variable d in the top of page 15 of the TracIn paper:
https://arxiv.org/pdf/2002.08484.pdf.
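        A minimal sketch of the dot-product property described above, assuming a
        hypothetical `TracInCP` instance `tracin` and batches `train_batch`,
        `test_batch`::
            >>> train_vectors = tracin.compute_intermediate_quantities(train_batch)
            >>> test_vectors = tracin.compute_intermediate_quantities(test_batch)
            >>> # influence of training example j on test example i
            >>> influence_scores = test_vectors @ train_vectors.T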
"""
# If `inputs` is not a `DataLoader`, turn it into one.
inputs = _format_inputs_dataset(inputs)
def get_checkpoint_contribution(checkpoint):
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
# get jacobians as tuple of tensors
if aggregate:
inputs_jacobians = self._sum_jacobians(
inputs, self.loss_fn, self.reduction_type
)
else:
inputs_jacobians = self._concat_jacobians(
inputs, self.loss_fn, self.reduction_type
)
# flatten into single tensor
return learning_rate * torch.cat(
[
input_jacobian.flatten(start_dim=1)
for input_jacobian in inputs_jacobians
],
dim=1,
)
return torch.cat(
[
get_checkpoint_contribution(checkpoint)
for checkpoint in self.checkpoints
],
dim=1,
)
def _influence_batch_tracincp(
self,
test_batch: Tuple[Any, ...],
train_batch: Tuple[Any, ...],
):
"""
computes influence scores for a single training batch
"""
def get_checkpoint_contribution(checkpoint):
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
input_jacobians = self._basic_computation_tracincp(
test_batch[0:-1],
test_batch[-1],
self.test_loss_fn,
self.test_reduction_type,
)
return (
_gradient_dot_product(
input_jacobians,
self._basic_computation_tracincp(
train_batch[0:-1],
train_batch[-1],
self.loss_fn,
self.reduction_type,
),
)
* learning_rate
)
batch_tracin_scores = get_checkpoint_contribution(self.checkpoints[0])
for checkpoint in self.checkpoints[1:]:
batch_tracin_scores += get_checkpoint_contribution(checkpoint)
return batch_tracin_scores
def _influence(
self,
inputs: Tuple[Any, ...],
show_progress: bool = False,
) -> Tensor:
r"""
Computes the influence of examples in training dataset `train_dataset`
on the examples in the test batch represented by `inputs`.
This implementation does not require knowing the number of training examples
in advance. Instead, the number of training examples is inferred from the
output of `self._basic_computation_tracincp`.
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): To compute the influence of examples in
training dataset `train_dataset`, we compute the influence
of each batch. If `show_progress` is true, the progress of this
computation will be displayed. In particular, the number of batches
for which influence has been computed will be displayed. It will
try to use tqdm if available for advanced features (e.g. time
estimation). Otherwise, it will fallback to a simple output of
progress.
Default: False
Returns:
influence_scores (Tensor): Influence scores from the TracInCP method.
Its shape is `(input_size, train_dataset_size)`, where `input_size`
is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in
training dataset `train_dataset`. For example:
`influence_scores[i][j]` is the influence score for the j-th training
example to the i-th example in the test batch.
"""
train_dataloader = self.train_dataloader
if show_progress:
train_dataloader = progress(
train_dataloader,
desc=(
f"Using {self.get_name()} to compute "
"influence for training batches"
),
total=self.train_dataloader_len,
)
return torch.cat(
[
self._influence_batch_tracincp(inputs, batch)
for batch in train_dataloader
],
dim=1,
)
def _get_k_most_influential(
self,
inputs: Tuple[Any, ...],
k: int = 5,
proponents: bool = True,
show_progress: bool = False,
) -> KMostInfluentialResults:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): The number of proponents or opponents to return per test
example.
Default: 5
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`)
Default: True
show_progress (bool, optional): To compute the proponents (or opponents)
for the batch of examples, we perform computation for each batch in
                    training dataset `train_dataset`. If `show_progress` is
true, the progress of this computation will be displayed. In
particular, the number of batches for which the computation has
been performed will be displayed. It will try to use tqdm if
available for advanced features (e.g. time estimation). Otherwise,
it will fallback to a simple output of progress.
Default: False
Returns:
(indices, influence_scores) (namedtuple): `indices` is a torch.long Tensor
that contains the indices of the proponents (or opponents) for each
test example. Its dimension is `(inputs_batch_size, k)`, where
`inputs_batch_size` is the number of examples in `inputs`. For
example, if `proponents==True`, `indices[i][j]` is the index of the
example in training dataset `train_dataset` with the
j-th highest influence score for the i-th example in `inputs`.
`indices` is a `torch.long` tensor so that it can directly be used
to index other tensors. Each row of `influence_scores` contains the
influence scores for a different test example, in sorted order. In
particular, `influence_scores[i][j]` is the influence score of
example `indices[i][j]` in training dataset `train_dataset`
on example `i` in the test batch represented by `inputs`.
"""
desc = (
None
if not show_progress
else (
(
f"Using {self.get_name()} to perform computation for "
f'getting {"proponents" if proponents else "opponents"}. '
"Processing training batches"
)
)
)
return KMostInfluentialResults(
*_get_k_most_influential_helper(
self.train_dataloader,
self._influence_batch_tracincp,
inputs,
k,
proponents,
show_progress,
desc,
)
)
def _self_influence_by_checkpoints(
self,
inputs: Union[Tuple[Any, ...], DataLoader],
show_progress: bool = False,
) -> Tensor:
"""
Computes self influence scores for the examples in `inputs`, which is
either a single batch or a Pytorch `DataLoader` that yields batches. Therefore,
the computed self influence scores are *not* for the examples in training
dataset `train_dataset` (unlike when computing self influence scores using the
`influence` method). Note that if `inputs` is a single batch, this
will call `model` on that single batch, and if `inputs` yields
batches, this will call `model` on each batch that is yielded. Therefore,
please ensure that for both cases, the batch(es) that `model` is called
with are not too large, so that there will not be an out-of-memory error. This
implementation performs an outer iteration over checkpoints, and an inner
iteration over all batches that `inputs` represents. The pros of this
implementation are that the checkpoints do not need to be loaded too many
times.
Args:
inputs (tuple or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`,
and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset`. Please see documentation for the
`train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In more detail, this computation will iterate over all
checkpoints (provided as the `checkpoints` initialization argument)
in an outer loop, and iterate over all batches that
`inputs` represents in an inner loop. Thus if
`show_progress` is True, the progress of both the outer
iteration and the inner iterations will be displayed. To show
progress, it will try to use tqdm if available for advanced
features (e.g. time estimation). Otherwise, it will fall back to a
simple output of progress.
Default: False
Returns:
self_influence_scores (Tensor): This is a 1D tensor containing the self
influence scores of all examples in `inputs`, regardless of
whether it represents a single batch or a `DataLoader` that yields
batches.
"""
# If `inputs` is not a `DataLoader`, turn it into one.
inputs = _format_inputs_dataset(inputs)
# If `show_progress` is true, create an outer progress bar that keeps track of
# how many checkpoints have been processed
if show_progress:
# Try to determine length of inner progress bar if possible, with a default
# of `None`.
inputs_len = None
try:
inputs_len = len(inputs)
except TypeError:
warnings.warn(
"Unable to determine the number of batches in `inputs`. "
"Therefore, if showing the progress of the computation of self "
"influence scores, only the number of batches processed can be "
"displayed, and not the percentage completion of the computation, "
"nor any time estimates."
)
def calculate_via_vector_norm(layer_jacobian):
# Helper to efficiently calculate vector norm if pytorch version permits.
return (
torch.linalg.vector_norm(
layer_jacobian,
dim=list(range(1, len(layer_jacobian.shape))),
)
** 2
)
def calculate_via_flatten(layer_jacobian):
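            # Fallback for older PyTorch versions: flatten all non-batch
            # dimensions and take the per-example sum of squares, which matches
            # the squared vector norm computed by the helper above.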
return torch.sum(layer_jacobian.flatten(start_dim=1) ** 2, dim=1)
def get_checkpoint_contribution(checkpoint):
# This function returns a 1D tensor representing the contribution to the
# self influence score for the given checkpoint, for all batches in
# `inputs`. The length of the 1D tensor is the total number of
# examples in `inputs`.
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
# This will store a list of the contribution of the self influence score
# from each batch. Each element is a 1D tensor of length batch_size - the
# batch size of each batch in `inputs` (they do not need to be all
# the same)
checkpoint_contribution = []
_inputs = inputs
# If `show_progress` is true, create an inner progress bar that keeps track
# of how many batches have been processed for the current checkpoint
if show_progress:
_inputs = progress(
inputs,
desc=(
f"Using {self.get_name()} to compute self "
"influence. Processing batch"
),
total=inputs_len,
)
for batch in _inputs:
layer_jacobians = self._basic_computation_tracincp(
batch[0:-1],
batch[-1],
self.loss_fn,
self.reduction_type,
)
# Note that all variables in this function are for an entire batch.
# Each `layer_jacobian` in `layer_jacobians` corresponds to a different
# layer. `layer_jacobian` is the jacobian w.r.t to a given layer's
# parameters. If the given layer's parameters are of shape *, then
# `layer_jacobian` is of shape (batch_size, *). For each layer, we need
# the squared jacobian for each example. So we square the jacobian and
# sum over all dimensions except the 0-th (the batch dimension). We then
# sum the contribution over all layers. For PyTorch >= 1.10 we use the
# optimized torch.linalg.vector_norm as opposed to the explicit flatten.
calculate_fn = calculate_via_flatten
if _parse_version(torch.__version__) >= (1, 10, 0):
calculate_fn = calculate_via_vector_norm
checkpoint_contribution.append(
torch.sum(
torch.stack(
[
calculate_fn(layer_jacobian)
for layer_jacobian in layer_jacobians
],
dim=0,
),
dim=0,
)
* learning_rate
)
# We concatenate the contributions from each batch into a single 1D tensor,
# which represents the contributions for all batches in `inputs`
return torch.cat(checkpoint_contribution, dim=0)
if show_progress:
checkpoints_progress = progress(
desc=(
f"Using {self.get_name()} to compute self "
"influence. Processing checkpoint"
),
total=len(self.checkpoints),
mininterval=0.0,
)
else:
checkpoints_progress = NullProgress()
with checkpoints_progress:
batches_self_tracin_scores = get_checkpoint_contribution(
self.checkpoints[0]
)
checkpoints_progress.update()
# The self influence score for all examples is the sum of contributions from
# each checkpoint
for checkpoint in self.checkpoints[1:]:
batches_self_tracin_scores += get_checkpoint_contribution(checkpoint)
checkpoints_progress.update()
return batches_self_tracin_scores
@log_usage()
def self_influence(
self,
inputs: Optional[Union[Tuple[Any, ...], DataLoader]] = None,
show_progress: bool = False,
outer_loop_by_checkpoints: bool = False,
) -> Tensor:
"""
Computes self influence scores for the examples in `inputs`, which is
either a single batch or a Pytorch `DataLoader` that yields batches.
If `inputs` is not specified or `None`, this calculates self influence
score for the training dataset `train_dataset`. Note that if `inputs`
is a single batch, this will call `model` on that single batch, and if
`inputs` yields batches, this will call `model` on each batch that is
yielded. Therefore, please ensure that for both cases, the batch(es) that
`model` is called with are not too large, so that there will not be an
out-of-memory error.
Internally, this computation requires iterating both over the batches in
`inputs`, as well as different model checkpoints. There are two ways
this iteration can be done. If `outer_loop_by_checkpoints` is False, the outer
iteration will be over batches, and the inner iteration will be over
checkpoints. This has the pro that displaying the progress of the computation
is more intuitive, involving displaying the number of batches for which self
influence scores have been computed. If `outer_loop_by_checkpoints` is True,
the outer iteration will be over checkpoints, and the inner iteration will be
over batches. This has the pro that the checkpoints do not need to be loaded
for each batch. For large models, loading checkpoints can be time-intensive.
Args:
inputs (tuple or DataLoader, optional): This specifies the
dataset for which self influence scores will be computed.
Either a single tuple of any, or a `DataLoader`, where each
batch yielded is a tuple of type any. In either case, the tuple
represents a single batch, where the last element is assumed to
be the labels for the batch. That is, `model(*batch[0:-1])`
produces the output for `model`, and `batch[-1]` are the labels,
if any. This is the same assumption made for each batch yielded
by training dataset `train_dataset`. Please see documentation for
the `train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch. If not provided
or `None`, self influence scores will be computed for training
dataset `train_dataset`, which yields batches satisfying the
above assumptions.
Default: None.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In more detail, if `outer_loop_by_checkpoints` is False,
this computation will iterate over all batches in an outer loop.
Thus if `show_progress` is True, the number of batches for which
self influence scores have been computed will be displayed. If
`outer_loop_by_checkpoints` is True, this computation will iterate
over all checkpoints (provided as the `checkpoints` initialization
argument) in an outer loop, and iterate over all batches that
`inputs` represents in an inner loop. Thus if
`show_progress` is True, the progress of both the outer
iteration and the inner iterations will be displayed. To show
progress, it will try to use tqdm if available for advanced
features (e.g. time estimation). Otherwise, it will fall back to a
simple output of progress.
Default: False
outer_loop_by_checkpoints (bool, optional): If performing an outer
iteration over checkpoints; see method description for more
details.
Default: False
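        Example (a minimal sketch; `tracin` and `test_batch` are hypothetical
        names and assume a `TracInCP` instance was already constructed)::

            >>> # self influence of every example in the training dataset
            >>> train_scores = tracin.self_influence(show_progress=True)
            >>> # self influence of a single (inputs, labels) test batch,
            >>> # iterating over checkpoints in the outer loop
            >>> batch_scores = tracin.self_influence(
            >>>     test_batch, outer_loop_by_checkpoints=True
            >>> )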
"""
inputs = inputs if inputs is not None else self.train_dataloader
if outer_loop_by_checkpoints:
return self._self_influence_by_checkpoints(inputs, show_progress)
return _self_influence_by_batches_helper(
self._self_influence_by_checkpoints,
self.get_name(),
inputs,
show_progress,
)
def _basic_computation_tracincp(
self,
inputs: Tuple[Any, ...],
targets: Optional[Tensor] = None,
loss_fn: Optional[Union[Module, Callable]] = None,
reduction_type: Optional[str] = None,
) -> Tuple[Tensor, ...]:
"""
For instances of TracInCP, computation of influence scores or self influence
scores repeatedly calls this function for different checkpoints
and batches. In particular, this function computes the jacobian of a loss
function w.r.t. parameters in the `layers` initialization argument.
Args:
inputs (tuple[Any, ...]): A batch of examples, which could be a training
batch or test batch, depending on which method is the caller. Does not
represent labels, which are passed as `targets`. The assumption is
that `model(*inputs)` produces the predictions for the batch.
targets (Tensor, optional): If computing influence scores on a loss function,
these are the labels corresponding to the batch `inputs`.
Default: None
loss_fn (Callable, optional): The loss function to use when computing the
jacobian.
reduction_type (str, optional): The reduction type of `loss_fn`. This
argument is only used if `sample_wise_grads_per_batch` was true in
initialization.
"""
if self.sample_wise_grads_per_batch:
return _compute_jacobian_wrt_params_with_sample_wise_trick(
self.model,
inputs,
targets,
loss_fn,
reduction_type,
self.layer_modules,
)
return _compute_jacobian_wrt_params(
self.model,
inputs,
targets,
loss_fn,
self.layer_modules,
)
|
#!/usr/bin/env python3
import warnings
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import captum._utils.common as common
import torch
from captum._utils.av import AV
from captum.attr import LayerActivation
from captum.influence._core.influence import DataInfluence
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
r"""
Additional helper functions to calculate similarity metrics.
"""
def euclidean_distance(test, train) -> Tensor:
r"""
Calculates the pairwise euclidean distance for batches of feature vectors.
Tensors test and train have shape (batch_size_1, *), and (batch_size_2, *).
Returns pairwise euclidean distance Tensor of shape (batch_size_1, batch_size_2).
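    For illustration (shapes below are hypothetical)::

        >>> test = torch.randn(3, 4, 5)   # batch_size_1 = 3
        >>> train = torch.randn(7, 4, 5)  # batch_size_2 = 7
        >>> euclidean_distance(test, train).shape
        torch.Size([3, 7])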
"""
similarity = torch.cdist(
test.view(test.shape[0], -1).unsqueeze(0),
train.view(train.shape[0], -1).unsqueeze(0),
).squeeze(0)
return similarity
def cosine_similarity(test, train, replace_nan=0) -> Tensor:
r"""
Calculates the pairwise cosine similarity for batches of feature vectors.
Tensors test and train have shape (batch_size_1, *), and (batch_size_2, *).
Returns pairwise cosine similarity Tensor of shape (batch_size_1, batch_size_2).
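    For illustration (shapes below are hypothetical; `replace_nan` substitutes
    the given value for rows whose norm is zero)::

        >>> test = torch.randn(3, 10)
        >>> train = torch.randn(7, 10)
        >>> cosine_similarity(test, train).shape
        torch.Size([3, 7])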
"""
test = test.view(test.shape[0], -1)
train = train.view(train.shape[0], -1)
if common._parse_version(torch.__version__) <= (1, 6, 0):
test_norm = torch.norm(test, p=None, dim=1, keepdim=True)
train_norm = torch.norm(train, p=None, dim=1, keepdim=True)
else:
test_norm = torch.linalg.norm(test, ord=2, dim=1, keepdim=True)
train_norm = torch.linalg.norm(train, ord=2, dim=1, keepdim=True)
test = torch.where(test_norm != 0.0, test / test_norm, Tensor([replace_nan]))
train = torch.where(train_norm != 0.0, train / train_norm, Tensor([replace_nan])).T
similarity = torch.mm(test, train)
return similarity
r"""
Implements abstract DataInfluence class and provides implementation details for
similarity metric-based influence computation. Similarity metrics can be used to compare
intermediate or final activation vectors of a model for different sets of input. Then,
these can be used to draw conclusions about influential instances.
Some standard similarity metrics such as dot product similarity or euclidean distance
are provided, but the user can provide any custom similarity metric as well.
"""
class SimilarityInfluence(DataInfluence):
def __init__(
self,
module: Module,
layers: Union[str, List[str]],
influence_src_dataset: Dataset,
activation_dir: str,
model_id: str = "",
similarity_metric: Callable = cosine_similarity,
similarity_direction: str = "max",
batch_size: int = 1,
**kwargs: Any,
) -> None:
r"""
Args:
module (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
layers (str or list[str]): The fully qualified layer(s) for which the
activation vectors are computed.
influence_src_dataset (torch.utils.data.Dataset): PyTorch Dataset that is
used to create a PyTorch Dataloader to iterate over the dataset and
its labels. This is the dataset in which we will be seeking
influential instances. In most cases this is the training dataset.
activation_dir (str): The directory of the path to store
and retrieve activation computations. Best practice would be to use
an absolute path.
model_id (str): The name/version of the model for which layer
activations are being computed. Activations will be stored and
loaded under the subdirectory with this name if provided.
similarity_metric (Callable): This is a callable function that computes a
similarity metric between two representations. For example, the
representations pair could be from the training and test sets.
This function must adhere to certain standards. The inputs should be
torch Tensors with shape (batch_size_i/j, feature dimensions). The
output Tensor should have shape (batch_size_i, batch_size_j) with
scalar values corresponding to the similarity metric used for each
pairwise combination from the two batches.
For example, suppose we use `batch_size_1 = 16` for iterating
through `influence_src_dataset`, and for the `inputs` argument
we pass in a Tensor with 3 examples, i.e. batch_size_2 = 3. Also,
suppose that our inputs and intermediate activations throughout the
model will have dimension (N, C, H, W). Then, the feature dimensions
should be flattened within this function. For example::
>>> av_test.shape
torch.Size([3, N, C, H, W])
>>> av_src.shape
torch.Size([16, N, C, H, W])
>>> av_test = av_test.view(av_test.shape[0], -1)
>>> av_test.shape
torch.Size([3, N x C x H x W])
and similarly for av_src. The similarity_metric should then use
these flattened tensors to return the pairwise similarity matrix.
For example, `similarity_metric(av_test, av_src)` should return a
tensor of shape (3, 16).
batch_size (int): Batch size for iterating through `influence_src_dataset`.
**kwargs: Additional key-value arguments that are necessary for specific
implementation of `DataInfluence` abstract class.
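        A minimal construction sketch (all names here - `net`, `train_ds`,
        `"./activations"` - are hypothetical)::

            >>> sim_influence = SimilarityInfluence(
            >>>     module=net,
            >>>     layers="fc2",
            >>>     influence_src_dataset=train_ds,
            >>>     activation_dir="./activations",
            >>>     model_id="net_v1",
            >>>     similarity_metric=cosine_similarity,
            >>>     similarity_direction="max",
            >>>     batch_size=16,
            >>> )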
"""
self.module = module
self.layers = [layers] if isinstance(layers, str) else layers
self.influence_src_dataset = influence_src_dataset
self.activation_dir = activation_dir
self.model_id = model_id
self.batch_size = batch_size
if similarity_direction == "max" or similarity_direction == "min":
self.similarity_direction = similarity_direction
else:
raise ValueError(
f"{similarity_direction} is not a valid value. "
"Must be either 'max' or 'min'"
)
if similarity_metric is cosine_similarity:
if "replace_nan" in kwargs:
self.replace_nan = kwargs["replace_nan"]
else:
self.replace_nan = -2 if self.similarity_direction == "max" else 2
similarity_metric = partial(cosine_similarity, replace_nan=self.replace_nan)
self.similarity_metric = similarity_metric
self.influence_src_dataloader = DataLoader(
influence_src_dataset, batch_size, shuffle=False
)
def influence( # type: ignore[override]
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
top_k: int = 1,
additional_forward_args: Optional[Any] = None,
load_src_from_disk: bool = True,
**kwargs: Any,
) -> Dict:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Batch of examples for which
influential instances are computed. They are passed to the
forward_func. The first dimension in `inputs` tensor or tuple
of tensors corresponds to the batch size. A tuple of tensors
is only passed in if this is the input form that `module` accepts.
top_k (int): The number of top-matching activations to return
additional_forward_args (Any, optional): Additional arguments that will be
passed to forward_func after inputs.
load_src_from_disk (bool): Loads activations for `influence_src_dataset`
where possible. Setting to False would force regeneration of
activations.
load_input_from_disk (bool): Regenerates activations for inputs by default
and removes previous `inputs` activations that are flagged with
`inputs_id`. Setting to True will load prior matching inputs
activations. Note that this could lead to unexpected behavior if
`inputs_id` is not configured properly and activations are loaded
for a different, prior `inputs`.
inputs_id (str): Used to identify inputs for loading activations.
**kwargs: Additional key-value arguments that are necessary for specific
implementation of `DataInfluence` abstract class.
Returns:
influences (dict): Returns the influential instances retrieved from
`influence_src_dataset` for each test example represented through a
tensor or a tuple of tensors in `inputs`. Returned influential
examples are represented as dict, with keys corresponding to
the layer names passed in `layers`. Each value in the dict is a
tuple containing the indices and values for the top k similarities
from `influence_src_dataset` by the chosen metric. The first value
in the tuple corresponds to the indices corresponding to the top k
most similar examples, and the second value is the similarity score.
The batch dimension corresponds to the batch dimension of `inputs`.
If inputs.shape[0] == 5, then dict[`layer_name`][0].shape[0] == 5.
These tensors will be of shape (inputs.shape[0], top_k).
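        For example (a hedged sketch, continuing the hypothetical construction
        in `__init__` with a layer named `"fc2"`)::

            >>> influences = sim_influence.influence(test_inputs, top_k=3)
            >>> indices, scores = influences["fc2"]
            >>> # both `indices` and `scores` have shape
            >>> # (test batch size, top_k), e.g. (5, 3) for 5 test examples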
"""
inputs_batch_size = (
inputs[0].shape[0] if isinstance(inputs, tuple) else inputs.shape[0]
)
influences: Dict[str, Any] = {}
layer_AVDatasets = AV.generate_dataset_activations(
self.activation_dir,
self.module,
self.model_id,
self.layers,
DataLoader(self.influence_src_dataset, self.batch_size, shuffle=False),
identifier="src",
load_from_disk=load_src_from_disk,
return_activations=True,
)
assert layer_AVDatasets is not None and not isinstance(
layer_AVDatasets, AV.AVDataset
)
layer_modules = [
common._get_module_from_name(self.module, layer) for layer in self.layers
]
test_activations = LayerActivation(self.module, layer_modules).attribute(
inputs, additional_forward_args
)
minmax = self.similarity_direction == "max"
# av_inputs shape: (inputs_batch_size, *) e.g. (inputs_batch_size, N, C, H, W)
# av_src shape: (self.batch_size, *) e.g. (self.batch_size, N, C, H, W)
test_activations = (
test_activations if len(self.layers) > 1 else [test_activations]
)
for i, (layer, layer_AVDataset) in enumerate(
zip(self.layers, layer_AVDatasets)
):
topk_val, topk_idx = torch.Tensor(), torch.Tensor().long()
zero_acts = torch.Tensor().long()
av_inputs = test_activations[i]
src_loader = DataLoader(layer_AVDataset)
for j, av_src in enumerate(src_loader):
av_src = av_src.squeeze(0)
similarity = self.similarity_metric(av_inputs, av_src)
msg = (
"Output of custom similarity does not meet required dimensions. "
f"Your output has shape {similarity.shape}.\nPlease ensure the "
"output shape matches (inputs_batch_size, src_dataset_batch_size), "
f"which should be {(inputs_batch_size, self.batch_size)}."
)
assert similarity.shape == (inputs_batch_size, av_src.shape[0]), msg
if hasattr(self, "replace_nan"):
idx = (similarity == self.replace_nan).nonzero()
zero_acts = torch.cat((zero_acts, idx))
r"""
TODO: For models that can have tuples as activations, we should
allow similarity metrics to accept tuples, support topk selection.
"""
topk_batch = min(top_k, self.batch_size)
values, indices = torch.topk(
similarity, topk_batch, dim=1, largest=minmax
)
indices += int(j * self.batch_size)
topk_val = torch.cat((topk_val, values), dim=1)
topk_idx = torch.cat((topk_idx, indices), dim=1)
# can modify how often to sort for efficiency? minor
sort_idx = torch.argsort(topk_val, dim=1, descending=minmax)
topk_val = torch.gather(topk_val, 1, sort_idx[:, :top_k])
topk_idx = torch.gather(topk_idx, 1, sort_idx[:, :top_k])
influences[layer] = (topk_idx, topk_val)
if torch.numel(zero_acts) != 0:
zero_warning = (
f"Layer {layer} has zero-vector activations for some inputs. This "
"may cause undefined behavior for cosine similarity. The indices "
"for the offending inputs will be included under the key "
f"'zero_acts-{layer}' in the output dictionary. Indices are "
"returned as a tensor with [inputs_idx, src_dataset_idx] pairs "
"which may have corrupted similarity scores."
)
warnings.warn(zero_warning, RuntimeWarning)
key = "-".join(["zero_acts", layer])
influences[key] = zero_acts
return influences
|
#!/usr/bin/env python3
from abc import ABC, abstractmethod
from typing import Any
from torch.nn import Module
from torch.utils.data import Dataset
class DataInfluence(ABC):
r"""
An abstract class to define model data influence skeleton.
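    A minimal sketch of a concrete subclass (illustrative only; a real
    implementation would compute actual influence scores)::

        >>> class ConstantInfluence(DataInfluence):
        >>>     def __init__(self, model, train_dataset):
        >>>         self.model = model
        >>>         self.train_dataset = train_dataset
        >>>
        >>>     def influence(self, inputs=None, **kwargs):
        >>>         # every training example is deemed equally influential
        >>>         return torch.zeros(len(self.train_dataset))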
"""
def __init__(self, model: Module, train_dataset: Dataset, **kwargs: Any) -> None:
r"""
Args:
model (torch.nn.Module): An instance of pytorch model.
train_dataset (torch.utils.data.Dataset): PyTorch Dataset that is
used to create a PyTorch Dataloader to iterate over the dataset and
its labels. This is the dataset in which we will be seeking
influential instances. In most cases this is the training dataset.
**kwargs: Additional key-value arguments that are necessary for specific
implementation of `DataInfluence` abstract class.
"""
self.model = model
self.train_dataset = train_dataset
@abstractmethod
def influence(self, inputs: Any = None, **kwargs: Any) -> Any:
r"""
Args:
inputs (Any): Batch of examples for which influential
instances are computed. They are passed to the forward_func. If
`inputs` is a tensor or tuple of tensors, the first dimension
of a tensor corresponds to the batch dimension.
**kwargs: Additional key-value arguments that are necessary for specific
implementation of `DataInfluence` abstract class.
Returns:
influences (Any): We do not add restrictions on the return type for now,
though this may change in the future.
"""
pass
|
#!/usr/bin/env python3
import threading
import warnings
from collections import defaultdict
from typing import Any, Callable, cast, Dict, Iterator, List, Optional, Tuple, Union
import torch
from captum._utils.common import _get_module_from_name, _sort_key_list
from captum._utils.gradient import _gather_distributed_tensors
from captum._utils.progress import NullProgress, progress
from captum.influence._core.tracincp import (
_influence_route_to_helpers,
KMostInfluentialResults,
TracInCPBase,
)
from captum.influence._utils.common import (
_check_loss_fn,
_format_inputs_dataset,
_get_k_most_influential_helper,
_jacobian_loss_wrt_inputs,
_load_flexible_state_dict,
_self_influence_by_batches_helper,
_tensor_batch_dot,
)
from captum.influence._utils.nearest_neighbors import (
AnnoyNearestNeighbors,
NearestNeighbors,
)
from captum.log import log_usage
from torch import device, Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
r"""
Implements abstract DataInfluence class and also provides implementation details for
influence computation based on the logic provided in TracIn paper
(https://arxiv.org/abs/2002.08484).
The TracIn paper proposes an idealized notion of influence which can be represented by
the total amount a training example reduces loss for a test example via a training
process such as stochastic gradient descent. As this idealized notion of influence is
impractical to compute, the TracIn paper proposes instead to compute an influence
score, which uses a first-order approximation for the change in loss for a test example
by a training example, which is accumulated across saved model checkpoints. This
influence score is accumulated via a summed dot-product of gradient vectors for the
scores/loss of a test and training example.
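Schematically, for a training example z and a test example z', with saved
checkpoints theta_1, ..., theta_k and corresponding learning rates
eta_1, ..., eta_k, the TracInCP score has the form (a sketch of the idea, not
the exact implementation):

    TracInCP(z, z') = sum_i eta_i * grad L(theta_i, z) . grad L(theta_i, z')

where "." denotes the dot-product of the flattened gradient vectors.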
"""
"""
TODO: Support for checkpoint type. Currently only supports model parameters as saved
checkpoints. Can use enum or string.
Potential implementation from design doc:
checkpoint_type (Enum = [Parameters | Loss_Grad]): For performance,
saved / loaded checkpoints can be either model parameters, or
gradient of the loss function on an input w.r.t parameters.
"""
class TracInCPFast(TracInCPBase):
r"""
In Appendix F, Page 14 of the TracIn paper, they show that the calculation
of the influence score between a test example x' and a training example x,
can be computed much more quickly than naive back-propagation in the special
case when considering only gradients in the last fully-connected layer. This class
computes influence scores for that special case. Note that the computed
influence scores are exactly the same as when naive back-propagation is used -
there is no loss in accuracy.
In more detail regarding the influence score computation: let :math:`x`
and :math:`\nabla_y f(y)` be the input and output-gradient of the last
fully-connected layer, respectively, for a training example. Similarly, let
:math:`x'` and :math:`\nabla_{y'} f(y')` be the corresponding quantities for
a test example. Then, the influence score of the training example on the test
example is the sum of the contribution from each checkpoint. The contribution from
a given checkpoint is :math:`(x^T x')(\nabla_y f(y)^T \nabla_{y'} f(y'))`.
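    As an illustration of this identity (a sketch with made-up tensors, not
    part of the class API)::

        >>> x, x_test = torch.randn(8), torch.randn(8)   # layer inputs
        >>> g, g_test = torch.randn(4), torch.randn(4)   # output-gradients
        >>> fast = (x @ x_test) * (g @ g_test)
        >>> naive = (
        >>>     torch.outer(x, g).flatten() @ torch.outer(x_test, g_test).flatten()
        >>> )
        >>> torch.allclose(fast, naive)
        True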
"""
def __init__(
self,
model: Module,
final_fc_layer: Union[Module, str],
train_dataset: Union[Dataset, DataLoader],
checkpoints: Union[str, List[str], Iterator],
checkpoints_load_func: Callable = _load_flexible_state_dict,
loss_fn: Optional[Union[Module, Callable]] = None,
batch_size: Union[int, None] = 1,
test_loss_fn: Optional[Union[Module, Callable]] = None,
vectorize: bool = False,
) -> None:
r"""
Args:
model (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
final_fc_layer (torch.nn.Module or str): The last fully connected layer in
the network for which gradients will be approximated via fast random
projection method. Can be either the layer module itself, or the
fully qualified name of the layer if it is a defined attribute of
the passed `model`.
train_dataset (torch.utils.data.Dataset or torch.utils.data.DataLoader):
In the `influence` method, we compute the influence score of
training examples on examples in a test batch.
This argument represents the training dataset containing those
training examples. In order to compute those influence scores, we
will create a Pytorch DataLoader yielding batches of training
examples that is then used for processing. If this argument is
already a Pytorch Dataloader, that DataLoader can be directly
used for processing. If it is instead a Pytorch Dataset, we will
create a DataLoader using it, with batch size specified by
`batch_size`. For efficiency purposes, the batch size of the
DataLoader used for processing should be as large as possible, but
not too large, so that certain intermediate quantities created
from a batch still fit in memory. Therefore, if
`train_dataset` is a Dataset, `batch_size` should be large.
If `train_dataset` was already a DataLoader to begin with,
it should have been constructed to have a large batch size. It is
assumed that the Dataloader (regardless of whether it is created
from a Pytorch Dataset or not) yields tuples. For a `batch` that is
yielded, of length `L`, it is assumed that the forward function of
`model` accepts `L-1` arguments, and the last element of `batch` is
the label. In other words, `model(*batch[:-1])` gives the output of
`model`, and `batch[-1]` are the labels for the batch.
checkpoints (str, list[str], or Iterator): Either the directory of the
path to store and retrieve model checkpoints, a list of
filepaths with checkpoints from which to load, or an iterator which
returns objects from which to load checkpoints.
checkpoints_load_func (Callable, optional): The function to load a saved
checkpoint into a model to update its parameters, and get the
learning rate if it is saved. By default uses a utility to load a
model saved as a state dict.
Default: _load_flexible_state_dict
loss_fn (Callable, optional): The loss function applied to model. `loss_fn`
must be a "reduction" loss function that reduces the per-example
losses in a batch, and returns a single scalar Tensor. Furthermore,
the reduction must be the *sum* or the *mean* of the per-example
losses. For instance, `nn.BCELoss(reduction="sum")` is acceptable.
Also note that if `loss_fn` has no "reduction" attribute,
the implementation assumes that the reduction is the *sum* of the
per-example losses. If this is not the case, i.e. the reduction
is the *mean*, please set the "reduction" attribute of `loss_fn`
to "mean", i.e. `loss_fn.reduction = "mean"`.
Default: None
batch_size (int or None, optional): Batch size of the DataLoader created to
iterate through `train_dataset`, if it is a Dataset.
`batch_size` should be chosen as large as possible so that certain
intermediate quantities created from a batch still fit in memory.
Specific implementations of `TracInCPBase` will detail the size of
the intermediate quantities. `batch_size` must be an int if
`train_dataset` is a Dataset. If `train_dataset`
is a DataLoader, then `batch_size` is ignored as an argument.
Default: 1
test_loss_fn (Callable, optional): In some cases, one may want to use a
separate loss function for training examples, i.e. those in
`train_dataset`, and for test examples, i.e. those
represented by the `inputs` and `targets` arguments to the
`influence` method. For example, if one wants to calculate the
influence score of a training example on a test example's
prediction for a fixed class, `test_loss_fn` could map from the
logits for all classes to the logits for a fixed class.
`test_loss_fn` needs to satisfy the same constraints as `loss_fn`.
Thus, the same checks that we apply to `loss_fn` are also applied
to `test_loss_fn`, if the latter is provided. If not provided, the
loss function for test examples is assumed to be the same as the
loss function for training examples, i.e. `loss_fn`.
Default: None
vectorize (bool, optional): Flag to use experimental vectorize functionality
for `torch.autograd.functional.jacobian`.
Default: False
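        A minimal construction sketch (names such as `net`, `train_ds`, and the
        checkpoint paths are hypothetical)::

            >>> tracin_fast = TracInCPFast(
            >>>     model=net,
            >>>     final_fc_layer="fc",
            >>>     train_dataset=train_ds,
            >>>     checkpoints=["ckpt_1.pt", "ckpt_2.pt"],
            >>>     loss_fn=torch.nn.CrossEntropyLoss(reduction="sum"),
            >>>     batch_size=64,
            >>> )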
"""
TracInCPBase.__init__(
self,
model,
train_dataset,
checkpoints,
checkpoints_load_func,
loss_fn,
batch_size,
test_loss_fn,
)
self.vectorize = vectorize
# TODO: restore prior state
self.final_fc_layer = final_fc_layer
if isinstance(self.final_fc_layer, str):
self.final_fc_layer = _get_module_from_name(model, self.final_fc_layer)
assert isinstance(self.final_fc_layer, Module)
for param in self.final_fc_layer.parameters():
param.requires_grad = True
assert loss_fn is not None, "loss function must not be None"
# check `loss_fn`
self.reduction_type = _check_loss_fn(self, loss_fn, "loss_fn")
# check `test_loss_fn` if it was provided
self.test_reduction_type = (
self.reduction_type
if test_loss_fn is None
else _check_loss_fn(self, test_loss_fn, "test_loss_fn")
)
@log_usage()
def influence( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
k: Optional[int] = None,
proponents: bool = True,
show_progress: bool = False,
) -> Union[Tensor, KMostInfluentialResults]:
r"""
This is the key method of this class, and can be run in 2 different modes,
where the mode that is run depends on the arguments passed to this method:
- influence score mode: This mode is used if `k` is None. This mode computes
the influence score of every example in training dataset `train_dataset`
on every example in the test batch represented by `inputs`.
- k-most influential mode: This mode is used if `k` is not None, and an int.
This mode computes the proponents or opponents of every example in the
test batch represented by `inputs`. In particular, for each test example in
the test batch, this mode computes its proponents (resp. opponents),
which are the indices in the training dataset `train_dataset` of the
training examples with the `k` highest (resp. lowest) influence scores on the
test example. Proponents are computed if `proponents` is True. Otherwise,
opponents are computed. For each test example, this method also returns the
actual influence score of each proponent (resp. opponent) on the test
example.
Args:
inputs (tuple or DataLoader): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): If not provided or `None`, the influence score mode will
be run. Otherwise, the k-most influential mode will be run,
and `k` is the number of proponents / opponents to return per
example in the test batch.
Default: None
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`), if running in k-most influential
mode.
Default: True
show_progress (bool, optional): For all modes, computation of results
requires "training dataset computations": computations for each
batch in the training dataset `train_dataset`, which may
take a long time. If `show_progress` is true, the progress of
"training dataset computations" will be displayed. In particular,
the number of batches for which computations have been performed
will be displayed. It will try to use tqdm if available for
advanced features (e.g. time estimation). Otherwise, it will
fall back to a simple output of progress.
Default: False
Returns:
The return value of this method depends on which mode is run.
- influence score mode: if this mode is run (`k` is None), returns a 2D
tensor `influence_scores` of shape `(input_size, train_dataset_size)`,
where `input_size` is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in training dataset
`train_dataset`. In other words, `influence_scores[i][j]` is the
influence score of the `j`-th example in `train_dataset` on the `i`-th
example in the test batch.
- k-most influential mode: if this mode is run (`k` is an int), returns
a namedtuple `(indices, influence_scores)`. `indices` is a 2D tensor of
shape `(input_size, k)`, where `input_size` is the number of examples in
the test batch. If computing proponents (resp. opponents),
`indices[i][j]` is the index in training dataset `train_dataset` of the
example with the `j`-th highest (resp. lowest) influence score (out of
the examples in `train_dataset`) on the `i`-th example in the test
batch. `influence_scores` contains the corresponding influence scores.
In particular, `influence_scores[i][j]` is the influence score of example
`indices[i][j]` in `train_dataset` on example `i` in the test batch
represented by `inputs`.
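        For example (a hedged sketch, reusing the hypothetical `tracin_fast`
        instance from `__init__` and an `(inputs, labels)` test batch)::

            >>> # influence score mode: scores of every training example
            >>> scores = tracin_fast.influence(test_batch)
            >>> # k-most influential mode: top-3 proponents per test example
            >>> indices, scores = tracin_fast.influence(test_batch, k=3)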
"""
assert inputs is not None, (
"`inputs` argument is required."
"If you wish to calculate self influence scores,"
" please use the `self_influence` method instead."
)
return _influence_route_to_helpers(
self,
inputs,
k,
proponents,
show_progress=show_progress,
)
def _influence_batch_tracincp_fast(
self,
test_batch: Tuple[Any, ...],
train_batch: Tuple[Any, ...],
):
"""
computes influence scores for a single training batch, when only considering
gradients in the last fully-connected layer, using the computation trick
described in the `TracInCPFast` class description.
"""
def get_checkpoint_contribution(checkpoint):
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
input_jacobians, input_layer_inputs = _basic_computation_tracincp_fast(
self,
test_batch[0:-1],
test_batch[-1],
self.test_loss_fn,
self.test_reduction_type,
)
src_jacobian, src_layer_input = _basic_computation_tracincp_fast(
self,
train_batch[0:-1],
train_batch[-1],
self.loss_fn,
self.reduction_type,
)
return (
_tensor_batch_dot(
input_jacobians, src_jacobian
) # shape is (test batch size, training batch size), containing x^T x'
# for every example x in the training batch and example x' in the test
# batch
* _tensor_batch_dot(input_layer_inputs, src_layer_input)
# shape is (test batch size, training batch size), containing
# (\nabla_y f(y)^T \nabla_{y'} f(y')) for every label y in the training
# batch and label y' in the test batch
* learning_rate
)
batch_tracin_scores = get_checkpoint_contribution(self.checkpoints[0])
for checkpoint in self.checkpoints[1:]:
batch_tracin_scores += get_checkpoint_contribution(checkpoint)
return batch_tracin_scores
def _influence( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
show_progress: bool = False,
) -> Tensor:
r"""
Computes the influence of examples in training dataset `train_dataset`
on the examples in the test batch represented by `inputs`.
This implementation does not require knowing the number of training examples
in advance. Instead, the number of training examples is inferred from the
output of `_basic_computation_tracincp_fast`.
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): To compute the influence of examples in
training dataset `train_dataset`, we compute the influence
of each batch. If `show_progress` is true, the progress of this
computation will be displayed. In particular, the number of batches
for which influence has been computed will be displayed. It will
try to use tqdm if available for advanced features (e.g. time
estimation). Otherwise, it will fall back to a simple output of
progress.
Default: False
Returns:
influence_scores (Tensor): Influence scores from the `TracInCPFast` method.
Its shape is `(input_size, train_dataset_size)`, where `input_size`
is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in
training dataset `train_dataset`. For example:
`influence_scores[i][j]` is the influence score of the j-th training
example on the i-th example in the test batch.
"""
train_dataloader = self.train_dataloader
if show_progress:
train_dataloader = progress(
train_dataloader,
desc=(
f"Using {self.get_name()} to compute "
"influence for training batches"
),
total=self.train_dataloader_len,
)
return torch.cat(
[
self._influence_batch_tracincp_fast(inputs, batch)
for batch in train_dataloader
],
dim=1,
)
def _get_k_most_influential( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
k: int = 5,
proponents: bool = True,
show_progress: bool = False,
) -> KMostInfluentialResults:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): The number of proponents or opponents to return per test
example.
Default: 5
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`)
Default: True
show_progress (bool, optional): To compute the proponents (or opponents)
for the batch of examples, we perform computation for each batch in
training dataset `train_dataset`. If `show_progress` is
true, the progress of this computation will be displayed. In
particular, the number of batches for which the computation has
been performed will be displayed. It will try to use tqdm if
available for advanced features (e.g. time estimation). Otherwise,
it will fall back to a simple output of progress.
Default: False
Returns:
(indices, influence_scores) (namedtuple): `indices` is a torch.long Tensor
that contains the indices of the proponents (or opponents) for each
test example. Its dimension is `(inputs_batch_size, k)`, where
`inputs_batch_size` is the number of examples in `inputs`. For
example, if `proponents==True`, `indices[i][j]` is the index of the
example in training dataset `train_dataset` with the
j-th highest influence score for the i-th example in `inputs`.
`indices` is a `torch.long` tensor so that it can directly be used
to index other tensors. Each row of `influence_scores` contains the
influence scores for a different test example, in sorted order. In
particular, `influence_scores[i][j]` is the influence score of
example `indices[i][j]` in training dataset `train_dataset`
on example `i` in the test batch represented by `inputs`.
"""
desc = (
None
if not show_progress
else (
(
f"Using {self.get_name()} to perform computation for "
f'getting {"proponents" if proponents else "opponents"}. '
"Processing training batches"
)
)
)
return KMostInfluentialResults(
*_get_k_most_influential_helper(
self.train_dataloader,
self._influence_batch_tracincp_fast,
inputs,
k,
proponents,
show_progress,
desc,
)
)
def _self_influence_by_checkpoints(
self,
inputs: Union[Tuple[Any, ...], DataLoader],
show_progress: bool = False,
) -> Tensor:
"""
Computes self influence scores for the examples in `inputs`, which is
either a single batch or a Pytorch `DataLoader` that yields batches. Therefore,
the computed self influence scores are *not* for the examples in training
dataset `train_dataset` (unlike when computing self influence scores using the
`influence` method). Note that if `inputs` is a single batch, this
will call `model` on that single batch, and if `inputs` yields
batches, this will call `model` on each batch that is yielded. Therefore,
please ensure that for both cases, the batch(es) that `model` is called
with are not too large, so that there will not be an out-of-memory error. This
implementation performs an outer iteration over checkpoints, and an inner
iteration over all batches that `inputs` represents. The pros of this
implementation are that the checkpoints do not need to be loaded too many
times.
Args:
inputs (tuple or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`,
and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset`. Please see documentation for the
`train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In more detail, this computation will iterate over all
checkpoints (provided as the `checkpoints` initialization argument)
in an outer loop, and iterate over all batches that
`inputs` represents in an inner loop. Thus if
`show_progress` is True, the progress of both the outer
iteration and the inner iterations will be displayed. To show
progress, it will try to use tqdm if available for advanced
features (e.g. time estimation). Otherwise, it will fall back to a
simple output of progress.
Default: False
Returns:
self_influence_scores (Tensor): This is a 1D tensor containing the self
influence scores of all examples in `inputs`, regardless of
whether it represents a single batch or a `DataLoader` that yields
batches.
"""
# If `inputs` is not a `DataLoader`, turn it into one.
inputs = _format_inputs_dataset(inputs)
# If `show_progress` is true, create an outer progress bar that keeps track of
# how many checkpoints have been processed
if show_progress:
# Try to determine length of inner progress bar if possible, with a default
# of `None`.
inputs_len = None
try:
inputs_len = len(inputs)
except TypeError:
warnings.warn(
"Unable to determine the number of batches in `inputs`. "
"Therefore, if showing the progress of the computation of self "
"influence scores, only the number of batches processed can be "
"displayed, and not the percentage completion of the computation, "
"nor any time estimates."
)
def get_checkpoint_contribution(checkpoint):
# This function returns a 1D tensor representing the contribution to the
# self influence score for the given checkpoint, for all batches in
# `inputs`. The length of the 1D tensor is the total number of
# examples in `inputs`.
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
# This will store a list of the contribution of the self influence score
# from each batch. Each element is a 1D tensor of length batch_size - the
# batch size of each batch in `inputs` (they do not need to be all
# the same)
checkpoint_contribution = []
_inputs = inputs
# If `show_progress` is true, create an inner progress bar that keeps track
# of how many batches have been processed for the current checkpoint
if show_progress:
_inputs = progress(
inputs,
desc=(
f"Using {self.get_name()} to compute self "
"influence. Processing batch"
),
total=inputs_len,
)
for batch in _inputs:
batch_jacobian, batch_layer_input = _basic_computation_tracincp_fast(
self,
batch[0:-1],
batch[-1],
self.loss_fn,
self.reduction_type,
)
checkpoint_contribution.append(
torch.sum(batch_jacobian**2, dim=1)
* torch.sum(batch_layer_input**2, dim=1)
* learning_rate
)
# We concatenate the contributions from each batch into a single 1D tensor,
# which represents the contributions for all batches in `inputs`
return torch.cat(checkpoint_contribution, dim=0)
if show_progress:
checkpoints_progress = progress(
desc=(
f"Using {self.get_name()} to compute self "
"influence. Processing checkpoint"
),
total=len(self.checkpoints),
mininterval=0.0,
)
else:
checkpoints_progress = NullProgress()
with checkpoints_progress:
batches_self_tracin_scores = get_checkpoint_contribution(
self.checkpoints[0]
)
checkpoints_progress.update()
# The self influence score for all examples is the sum of contributions from
# each checkpoint
for checkpoint in self.checkpoints[1:]:
batches_self_tracin_scores += get_checkpoint_contribution(checkpoint)
checkpoints_progress.update()
return batches_self_tracin_scores
@log_usage()
def self_influence(
self,
inputs: Optional[Union[Tuple[Any, ...], DataLoader]] = None,
show_progress: bool = False,
outer_loop_by_checkpoints: bool = False,
) -> Tensor:
"""
Computes self influence scores for the examples in `inputs`, which is
either a single batch or a Pytorch `DataLoader` that yields batches.
If `inputs` is not specified or `None`, this calculates self influence
score for the training dataset `train_dataset`. Note that if `inputs`
is a single batch, this will call `model` on that single batch,
and if `inputs` yields batches, this will call `model`
on each batch that is yielded. Therefore, please ensure that for both cases,
the batch(es) that `model` is called with are not too large, so that
there will not be an out-of-memory error.
Internally, this computation requires iterating both over the batches in
`inputs`, as well as different model checkpoints. There are two ways
this iteration can be done. If `outer_loop_by_checkpoints` is False, the outer
iteration will be over batches, and the inner iteration will be over
checkpoints. This has the pro that displaying the progress of the computation
is more intuitive, involving displaying the number of batches for which self
influence scores have been computed. If `outer_loop_by_checkpoints` is True,
the outer iteration will be over checkpoints, and the inner iteration will be
over batches. This has the pro that the checkpoints do not need to be loaded
for each batch. For large models, loading checkpoints can be time-intensive.
Args:
inputs (tuple or DataLoader, optional): This specifies the
dataset for which self influence scores will be computed.
Either a single tuple of any, or a `DataLoader`, where each
batch yielded is a tuple of type any. In either case, the tuple
represents a single batch, where the last element is assumed to
be the labels for the batch. That is, `model(*batch[0:-1])`
produces the output for `model`, and `batch[-1]` are the labels,
if any. This is the same assumption made for each batch yielded
by training dataset `train_dataset`. Please see documentation for
the `train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch. If not provided
or `None`, self influence scores will be computed for training
dataset `train_dataset`, which yields batches satisfying the
above assumptions.
Default: None.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In more detail, if `outer_loop_by_checkpoints` is False,
this computation will iterate over all batches in an outer loop.
Thus if `show_progress` is True, the number of batches for which
self influence scores have been computed will be displayed. If
`outer_loop_by_checkpoints` is True, this computation will iterate
over all checkpoints (provided as the `checkpoints` initialization
argument) in an outer loop, and iterate over all batches that
`inputs` represents in an inner loop. Thus if
`show_progress` is True, the progress of both the outer
iteration and the inner iterations will be displayed. To show
progress, it will try to use tqdm if available for advanced
features (e.g. time estimation). Otherwise, it will fall back to a
simple output of progress.
Default: False
outer_loop_by_checkpoints (bool, optional): If performing an outer
iteration over checkpoints; see method description for more
details.
Default: False
"""
inputs = inputs if inputs is not None else self.train_dataloader
if outer_loop_by_checkpoints:
return self._self_influence_by_checkpoints(inputs, show_progress)
return _self_influence_by_batches_helper(
self._self_influence_by_checkpoints,
self.get_name(),
inputs,
show_progress,
)
def _basic_computation_tracincp_fast(
influence_instance: TracInCPFast,
inputs: Tuple[Any, ...],
targets: Tensor,
loss_fn: Optional[Union[Module, Callable]] = None,
reduction_type: Optional[str] = None,
):
"""
For instances of TracInCPFast and children classes, computation of influence scores
or self influence scores repeatedly calls this function for different checkpoints
and batches. These computations involve a loss function, which is passed in via
the `loss_fn` argument: callers pass `self.loss_fn` when processing training
batches and `self.test_loss_fn` when processing test batches. These two
attributes were set in initialization, with `self.loss_fn` equal to the
`loss_fn` initialization argument, and `self.test_loss_fn` equal to the
`test_loss_fn` initialization argument if it was provided, and `loss_fn`
otherwise.
Args:
influence_instance (TracInCPFast): An instance of TracInCPFast or its children.
We assume `influence_instance` has a `loss_fn` attribute, i.e. the loss
function applied to the output of the last fully-connected layer, as
well as a `reduction_type` attribute, which indicates whether `loss_fn`
reduces the per-example losses by using their mean or sum. The
`reduction_type` attribute must either be "mean" or "sum".
inputs (tuple[Any, ...]): A batch of examples, which could be a training batch
or test batch, depending on which method is the caller. Does not
represent labels, which are passed as `targets`. The assumption is
that `model(*inputs)` produces the predictions for the batch.
targets (Tensor): If computing influence scores on a loss function,
these are the labels corresponding to the batch `inputs`.
loss_fn (Callable, optional): The loss function to use when computing the
jacobian.
reduction_type (str, optional): The reduction type of `loss_fn`. This argument
is only used if `sample_wise_grads_per_batch` was true in
initialization of `influence_instance`.
Returns:
(input_jacobians, layer_inputs) (tuple): `input_jacobians` is a 2D tensor,
where each row is the jacobian of the loss with respect to the
*output* of the last fully-connected layer. `layer_inputs` is a 2D
tensor, where each row is the *input* to the last fully-connected
layer. For both, the first dimension is the number of examples in the
batch represented by `inputs` and `targets`. For instance, if the final
layer maps 20 input features to 10 outputs and the batch has 8 examples,
`input_jacobians` has shape (8, 10) and `layer_inputs` has shape (8, 20).
"""
layer_inputs: Dict[device, Tuple[Tensor, ...]] = defaultdict()
lock = threading.Lock()
def hook_wrapper(original_module):
def _capture_inputs(layer, input, output) -> None:
r"""Save activations into layer_inputs in forward pass"""
with lock:
is_eval_tuple = isinstance(input, tuple)
if is_eval_tuple:
layer_inputs_val = tuple(inp.detach() for inp in input)
else:
layer_inputs_val = input.detach()
layer_inputs[layer_inputs_val[0].device] = layer_inputs_val
return _capture_inputs
assert isinstance(influence_instance.final_fc_layer, Module)
handle = influence_instance.final_fc_layer.register_forward_hook(
hook_wrapper(influence_instance.final_fc_layer)
)
out = influence_instance.model(*inputs)
assert loss_fn is not None, "loss function is required"
assert reduction_type in [
"sum",
"mean",
], 'reduction_type must be either "mean" or "sum"'
input_jacobians = _jacobian_loss_wrt_inputs(
loss_fn,
out,
targets,
influence_instance.vectorize,
reduction_type,
)
handle.remove()
device_ids = cast(
Union[None, List[int]],
influence_instance.model.device_ids
if hasattr(influence_instance.model, "device_ids")
else None,
)
key_list = _sort_key_list(list(layer_inputs.keys()), device_ids)
_layer_inputs = _gather_distributed_tensors(layer_inputs, key_list=key_list)[0]
assert len(input_jacobians.shape) == 2
return input_jacobians, _layer_inputs
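# Illustrative sketch (not part of the library API): the class docstring of
# `TracInCPFastRandProj` below describes how, for a single checkpoint, an
# example's contribution to its "embedding" is the flattened outer product of
# its `input_jacobian` and `layer_input` (the two quantities returned by
# `_basic_computation_tracincp_fast` above), and how the influence contribution
# is then just a dot product of embeddings. The dimensions and tensors below are
# hypothetical and exist only to make that identity concrete.
def _illustrative_outer_product_embedding_sketch() -> None:
    out_dim, in_dim = 3, 4  # hypothetical last-layer output / input dimensions

    # jacobian of the loss w.r.t. the last layer's output, and the layer's
    # input, for one training example and one test example
    train_jacobian, train_layer_input = torch.randn(out_dim), torch.randn(in_dim)
    test_jacobian, test_layer_input = torch.randn(out_dim), torch.randn(in_dim)

    # per-checkpoint embedding: flattened outer product
    train_embedding = torch.outer(train_jacobian, train_layer_input).flatten()
    test_embedding = torch.outer(test_jacobian, test_layer_input).flatten()

    # the checkpoint's influence contribution, computed two equivalent ways
    via_embeddings = torch.dot(train_embedding, test_embedding)
    via_factored_form = torch.dot(train_layer_input, test_layer_input) * torch.dot(
        train_jacobian, test_jacobian
    )
    assert torch.allclose(via_embeddings, via_factored_form)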
class TracInCPFastRandProj(TracInCPFast):
r"""
A version of TracInCPFast which is optimized for "interactive" calls to
`influence` for the purpose of calculating proponents / opponents, or
influence scores. "Interactive" means there will be multiple calls to
`influence`, with each call for a different batch of test examples, and
subsequent calls rely on the results of previous calls. The implementation in
this class has been optimized so that each call to `influence` is fast, so that
it can be used for interactive analysis. This class should only be used for
interactive use cases. It should not be used if `influence` will only be
called once, because to enable fast calls to `influence`, time and memory
intensive preprocessing is required in `__init__`. Furthermore, it should not
be used to calculate self influence scores - `TracInCPFast` should be used
instead for that purpose. To enable interactive analysis, this implementation
computes and saves "embedding" vectors for all training examples in
`train_dataset`. Crucially, the influence score of a training
example on a test example is simply the dot-product of their corresponding
vectors, and proponents / opponents can be found by first storing vectors for
training examples in a nearest-neighbor data structure, and then finding the
nearest-neighbors for a test example in terms of dot-product (see appendix F
of the TracIn paper). This class should only be used if calls to `influence`
to obtain proponents / opponents or influence scores will be made in an
"interactive" manner, and there is sufficient memory to store vectors for the
entire `train_dataset`. This is because in order to enable interactive
analysis, this implementation incurs overhead in `__init__` to set up the
nearest-neighbors data structure, which is both time and memory intensive, as
vectors corresponding to all training examples need to be stored. To reduce
memory usage, this implementation enables random projections of those vectors.
Note that the influence scores computed with random projections are less
accurate, though correct in expectation.
In more detail regarding the "embedding" vectors - the influence of a training
example on a test example, when only considering gradients in the last
fully-connected layer, is the sum of the contributions from each checkpoint. The
contribution from a given checkpoint is
:math:`(x^T x')(\nabla_y f(y)^T \nabla_{y'} f(y'))`, using the notation in the
description of `TracInCPFast`. As is, this is not a dot-product of 2 vectors.
However, we can rewrite that contribution as
:math:`(x \nabla_y f(y)^T) \cdot (x' \nabla_{y'} f(y')^T)`. Both terms in this
product are 2D matrices, as they are outer products, and the "product" is actually
a dot-product, treating both matrices as vectors. Therefore, for a given
checkpoint, its contribution to the "embedding" of an example is just the
outer-product :math:`(x \nabla_y f(y)^T)`, flattened. Furthermore, to reduce the
dimension of this contribution, we can right-multiply and
left-multiply the outer-product with two separate projection matrices. These
transform :math:`\nabla_y f(y)` and :math:`x` to lower dimensional vectors. While
the dimension of these two lower dimensional vectors do not necessarily need to
be the same, in our implementation, we let them be the same, both equal to the
square root of the desired projection dimension. Finally, the embedding of an
example is the concatenation of the contributions from each checkpoint.
"""
def __init__(
self,
model: Module,
final_fc_layer: Union[Module, str],
train_dataset: Union[Dataset, DataLoader],
checkpoints: Union[str, List[str], Iterator],
checkpoints_load_func: Callable = _load_flexible_state_dict,
loss_fn: Optional[Union[Module, Callable]] = None,
batch_size: Union[int, None] = 1,
test_loss_fn: Optional[Union[Module, Callable]] = None,
vectorize: bool = False,
nearest_neighbors: Optional[NearestNeighbors] = None,
projection_dim: Optional[int] = None,
seed: int = 0,
) -> None:
r"""
Args:
model (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
final_fc_layer (torch.nn.Module or str): The last fully connected layer in
the network for which gradients will be approximated via fast random
projection method. Can be either the layer module itself, or the
fully qualified name of the layer if it is a defined attribute of
the passed `model`.
train_dataset (torch.utils.data.Dataset or torch.utils.data.DataLoader):
In the `influence` method, we compute the influence score of
training examples on examples in a test batch.
This argument represents the training dataset containing those
training examples. In order to compute those influence scores, we
will create a Pytorch DataLoader yielding batches of training
examples that is then used for processing. If this argument is
already a Pytorch Dataloader, that DataLoader can be directly
used for processing. If it is instead a Pytorch Dataset, we will
create a DataLoader using it, with batch size specified by
`batch_size`. For efficiency purposes, the batch size of the
DataLoader used for processing should be as large as possible, but
not too large, so that certain intermediate quantities created
from a batch still fit in memory. Therefore, if
`train_dataset` is a Dataset, `batch_size` should be large.
If `train_dataset` was already a DataLoader to begin with,
it should have been constructed to have a large batch size. It is
assumed that the Dataloader (regardless of whether it is created
from a Pytorch Dataset or not) yields tuples. For a `batch` that is
yielded, of length `L`, it is assumed that the forward function of
`model` accepts `L-1` arguments, and the last element of `batch` is
the label. In other words, `model(*batch[:-1])` gives the output of
`model`, and `batch[-1]` are the labels for the batch.
checkpoints (str, list[str], or Iterator): Either the directory of the
path to store and retrieve model checkpoints, a list of
filepaths with checkpoints from which to load, or an iterator which
returns objects from which to load checkpoints.
checkpoints_load_func (Callable, optional): The function to load a saved
checkpoint into a model to update its parameters, and get the
learning rate if it is saved. By default uses a utility to load a
model saved as a state dict.
Default: _load_flexible_state_dict
loss_fn (Callable, optional): The loss function applied to model. `loss_fn`
must be a "reduction" loss function that reduces the per-example
losses in a batch, and returns a single scalar Tensor. Furthermore,
the reduction must be the *sum* of the per-example losses. For
instance, `nn.BCELoss(reduction="sum")` is acceptable, but
`nn.BCELoss(reduction="mean")` is *not* acceptable.
Default: None
batch_size (int or None, optional): Batch size of the DataLoader created to
iterate through `train_dataset`, if it is a Dataset.
`batch_size` should be chosen as large as possible so that certain
intermediate quantities created from a batch still fit in memory.
Specific implementations of `TracInCPBase` will detail the size of
the intermediate quantities. `batch_size` must be an int if
`train_dataset` is a Dataset. If `train_dataset`
is a DataLoader, then `batch_size` is ignored as an argument.
Default: 1
test_loss_fn (Callable, optional): In some cases, one may want to use a
separate loss functions for training examples, i.e. those in
`train_dataset`, and for test examples, i.e. those
represented by the `inputs` and `targets` arguments to the
`influence` method. For example, if one wants to calculate the
influence score of a training example on a test example's
prediction for a fixed class, `test_loss_fn` could map from the
logits for all classes to the logits for a fixed class.
`test_loss_fn` needs to satisfy the same constraints as `loss_fn`.
Thus, the same checks that we apply to `loss_fn` are also applied
to `test_loss_fn`, if the latter is provided. If not provided, the
loss function for test examples is assumed to be the same as the
loss function for training examples, i.e. `loss_fn`.
vectorize (bool): Flag to use experimental vectorize functionality
for `torch.autograd.functional.jacobian`.
Default: False
nearest_neighbors (NearestNeighbors, optional): The NearestNeighbors
instance for finding nearest neighbors. If None, defaults to
`AnnoyNearestNeighbors(n_trees=10)`.
Default: None
projection_dim (int, optional): Each example will be represented in
the nearest neighbors data structure with a vector. This vector
is the concatenation of several "checkpoint vectors", each of which
is computed using a different checkpoint in the `checkpoints`
argument. If `projection_dim` is an int, it represents the
dimension we will project each "checkpoint vector" to, so that the
vector for each example will be of dimension at most
`projection_dim` * C, where C is the number of checkpoints.
Regarding the dimension of each vector, D: Let I be the dimension
of the output of the last fully-connected layer times the dimension
of the input of the last fully-connected layer. If `projection_dim`
is not `None`, then D = min(I * C, `projection_dim` * C).
Otherwise, D = I * C. In summary, if `projection_dim` is None, the
dimension of this vector will be determined by the size of the
input and output of the last fully-connected layer of `model`, and
the number of checkpoints. Otherwise, `projection_dim` must be an
int, and random projection will be performed to ensure that the
vector is of dimension no more than `projection_dim` * C.
`projection_dim` corresponds to the variable d in the top of page
15 of the TracIn paper: https://arxiv.org/abs/2002.08484.
Default: None
seed (int, optional): Because this implementation chooses a random
projection, its output is random. Setting this seed specifies the
random seed when choosing the random projection.
Default: 0
"""
TracInCPFast.__init__(
self,
model,
final_fc_layer,
train_dataset,
checkpoints,
checkpoints_load_func,
loss_fn,
batch_size,
test_loss_fn,
vectorize,
)
warnings.warn(
(
"WARNING: Using this implementation stores quantities related to the "
"entire `train_dataset` in memory, and may result in running "
"out of memory. If this happens, consider using %s instead, for which "
"each call to `influence` to compute influence scores or proponents "
"will be slower, but may avoid running out of memory."
)
% "`TracInCPFast`"
)
self.nearest_neighbors = (
AnnoyNearestNeighbors() if nearest_neighbors is None else nearest_neighbors
)
self.projection_dim = projection_dim
torch.manual_seed(seed) # for reproducibility
self.projection_quantities = self._set_projections_tracincp_fast_rand_proj(
self.train_dataloader,
)
self.src_intermediate_quantities = (
self._get_intermediate_quantities_tracincp_fast_rand_proj(
self.train_dataloader,
self.projection_quantities,
)
)
self._process_src_intermediate_quantities_tracincp_fast_rand_proj(
self.src_intermediate_quantities,
)
def _influence( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
) -> Tensor:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
Returns:
influence_scores (Tensor): Influence scores from the `TracInCPFastRandProj`
method. Its shape is `(input_size, train_dataset_size)`, where `input_size`
is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in
training dataset `train_dataset`. For example:
`influence_scores[i][j]` is the influence score of the j-th training
example on the i-th example in the test batch.
"""
# TODO: after D35721609 lands, use helper function
# `TracInCP._influence_rand_proj` here to avoid duplicated logic
input_projections = self._get_intermediate_quantities_tracincp_fast_rand_proj(
inputs,
self.projection_quantities,
test=True,
)
src_projections = self.src_intermediate_quantities
return torch.matmul(input_projections, src_projections.T)
def _get_k_most_influential( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
k: int = 5,
proponents: bool = True,
) -> KMostInfluentialResults:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): The number of proponents or opponents to return per test
example.
Default: 5
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`)
Default: True
Returns:
(indices, influence_scores) (namedtuple): `indices` is a torch.long Tensor
that contains the indices of the proponents (or opponents) for each
test example. Its dimension is `(inputs_batch_size, k)`, where
`inputs_batch_size` is the number of examples in `inputs`. For
example, if `proponents==True`, `indices[i][j]` is the index of the
example in training dataset `train_dataset` with the
j-th highest influence score for the i-th example in `inputs`.
`indices` is a `torch.long` tensor so that it can directly be used
to index other tensors. Each row of `influence_scores` contains the
influence scores for a different test example, in sorted order. In
particular, `influence_scores[i][j]` is the influence score of
example `indices[i][j]` in training dataset `train_dataset`
on example `i` in the test batch represented by `inputs`.
"""
input_projections = self._get_intermediate_quantities_tracincp_fast_rand_proj(
inputs,
self.projection_quantities,
test=True,
)
multiplier = 1 if proponents else -1
input_projections *= multiplier
indices, distances = self.nearest_neighbors.get_nearest_neighbors(
input_projections, k
)
distances *= multiplier
return KMostInfluentialResults(indices, distances)
@log_usage()
def self_influence(
self,
inputs: Optional[Union[Tuple[Any, ...], DataLoader]] = None,
show_progress: bool = False,
outer_loop_by_checkpoints: bool = False,
) -> Tensor:
"""
NOT IMPLEMENTED - no need to implement `TracInCPFastRandProj.self_influence`,
as `TracInCPFast.self_influence` is sufficient - the latter does not benefit
from random projections, since no quantities associated with a training
example are stored (other than its self influence score).
Computes self influence scores for a single batch or a Pytorch `DataLoader`
that yields batches. Note that if `inputs` is a single batch, this
will call `model` on that single batch, and if `inputs` yields
batches, this will call `model` on each batch that is yielded. Therefore,
please ensure that for both cases, the batch(es) that `model` is called
with are not too large, so that there will not be an out-of-memory error.
Args:
inputs (tuple or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`,
and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset`. Please see documentation for the
`train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In more detail, this computation will iterate over all
checkpoints (provided as the `checkpoints` initialization argument)
and all batches that `inputs` represents. Therefore, the
total number of (checkpoint, batch) combinations that need to be
iterated over is
(# of checkpoints x # of batches that `inputs` represents).
If `show_progress` is True, the total number of such combinations
that have been iterated over is displayed. It will try to use tqdm
if available for advanced features (e.g. time estimation).
Otherwise, it will fall back to a simple output of progress.
Default: False
outer_loop_by_checkpoints (bool, optional): If performing an outer
iteration over checkpoints; see method description for more
details.
Default: False
Returns:
self_influence_scores (Tensor): This is a 1D tensor containing the self
influence scores of all examples in `inputs`, regardless of
whether it represents a single batch or a `DataLoader` that yields
batches.
"""
warnings.warn(
(
"WARNING: If calculating self influence scores, when only considering "
"gradients with respect to the last fully-connected layer, "
"`TracInCPFastRandProj` should not be used. Instead, please use "
"`TracInCPFast`. This is because when calculating self influence "
"scores, no quantities associated with a training example are stored, "
"so the memory-saving benefit of the random projections used by "
"`TracInCPFastRandProj` is not needed. Furthermore, since random "
"projections result only in approximate self influence scores, there "
"is no reason to use `TracInCPFastRandProj` when "
"calculating self influence scores."
)
)
raise NotImplementedError
@log_usage()
def influence( # type: ignore[override]
self,
inputs: Optional[Tuple[Any, ...]] = None,
k: Optional[int] = None,
proponents: bool = True,
) -> Union[Tensor, KMostInfluentialResults]:
r"""
This is the key method of this class, and can be run in 2 different modes,
where the mode that is run depends on the arguments passed to this method:
- influence score mode: This mode is used if `k` is None. This mode computes
the influence score of every example in training dataset `train_dataset`
on every example in the test batch represented by `inputs`.
- k-most influential mode: This mode is used if `k` is not None, and an int.
This mode computes the proponents or opponents of every example in the
test batch represented by `inputs`. In particular, for each test example in
the test batch, this mode computes its proponents (resp. opponents),
which are the indices in the training dataset `train_dataset` of the
training examples with the `k` highest (resp. lowest) influence scores on the
test example. Proponents are computed if `proponents` is True. Otherwise,
opponents are computed. For each test example, this method also returns the
actual influence score of each proponent (resp. opponent) on the test
example.
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): If not provided or `None`, the influence score mode will
be run. Otherwise, the k-most influential mode will be run,
and `k` is the number of proponents / opponents to return per
example in the test batch.
Default: None
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`), if running in k-most influential
mode.
Default: True
Returns:
The return value of this method depends on which mode is run.
- influence score mode: if this mode is run (`k` is None), returns a 2D
tensor `influence_scores` of shape `(input_size, train_dataset_size)`,
where `input_size` is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in training dataset
`train_dataset`. In other words, `influence_scores[i][j]` is the
influence score of the `j`-th example in `train_dataset` on the `i`-th
example in the test batch.
- k-most influential mode: if this mode is run (`k` is an int), returns
a namedtuple `(indices, influence_scores)`. `indices` is a 2D tensor of
shape `(input_size, k)`, where `input_size` is the number of examples in
the test batch. If computing proponents (resp. opponents),
`indices[i][j]` is the index in training dataset `train_dataset` of the
example with the `j`-th highest (resp. lowest) influence score (out of
the examples in `train_dataset`) on the `i`-th example in the test
batch. `influence_scores` contains the corresponding influence scores.
In particular, `influence_scores[i][j]` is the influence score of example
`indices[i][j]` in `train_dataset` on example `i` in the test batch
represented by `inputs`.
"""
assert inputs is not None, (
"`inputs` argument is required. "
"`TracInCPFastRandProj` does not support computing self influence scores. "
"Even if it did, one would use the `self_influence` method."
)
return _influence_route_to_helpers(
self,
inputs,
k,
proponents,
)
def _set_projections_tracincp_fast_rand_proj(
self,
dataloader: DataLoader,
) -> Optional[Tuple[torch.Tensor, torch.Tensor]]:
"""
returns the variables `jacobian_projection` and `layer_input_projection`
if needed, based on `self.projection_dim`. The two variables are
used by `self._get_intermediate_quantities_tracincp_fast_rand_proj`. They are
both None if projection is not needed, due to the intermediate quantities (see the
`_get_intermediate_quantities_tracincp_fast_rand_proj` method for details) being no
greater than `self.projection_dim` * C even without projection, where C is the
number of checkpoints in the `checkpoints` argument to
`TracInCPFastRandProj.__init__`.
Args:
dataloader (DataLoader): determining the projection requires knowing the
dimensionality of the last layer's parameters (`jacobian_dim`
below) and its input (`layer_input_dim` below). These are
determined by passing a batch to `model`. `dataloader`
provides that batch.
Returns:
jacobian_projection (Tensor or None): Projection matrix to apply to
Jacobian of last layer to reduce its dimension, if needed.
None otherwise.
layer_input_projection (Tensor or None): Projection matrix to apply to input of
last layer to reduce its dimension, if needed. None otherwise.
"""
# figure out projection dimensions, if needed
projection_dim = self.projection_dim
projection_quantities = None
if not (projection_dim is None):
# figure out original dimensions by looking at data, passing through network
self.checkpoints_load_func(self.model, next(iter(self.checkpoints)))
batch = next(iter(dataloader))
batch_jacobians, batch_layer_inputs = _basic_computation_tracincp_fast(
self,
batch[0:-1],
batch[-1],
self.loss_fn,
self.reduction_type,
)
jacobian_dim = batch_jacobians.shape[
1
] # this is the dimension of the output of the last fully-connected layer
layer_input_dim = batch_layer_inputs.shape[
1
] # this is the dimension of the input of the last fully-connected layer
device = batch_jacobians.device
dtype = batch_jacobians.dtype
# choose projection if needed
# without projection, the dimension of the intermediate quantities returned
# by `_get_intermediate_quantities_tracincp_fast_rand_proj` will be
# `jacobian_dim` * `layer_input_dim` * number of checkpoints
# this is because for each checkpoint, we compute a "partial" intermediate
# quantity, and the intermediate quantity is the concatenation of the
# "partial" intermediate quantities, and the dimension of each "partial"
# intermediate quantity, without projection, is `jacobian_dim` *
# `layer_input_dim`. However, `projection_dim` refers to the maximum
# allowable dimension of the "partial" intermediate quantity. Therefore,
# we only project if `jacobian_dim` * `layer_input_dim` > `projection_dim`.
# `projection_dim` corresponds to the variable d in the top of page 15 of
# the TracIn paper: https://arxiv.org/abs/2002.08484.
if jacobian_dim * layer_input_dim > projection_dim:
jacobian_projection_dim = min(int(projection_dim**0.5), jacobian_dim)
layer_input_projection_dim = min(
int(projection_dim**0.5), layer_input_dim
)
jacobian_projection = torch.normal(
torch.zeros(jacobian_dim, jacobian_projection_dim),
1.0 / jacobian_projection_dim**0.5,
)
layer_input_projection = torch.normal(
torch.zeros(layer_input_dim, layer_input_projection_dim),
1.0 / layer_input_projection_dim**0.5,
)
projection_quantities = jacobian_projection.to(
device=device, dtype=dtype
), layer_input_projection.to(device=device, dtype=dtype)
return projection_quantities
def _process_src_intermediate_quantities_tracincp_fast_rand_proj(
self,
src_intermediate_quantities: torch.Tensor,
):
"""
Assumes `self._get_intermediate_quantities_tracincp_fast_rand_proj` returns
vector representations for each example, and that influence between a
training and test example is obtained by taking the dot product of their
vector representations. In this case, given a test example, its proponents
can be found by storing the vector representations for training examples
into a data structure enabling fast largest-dot-product computation. This
method creates that data structure. This method has side effects.
Args:
src_intermediate_quantities (Tensor): the output of the
`_get_intermediate_quantities_tracincp_fast_rand_proj` function when
applied to training dataset `train_dataset`. This
output is the vector representation of all training examples.
The dot product between the representation of a training example
and the representation of a test example gives the influence score
of the training example on the test example.
"""
self.nearest_neighbors.setup(src_intermediate_quantities)
def _get_intermediate_quantities_tracincp_fast_rand_proj(
self,
inputs: Union[Tuple[Any, ...], DataLoader],
projection_quantities: Optional[Tuple[torch.Tensor, torch.Tensor]],
test: bool = False,
) -> torch.Tensor:
r"""
This method computes vectors that can be used to compute influence. (see
Appendix F, page 15). Crucially, the influence score between a test example
and a training example is simply the dot product of their respective
vectors. This means that the training example with the largest influence score
on a given test example can be found using a nearest-neighbor (more
specifically, largest dot-product) data structure.
Args:
inputs (Tuple, or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`,
and `batch[-1]` are the labels, if any. Here, `model` is the model
provided in initialization. This is the same assumption made for
each batch yielded by training dataset `train_dataset`. Please see
documentation for the `train_dataset` argument to
`TracInCPFastRandProj.__init__` for more details on the assumed
structure of a batch.
projection_quantities (tuple or None): Is either the two tensors defining
the randomized projections to apply, or None, which means no
projection is to be applied.
test (bool): If True, the intermediate quantities are computed using
`self.test_loss_fn`. Otherwise, they are computed using
`self.loss_fn`.
Default: False
Returns:
intermediate_quantities (Tensor): A tensor of dimension
(N, D * C), where N is the total number of examples in `inputs`, C
is the number of checkpoints passed as the `checkpoints` argument
of `TracInCPFastRandProj.__init__`, and each row represents the
vector for an example. Regarding D: Let I be the dimension of the
output of the last fully-connected layer times the dimension of the
input of the last fully-connected layer. If `self.projection_dim`
is specified in initialization,
D = min(I * C, `self.projection_dim` * C). Otherwise, D = I * C.
In summary, if `self.projection_dim` is None, the dimension of each
vector will be determined by the size of the input and output of
the last fully-connected layer of `model`. Otherwise,
`self.projection_dim` must be an int, and random projection will be
performed to ensure that the vector is of dimension no more than
`self.projection_dim` * C. `self.projection_dim` corresponds to
the variable d in the top of page 15 of the TracIn paper:
https://arxiv.org/abs/2002.08484.
"""
# if `inputs` is not a `DataLoader`, turn it into one.
inputs = _format_inputs_dataset(inputs)
# internally, whether `projection_quantities` is None determines whether
# any projection will be applied to reduce the dimension of the "embedding"
# vectors. If projection will be applied, there are actually 2 different
# projection matrices - one to project the `input_jacobians`, and one to
# project the `layer_inputs`. See below for details of those two quantities.
# here, we extract the corresponding projection matrices for those two
# quantities, if doing projection. Note that the same projections are used
# for each checkpoint.
project = False
if projection_quantities is not None:
project = True
jacobian_projection, layer_input_projection = projection_quantities
# for each checkpoint, we will populate a list containing the contribution of
# the checkpoint for each batch
checkpoint_contributions: List[Union[List, Tensor]] = [
[] for _ in self.checkpoints
]
# the "embedding" vector is the concatenation of contributions from each
# checkpoint, which we compute one by one
for (j, checkpoint) in enumerate(self.checkpoints):
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
learning_rate_root = learning_rate**0.5
# after loading a checkpoint, we compute the contribution of that
# checkpoint, for *all* batches (instead of a single batch). this enables
# increased efficiency.
for batch in inputs:
# compute `input_jacobians` and `layer_inputs`, for a given checkpoint
# using a helper function. `input_jacobians` is a 2D tensor,
# where each row is the jacobian of the loss, with respect to the
# *output* of the last fully-connected layer. `layer_inputs` is a 2D
# tensor, where each row is the *input* to the last fully-connected
# layer. For both, the length is the number of examples in `batch`
input_jacobians, layer_inputs = _basic_computation_tracincp_fast(
self,
batch[0:-1],
batch[-1],
self.test_loss_fn,
self.test_reduction_type,
)
# if doing projection, project those two quantities
if project:
input_jacobians = torch.matmul(input_jacobians, jacobian_projection)
layer_inputs = torch.matmul(layer_inputs, layer_input_projection)
# for an example, the contribution to the "embedding" vector from each
# checkpoint is the outer product of its `input_jacobian` and its
# `layer_input`, flattened to a 1D tensor. here, we perform this
# for the entire batch. we append the contribution to a list containing
# the contribution of all batches, from the checkpoint.
cast(list, checkpoint_contributions[j]).append(
torch.matmul(
torch.unsqueeze(
input_jacobians, 2
), # size is (batch_size, output_size, 1)
torch.unsqueeze(
layer_inputs, 1
), # size is (batch_size, 1, input_size)
).flatten(
start_dim=1
) # matmul does a batched matrix multiplication to return a 3D
# tensor. each element along the batch (0-th) dimension is the
# matrix product of a (output_size, 1) and (1, input_size) tensor
# in other words, each element is an outer product, and the matmul
# is just doing a batched outer product. this is what we want, as
# the contribution to the "embedding" for an example is the outer
# product of the last layer's input and the gradient of its output.
# finally, we flatten the 3rd dimension so that the contribution to
# the embedding for this checkpoint is a 2D tensor, i.e. each
# example's contribution to the embedding is a 1D tensor.
* learning_rate_root
)
# once we have computed the contribution from each batch, for a given
# checkpoint, we concatenate them along the batch dimension to get a
# single 2D tensor for that checkpoint
checkpoint_contributions[j] = torch.cat(
checkpoint_contributions[j], dim=0 # type: ignore
)
# finally, we concatenate along the checkpoint dimension, to get a tensor of
# shape (batch_size, projection_dim * number of checkpoints)
# each row in this result is the "embedding" vector for an example in `batch`
return torch.cat(checkpoint_contributions, dim=1) # type: ignore
@log_usage()
def compute_intermediate_quantities(
self,
inputs: Union[Tuple[Any, ...], DataLoader],
) -> Tensor:
"""
Computes "embedding" vectors for all examples in a single batch, or a
`Dataloader` that yields batches. These embedding vectors are constructed so
that the influence score of a training example on a test example is simply the
dot-product of their corresponding vectors. Please see the documentation for
`TracInCPFastRandProj.__init__` for more details. Allowing a `DataLoader`
yielding batches to be passed in (as opposed to a single batch) gives the
potential to improve efficiency, because we load each checkpoint only once in
this method call. Thus if a `DataLoader` yielding batches is passed in, this
reduces the total number of times each checkpoint is loaded for a dataset,
compared to if a single batch is passed in. The reason we do not just increase
the batch size is that for large models, large batches do not fit in memory.
Args:
inputs (Tuple, or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`,
and `batch[-1]` are the labels, if any. Here, `model` is the model
provided in initialization. This is the same assumption made for
each batch yielded by training dataset `train_dataset`. Please see
documentation for the `train_dataset` argument to
`TracInCPFastRandProj.__init__` for more details on the assumed
structure of a batch.
Returns:
intermediate_quantities (Tensor): A tensor of dimension
(N, D * C), where N is total number of examples in
`inputs`, C is the number of checkpoints passed as the
`checkpoints` argument of `TracInCPFastRandProj.__init__`, and each
row represents the vector for an example. Regarding D: Let I be the
dimension of the output of the last fully-connected layer times the
dimension of the input of the last fully-connected layer. If
`self.projection_dim` is specified in initialization,
D = min(I * C, `self.projection_dim` * C). Otherwise, D = I * C.
In summary, if `self.projection_dim` is None, the dimension of each
vector will be determined by the size of the input and output of
the last fully-connected layer of `model`. Otherwise,
`self.projection_dim` must be an int, and random projection will be
performed to ensure that the vector is of dimension no more than
`self.projection_dim` * C. `self.projection_dim` corresponds to
the variable d in the top of page 15 of the TracIn paper:
https://arxiv.org/pdf/2002.08484.pdf.
"""
return self._get_intermediate_quantities_tracincp_fast_rand_proj(
inputs, self.projection_quantities
)
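# Illustrative usage sketch (hedged): `net`, the random data, and the checkpoint
# directory below are hypothetical placeholders, not part of this module. The
# sketch only shows the interactive proponents / opponents workflow that
# `TracInCPFastRandProj` is designed for, as described in its class docstring.
if __name__ == "__main__":
    import torch.nn as nn
    from torch.utils.data import TensorDataset

    # hypothetical model whose last fully-connected layer is registered as "fc2"
    net = nn.Sequential()
    net.add_module("fc1", nn.Linear(10, 8))
    net.add_module("relu", nn.ReLU())
    net.add_module("fc2", nn.Linear(8, 3))

    # hypothetical training data; checkpoints are assumed to live in a directory
    hypothetical_train_dataset = TensorDataset(
        torch.randn(100, 10), torch.randint(0, 3, (100,))
    )
    tracin = TracInCPFastRandProj(
        model=net,
        final_fc_layer="fc2",
        train_dataset=hypothetical_train_dataset,
        checkpoints="path/to/checkpoints",  # hypothetical checkpoint directory
        loss_fn=nn.CrossEntropyLoss(reduction="sum"),
        batch_size=16,
        projection_dim=64,
    )

    # interactive calls: each test batch reuses the embeddings precomputed in __init__
    test_batch = (torch.randn(5, 10), torch.randint(0, 3, (5,)))
    proponents = tracin.influence(test_batch, k=3, proponents=True)
    print(proponents.indices.shape, proponents.influence_scores.shape)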
|
#!/usr/bin/env python3
from abc import ABC, abstractmethod
from typing import Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
class StochasticGatesBase(Module, ABC):
"""
Abstract module for Stochastic Gates.
Stochastic Gates is a practical solution to add L0 norm regularization for neural
networks. L0 regularization, which explicitly penalizes any present (non-zero)
parameters, can help network pruning and feature selection, but directly optimizing
L0 is a non-differentiable combinatorial problem. To surrogate L0, Stochastic Gate
uses certain continuous probability distributions (e.g., Concrete, Gaussian) with
hard-sigmoid rectification as a continuous smoothed Bernoulli distribution
determining the weight of a parameter, i.e., gate. Then L0 is equal to the gates'
non-zero probability represented by the parameters of the continuous probability
distribution. The gate value can also be reparameterized to the distribution
parameters with noise. So the expected L0 can be optimized through learning
the distribution parameters via stochastic gradients.
This base class defines the shared variables and forward logic of how the input is
gated regardless of the underlying distribution. The actual implementation should
extend this class and implement the distribution specific functions.
"""
def __init__(
self,
n_gates: int,
mask: Optional[Tensor] = None,
reg_weight: float = 1.0,
reg_reduction: str = "sum",
):
"""
Args:
n_gates (int): number of gates.
mask (Tensor, optional): If provided, this allows grouping multiple
input tensor elements to share the same stochastic gate.
This tensor should be broadcastable to match the input shape
and contain integers in the range 0 to n_gates - 1.
Indices grouped to the same stochastic gate should have the same value.
If not provided, each element in the input tensor
(on dimensions other than dim 0 - batch dim) is gated separately.
Default: None
reg_weight (float, optional): rescaling weight for L0 regularization term.
Default: 1.0
reg_reduction (str, optional): the reduction to apply to the regularization:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
applied and it will be the same as the return of ``get_active_probs``,
``'mean'``: the sum of the gates non-zero probabilities will be divided
by the number of gates, ``'sum'``: the gates non-zero probabilities will
be summed.
Default: ``'sum'``
"""
super().__init__()
if mask is not None:
max_mask_ind = mask.max().item()
assert max_mask_ind == n_gates - 1, (
f"the maximum mask index (received {max_mask_ind}) should be equal to"
f" the number of gates - 1 (received {n_gates}) since each mask"
" should correspond to a gate"
)
valid_reg_reduction = ["none", "mean", "sum"]
assert (
reg_reduction in valid_reg_reduction
), f"reg_reduction must be one of [none, mean, sum], received: {reg_reduction}"
self.reg_reduction = reg_reduction
self.n_gates = n_gates
self.register_buffer(
"mask", mask.detach().clone() if mask is not None else None
)
self.reg_weight = reg_weight
def forward(self, input_tensor: Tensor) -> Tuple[Tensor, Tensor]:
"""
Args:
input_tensor (Tensor): Tensor to be gated with stochastic gates
Returns:
tuple[Tensor, Tensor]:
- gated_input (Tensor): Tensor of the same shape weighted by the sampled
gate values
- l0_reg (Tensor): L0 regularization term to be optimized together with
model loss,
e.g. loss(model_out, target) + l0_reg
"""
if self.mask is None:
n_ele = self._get_numel_of_input(input_tensor)
assert n_ele == self.n_gates, (
"if mask is not given, each example in the input batch should have the"
" same number of elements"
f" (received {n_ele}) as gates ({self.n_gates})"
)
input_size = input_tensor.size()
batch_size = input_size[0]
gate_values = self._sample_gate_values(batch_size)
# hard-sigmoid rectification z=min(1,max(0,_z))
gate_values = torch.clamp(gate_values, min=0, max=1)
if self.mask is not None:
# use expand_as not expand/broadcast_to which do not work with torch.fx
input_mask = self.mask.expand_as(input_tensor)
# flatten all dim except batch to gather from gate values
flattened_mask = input_mask.reshape(batch_size, -1)
gate_values = torch.gather(gate_values, 1, flattened_mask)
# reshape gates(batch_size, n_elements) into input_size for point-wise mul
gate_values = gate_values.reshape(input_size)
gated_input = input_tensor * gate_values
prob_density = self._get_gate_active_probs()
if self.reg_reduction == "sum":
l0_reg = prob_density.sum()
elif self.reg_reduction == "mean":
l0_reg = prob_density.mean()
else:
l0_reg = prob_density
l0_reg *= self.reg_weight
return gated_input, l0_reg
def get_gate_values(self, clamp: bool = True) -> Tensor:
"""
Get the gate values, which are the means of the underlying gate distributions,
optionally clamped within 0 and 1.
Args:
clamp (bool, optional): whether to clamp the gate values or not. As smoothed
Bernoulli variables, gate values are clamped within 0 and 1 by default.
Turn this off to get the raw means of the underlying
distribution (e.g., concrete, gaussian), which can be useful to
differentiate the gates' importance when multiple gate
values are beyond 0 or 1.
Default: ``True``
Returns:
Tensor:
- gate_values (Tensor): value of each gate in shape(n_gates)
"""
gate_values = self._get_gate_values()
if clamp:
gate_values = torch.clamp(gate_values, min=0, max=1)
return gate_values.detach()
def get_gate_active_probs(self) -> Tensor:
"""
Get the active probability of each gate, i.e., gate value > 0
Returns:
Tensor:
- probs (Tensor): tensor of probabilities that the gates are active,
in shape(n_gates)
"""
return self._get_gate_active_probs().detach()
@abstractmethod
def _get_gate_values(self) -> Tensor:
"""
Protected method to be overridden in the child depending on the chosen
distribution. Get the raw gate values derived from the learned parameters of
the corresponding distribution without clamping.
Returns:
gate_values (Tensor): gate value tensor of shape(n_gates)
"""
pass
@abstractmethod
def _sample_gate_values(self, batch_size: int) -> Tensor:
"""
Protected method to be overridden in the child depending on the chosen
distribution. Sample gate values for each example in the batch from a
probability distribution
Args:
batch_size (int): input batch size
Returns:
gate_values (Tensor): gate value tensor of shape(batch_size, n_gates)
"""
pass
@abstractmethod
def _get_gate_active_probs(self) -> Tensor:
"""
Protected method to be overridden in the child depending on the chosen
distribution. Get the active probability of each gate, i.e., gate value > 0
Returns:
probs (Tensor): tensor of probabilities that the gates are active
in shape(n_gates)
"""
pass
def _get_numel_of_input(self, input_tensor: Tensor) -> int:
"""
Get the number of elements of a single example in the batched input tensor
"""
assert input_tensor.dim() > 1, (
"The input tensor must have more than 1 dimension with the 1st dimension"
" being batch size;"
f" received input tensor shape {input_tensor.size()}"
)
return input_tensor[0].numel()
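# Illustrative sketch (hedged): `_ExampleDeterministicGates` is a hypothetical,
# minimal subclass that exists only to make the base class contract concrete. It
# implements the three abstract methods with a deterministic sigmoid "gate"; it
# is not one of the distributions shipped with captum.
class _ExampleDeterministicGates(StochasticGatesBase):
    def __init__(self, n_gates: int, **kwargs) -> None:
        super().__init__(n_gates, **kwargs)
        self.logits = torch.nn.Parameter(torch.zeros(n_gates))

    def _get_gate_values(self) -> Tensor:
        # raw gate values; forward() applies the hard-sigmoid clamp on samples
        return torch.sigmoid(self.logits)

    def _sample_gate_values(self, batch_size: int) -> Tensor:
        # no noise: every example in the batch sees the same gate values
        return torch.sigmoid(self.logits).expand(batch_size, self.n_gates)

    def _get_gate_active_probs(self) -> Tensor:
        # with a deterministic gate strictly inside (0, 1), reuse the gate value
        # itself as a stand-in for the probability that the gate is active
        return torch.sigmoid(self.logits)


if __name__ == "__main__":
    # one gate per input element: 2 examples x 4 elements, 4 gates
    gates = _ExampleDeterministicGates(n_gates=4)
    gated, l0_reg = gates(torch.randn(2, 4))
    print(gated.shape, l0_reg)  # torch.Size([2, 4]) and a scalar ("sum" reduction)

    # grouping via a mask: the first two elements share gate 0, the last two gate 1
    mask = torch.tensor([0, 0, 1, 1])
    grouped = _ExampleDeterministicGates(n_gates=2, mask=mask)
    gated, l0_reg = grouped(torch.randn(2, 4))
    print(gated.shape, l0_reg)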
|
#!/usr/bin/env python3
import math
from typing import Optional
import torch
from captum.module.stochastic_gates_base import StochasticGatesBase
from torch import nn, Tensor
class GaussianStochasticGates(StochasticGatesBase):
"""
Stochastic Gates with Gaussian distribution.
Stochastic Gates is a practical solution to add L0 norm regularization for neural
networks. L0 regularization, which explicitly penalizes any present (non-zero)
parameters, can help network pruning and feature selection, but directly optimizing
L0 is a non-differentiable combinatorial problem. To surrogate L0, Stochastic Gate
uses certain continuous probability distributions (e.g., Concrete, Gaussian) with
hard-sigmoid rectification as a continuous smoothed Bernoulli distribution
determining the weight of a parameter, i.e., gate. Then L0 is equal to the gates'
non-zero probability represented by the parameters of the continuous probability
distribution. The gate value can also be reparameterized to the distribution
parameters with noise. So the expected L0 can be optimized through learning
the distribution parameters via stochastic gradients.
GaussianStochasticGates adopts a gaussian distribution as the smoothed Bernoulli
distribution of gate. While the smoothed Bernoulli distribution should be
within 0 and 1, gaussian does not have boundaries. So hard-sigmoid rectification
is used to "fold" the parts smaller than 0 or larger than 1 back to 0 and 1.
More details can be found in the original paper:
https://arxiv.org/abs/1810.04247
Examples::
>>> n_params = 5 # number of gates
>>> stg = GaussianStochasticGates(n_params, reg_weight=0.01)
>>> inputs = torch.randn(3, n_params) # mock inputs with batch size of 3
>>> gated_inputs, reg = stg(inputs) # gate the inputs
"""
def __init__(
self,
n_gates: int,
mask: Optional[Tensor] = None,
reg_weight: Optional[float] = 1.0,
std: Optional[float] = 0.5,
reg_reduction: str = "sum",
):
"""
Args:
n_gates (int): number of gates.
mask (Tensor, optional): If provided, this allows grouping multiple
input tensor elements to share the same stochastic gate.
This tensor should be broadcastable to match the input shape
and contain integers in the range 0 to n_gates - 1.
Indices grouped to the same stochastic gate should have the same value.
If not provided, each element in the input tensor
(on dimensions other than dim 0, i.e., batch dim) is gated separately.
Default: None
reg_weight (float, optional): rescaling weight for L0 regularization term.
Default: 1.0
std (float, optional): standard deviation that will be fixed throughout.
Default: 0.5
reg_reduction (str, optional): the reduction to apply to the regularization:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
applied and it will be the same as the return of ``get_active_probs``,
``'mean'``: the sum of the gates non-zero probabilities will be divided
by the number of gates, ``'sum'``: the gates non-zero probabilities will
be summed.
Default: ``'sum'``
"""
super().__init__(
n_gates, mask=mask, reg_weight=reg_weight, reg_reduction=reg_reduction
)
mu = torch.empty(n_gates)
nn.init.normal_(mu, mean=0.5, std=0.01)
self.mu = nn.Parameter(mu)
assert 0 < std, f"the standard deviation should be positive, received {std}"
self.std = std
def _sample_gate_values(self, batch_size: int) -> Tensor:
"""
Sample gate values for each example in the batch from the Gaussian distribution
Args:
batch_size (int): input batch size
Returns:
gate_values (Tensor): gate value tensor of shape(batch_size, n_gates)
"""
if self.training:
n = torch.empty(batch_size, self.n_gates, device=self.mu.device)
n.normal_(mean=0, std=self.std)
return self.mu + n
return self.mu.expand(batch_size, self.n_gates)
def _get_gate_values(self) -> Tensor:
"""
Get the raw gate values, which are the means of the underlying gate
distributions, the learned mu
Returns:
gate_values (Tensor): value of each gate after model is trained
"""
return self.mu
def _get_gate_active_probs(self) -> Tensor:
"""
Get the active probability of each gate, i.e., gate value > 0, in the
Gaussian distribution
Returns:
probs (Tensor): tensor of probabilities that the gates are active
in shape(n_gates)
"""
x = self.mu / self.std
return 0.5 * (1 + torch.erf(x / math.sqrt(2)))
@classmethod
def _from_pretrained(cls, mu: Tensor, *args, **kwargs):
"""
Private factory method to create an instance with pretrained parameters
Args:
mu (Tensor): FloatTensor containing weights for the pretrained mu
mask (Tensor, optional): If provided, this allows grouping multiple
input tensor elements to share the same stochastic gate.
This tensor should be broadcastable to match the input shape
and contain integers in the range 0 to n_gates - 1.
Indices grouped to the same stochastic gate should have the same value.
If not provided, each element in the input tensor
(on dimensions other than dim 0 - batch dim) is gated separately.
Default: None
reg_weight (float, optional): rescaling weight for L0 regularization term.
Default: 1.0
std (float, optional): standard deviation that will be fixed throughout.
Default: 0.5
reg_reduction (str, optional): the reduction to apply to the regularization:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
applied and it will be the same as the return of ``get_active_probs``,
``'mean'``: the sum of the gates non-zero probabilities will be divided
by the number of gates, ``'sum'``: the gates non-zero probabilities will
be summed.
Default: ``'sum'``
Returns:
stg (GaussianStochasticGates): StochasticGates instance
"""
n_gates = mu.numel()
stg = cls(n_gates, *args, **kwargs)
stg.load_state_dict({"mu": mu}, strict=False)
return stg
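# Illustrative usage sketch (hedged): the tiny linear model, random data, and
# hyperparameters below are hypothetical; the sketch only shows how the L0
# regularization term returned by the gates is added to the task loss during
# training, as described in `StochasticGatesBase.forward`.
if __name__ == "__main__":
    n_features = 8
    model = nn.Linear(n_features, 1)
    stg = GaussianStochasticGates(n_gates=n_features, reg_weight=0.01)
    optimizer = torch.optim.SGD(
        list(model.parameters()) + list(stg.parameters()), lr=0.1
    )

    # hypothetical regression data
    x = torch.randn(32, n_features)
    y = torch.randn(32, 1)

    for _ in range(10):
        optimizer.zero_grad()
        gated_x, l0_reg = stg(x)  # gate the inputs, get the L0 penalty
        loss = nn.functional.mse_loss(model(gated_x), y) + l0_reg
        loss.backward()
        optimizer.step()

    # gate means (clamped to [0, 1]) indicate which features the gates keep
    print(stg.get_gate_values())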
|
from captum.module.binary_concrete_stochastic_gates import ( # noqa
BinaryConcreteStochasticGates,
)
from captum.module.gaussian_stochastic_gates import GaussianStochasticGates # noqa
from captum.module.stochastic_gates_base import StochasticGatesBase # noqa
|
#!/usr/bin/env python3
import math
from typing import Optional
import torch
from captum.module.stochastic_gates_base import StochasticGatesBase
from torch import nn, Tensor
def _torch_empty(batch_size: int, n_gates: int, device: torch.device) -> Tensor:
return torch.empty(batch_size, n_gates, device=device)
# torch.fx is introduced in 1.8.0
if hasattr(torch, "fx"):
torch.fx.wrap(_torch_empty)
def _logit(inp):
# torch.logit is introduced in 1.7.0
if hasattr(torch, "logit"):
return torch.logit(inp)
else:
return torch.log(inp) - torch.log(1 - inp)
class BinaryConcreteStochasticGates(StochasticGatesBase):
"""
Stochastic Gates with binary concrete distribution.
Stochastic Gates is a practical solution to add L0 norm regularization for neural
networks. L0 regularization, which explicitly penalizes any present (non-zero)
parameters, can help network pruning and feature selection, but directly optimizing
L0 is a non-differentiable combinatorial problem. To surrogate L0, Stochastic Gate
uses certain continuous probability distributions (e.g., Concrete, Gaussian) with
hard-sigmoid rectification as a continuous smoothed Bernoulli distribution
determining the weight of a parameter, i.e., gate. Then L0 is equal to the gates'
non-zero probability represented by the parameters of the continuous probability
distribution. The gate value can also be reparameterized to the distribution
parameters with noise. So the expected L0 can be optimized through learning
the distribution parameters via stochastic gradients.
BinaryConcreteStochasticGates adopts a "stretched" binary concrete distribution as
the smoothed Bernoulli distribution of gate. The binary concrete distribution does
not include its lower and upper boundaries, 0 and 1, which are required by a
Bernoulli distribution, so it needs to be linearly stretched beyond both boundaries.
Hard-sigmoid rectification is then used to "fold" the parts smaller than 0 or
larger than 1 back to 0 and 1.
More details can be found in the original paper:
https://arxiv.org/abs/1712.01312
Examples::
>>> n_params = 5 # number of parameters
>>> stg = BinaryConcreteStochasticGates(n_params, reg_weight=0.01)
>>> inputs = torch.randn(3, n_params) # mock inputs with batch size of 3
>>> gated_inputs, reg = stg(inputs) # gate the inputs
"""
def __init__(
self,
n_gates: int,
mask: Optional[Tensor] = None,
reg_weight: float = 1.0,
temperature: float = 2.0 / 3,
lower_bound: float = -0.1,
upper_bound: float = 1.1,
eps: float = 1e-8,
reg_reduction: str = "sum",
):
"""
Args:
n_gates (int): number of gates.
mask (Tensor, optional): If provided, this allows grouping multiple
input tensor elements to share the same stochastic gate.
This tensor should be broadcastable to match the input shape
and contain integers in the range 0 to n_gates - 1.
Indices grouped to the same stochastic gate should have the same value.
If not provided, each element in the input tensor
(on dimensions other than dim 0, i.e., batch dim) is gated separately.
Default: None
reg_weight (float, optional): rescaling weight for L0 regularization term.
Default: 1.0
temperature (float, optional): temperature of the concrete distribution,
controls the degree of approximation, as 0 means the original Bernoulli
without relaxation. The value should be between 0 and 1.
Default: 2/3
lower_bound (float, optional): the lower bound to "stretch" the binary
concrete distribution
Default: -0.1
upper_bound (float, optional): the upper bound to "stretch" the binary
concrete distribution
Default: 1.1
eps (float, optional): term to improve numerical stability in binary
concrete sampling
Default: 1e-8
reg_reduction (str, optional): the reduction to apply to the regularization:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
applied and it will be the same as the return of ``get_active_probs``,
``'mean'``: the sum of the gates non-zero probabilities will be divided
by the number of gates, ``'sum'``: the gates non-zero probabilities will
be summed.
Default: ``'sum'``
"""
super().__init__(
n_gates, mask=mask, reg_weight=reg_weight, reg_reduction=reg_reduction
)
# avoid changing the tensor's variable name
# when the module is used after compilation,
# users may directly access this tensor by name
log_alpha_param = torch.empty(n_gates)
nn.init.normal_(log_alpha_param, mean=0.0, std=0.01)
self.log_alpha_param = nn.Parameter(log_alpha_param)
assert (
0 < temperature < 1
), f"the temperature should be between 0 and 1, received {temperature}"
self.temperature = temperature
assert (
lower_bound < 0
), f"the stretch lower bound should be smaller than 0, received {lower_bound}"
self.lower_bound = lower_bound
assert (
upper_bound > 1
), f"the stretch upper bound should be larger than 1, received {upper_bound}"
self.upper_bound = upper_bound
self.eps = eps
# pre-calculate the fixed term used in active prob
self.active_prob_offset = temperature * math.log(-lower_bound / upper_bound)
def _sample_gate_values(self, batch_size: int) -> Tensor:
"""
Sample gate values for each example in the batch from the binary concrete
distributions
Args:
batch_size (int): input batch size
Returns:
gate_values (Tensor): gate value tensor of shape(batch_size, n_gates)
"""
if self.training:
u = _torch_empty(
batch_size, self.n_gates, device=self.log_alpha_param.device
)
u.uniform_(self.eps, 1 - self.eps)
s = torch.sigmoid((_logit(u) + self.log_alpha_param) / self.temperature)
else:
s = torch.sigmoid(self.log_alpha_param)
s = s.expand(batch_size, self.n_gates)
s_bar = s * (self.upper_bound - self.lower_bound) + self.lower_bound
return s_bar
def _get_gate_values(self) -> Tensor:
"""
Get the raw gate values, which are the means of the underlying gate
distributions, derived from learned log_alpha_param
Returns:
gate_values (Tensor): value of each gate after model is trained
"""
gate_values = (
torch.sigmoid(self.log_alpha_param) * (self.upper_bound - self.lower_bound)
+ self.lower_bound
)
return gate_values
def _get_gate_active_probs(self) -> Tensor:
"""
Get the active probability of each gate, i.e., gate value > 0, in the binary
concrete distributions
Returns:
probs (Tensor): tensor of probabilities that the gates are active
in shape(n_gates)
"""
return torch.sigmoid(self.log_alpha_param - self.active_prob_offset)
@classmethod
def _from_pretrained(cls, log_alpha_param: Tensor, *args, **kwargs):
"""
Private factory method to create an instance with pretrained parameters
Args:
log_alpha_param (Tensor): FloatTensor containing weights for
the pretrained log_alpha
mask (Tensor, optional): If provided, this allows grouping multiple
input tensor elements to share the same stochastic gate.
This tensor should be broadcastable to match the input shape
and contain integers in the range 0 to n_gates - 1.
Indices grouped to the same stochastic gate should have the same value.
If not provided, each element in the input tensor
(on dimensions other than dim 0 - batch dim) is gated separately.
Default: None
reg_weight (float, optional): rescaling weight for L0 regularization term.
Default: 1.0
temperature (float, optional): temperature of the concrete distribution,
controls the degree of approximation, as 0 means the original Bernoulli
without relaxation. The value should be between 0 and 1.
Default: 2/3
lower_bound (float, optional): the lower bound to "stretch" the binary
concrete distribution
Default: -0.1
upper_bound (float, optional): the upper bound to "stretch" the binary
concrete distribution
Default: 1.1
eps (float, optional): term to improve numerical stability in binary
concrete sampling
Default: 1e-8
reg_reduction (str, optional): the reduction to apply to the regularization:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
applied and it will be the same as the return of ``get_active_probs``,
``'mean'``: the sum of the gates non-zero probabilities will be divided
by the number of gates, ``'sum'``: the gates non-zero probabilities will
be summed.
Default: ``'sum'``
Returns:
stg (BinaryConcreteStochasticGates): StochasticGates instance
"""
assert (
log_alpha_param.dim() == 1
), "log_alpha_param is expected to be 1-dimensional"
n_gates = log_alpha_param.numel()
stg = cls(n_gates, *args, **kwargs)
stg.load_state_dict({"log_alpha_param": log_alpha_param}, strict=False)
return stg
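# Illustrative sketch (hedged): the gate instance and inputs below are
# hypothetical; the sketch only demonstrates how sampling differs between
# training and evaluation mode for the stretched binary concrete gates, and how
# gate values and active probabilities can be read out.
if __name__ == "__main__":
    stg = BinaryConcreteStochasticGates(n_gates=4, reg_weight=0.01)
    inputs = torch.randn(3, 4)

    stg.train()  # noisy, reparameterized samples from the concrete distribution
    gated_train, reg_train = stg(inputs)

    stg.eval()  # deterministic: stretched sigmoid of log_alpha_param
    gated_eval, reg_eval = stg(inputs)

    # raw gate values can lie outside [0, 1] because of the stretch to
    # (lower_bound, upper_bound); forward() folds them back with a hard sigmoid
    print(stg.get_gate_values(clamp=False))
    print(stg.get_gate_values())  # clamped gate means
    print(stg.get_gate_active_probs())  # P(gate > 0) for each gate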
|
#!/usr/bin/env python3
from captum.attr._core.dataloader_attr import DataLoaderAttribution # noqa
from captum.attr._core.deep_lift import DeepLift, DeepLiftShap # noqa
from captum.attr._core.feature_ablation import FeatureAblation # noqa
from captum.attr._core.feature_permutation import FeaturePermutation # noqa
from captum.attr._core.gradient_shap import GradientShap # noqa
from captum.attr._core.guided_backprop_deconvnet import ( # noqa
Deconvolution,
GuidedBackprop,
)
from captum.attr._core.guided_grad_cam import GuidedGradCam # noqa
from captum.attr._core.input_x_gradient import InputXGradient # noqa
from captum.attr._core.integrated_gradients import IntegratedGradients # noqa
from captum.attr._core.kernel_shap import KernelShap # noqa
from captum.attr._core.layer.grad_cam import LayerGradCam # noqa
from captum.attr._core.layer.internal_influence import InternalInfluence # noqa
from captum.attr._core.layer.layer_activation import LayerActivation # noqa
from captum.attr._core.layer.layer_conductance import LayerConductance # noqa
from captum.attr._core.layer.layer_deep_lift import ( # noqa
LayerDeepLift,
LayerDeepLiftShap,
)
from captum.attr._core.layer.layer_feature_ablation import LayerFeatureAblation # noqa
from captum.attr._core.layer.layer_gradient_shap import LayerGradientShap # noqa
from captum.attr._core.layer.layer_gradient_x_activation import ( # noqa
LayerGradientXActivation,
)
from captum.attr._core.layer.layer_integrated_gradients import ( # noqa
LayerIntegratedGradients,
)
from captum.attr._core.layer.layer_lrp import LayerLRP # noqa
from captum.attr._core.lime import Lime, LimeBase # noqa
from captum.attr._core.lrp import LRP # noqa
from captum.attr._core.neuron.neuron_conductance import NeuronConductance # noqa
from captum.attr._core.neuron.neuron_deep_lift import ( # noqa
NeuronDeepLift,
NeuronDeepLiftShap,
)
from captum.attr._core.neuron.neuron_feature_ablation import ( # noqa
NeuronFeatureAblation,
)
from captum.attr._core.neuron.neuron_gradient import NeuronGradient # noqa
from captum.attr._core.neuron.neuron_gradient_shap import NeuronGradientShap # noqa
from captum.attr._core.neuron.neuron_guided_backprop_deconvnet import ( # noqa
NeuronDeconvolution,
NeuronGuidedBackprop,
)
from captum.attr._core.neuron.neuron_integrated_gradients import ( # noqa
NeuronIntegratedGradients,
)
from captum.attr._core.noise_tunnel import NoiseTunnel # noqa
from captum.attr._core.occlusion import Occlusion # noqa
from captum.attr._core.saliency import Saliency # noqa
from captum.attr._core.shapley_value import ShapleyValues, ShapleyValueSampling # noqa
from captum.attr._models.base import ( # noqa
configure_interpretable_embedding_layer,
InterpretableEmbeddingBase,
remove_interpretable_embedding_layer,
TokenReferenceBase,
)
from captum.attr._utils import visualization # noqa
from captum.attr._utils.attribution import (  # noqa
Attribution,
GradientAttribution,
LayerAttribution,
NeuronAttribution,
PerturbationAttribution,
)
from captum.attr._utils.class_summarizer import ClassSummarizer
from captum.attr._utils.stat import (
CommonStats,
Count,
Max,
Mean,
Min,
MSE,
StdDev,
Sum,
Var,
)
from captum.attr._utils.summarizer import Summarizer
__all__ = [
"Attribution",
"GradientAttribution",
"PerturbationAttribution",
"NeuronAttribution",
"LayerAttribution",
"IntegratedGradients",
"DataLoaderAttribution",
"DeepLift",
"DeepLiftShap",
"InputXGradient",
"Saliency",
"GuidedBackprop",
"Deconvolution",
"GuidedGradCam",
"FeatureAblation",
"FeaturePermutation",
"Occlusion",
"ShapleyValueSampling",
"ShapleyValues",
"LimeBase",
"Lime",
"LRP",
"KernelShap",
"LayerConductance",
"LayerGradientXActivation",
"LayerActivation",
"LayerFeatureAblation",
"InternalInfluence",
"LayerGradCam",
"LayerDeepLift",
"LayerDeepLiftShap",
"LayerGradientShap",
"LayerIntegratedGradients",
"LayerLRP",
"NeuronConductance",
"NeuronFeatureAblation",
"NeuronGradient",
"NeuronIntegratedGradients",
"NeuronDeepLift",
"NeuronDeepLiftShap",
"NeuronGradientShap",
"NeuronDeconvolution",
"NeuronGuidedBackprop",
"NoiseTunnel",
"GradientShap",
"InterpretableEmbeddingBase",
"TokenReferenceBase",
"visualization",
"configure_interpretable_embedding_layer",
"remove_interpretable_embedding_layer",
"Summarizer",
"CommonStats",
"ClassSummarizer",
"Mean",
"StdDev",
"MSE",
"Var",
"Min",
"Max",
"Sum",
"Count",
]
|
#!/usr/bin/env python3
import typing
import warnings
from typing import Any, Callable, Iterator, Tuple, Union
import torch
from captum._utils.common import (
_format_additional_forward_args,
_format_output,
_format_tensor_into_tuples,
_reduce_list,
)
from captum._utils.typing import (
TargetType,
TensorOrTupleOfTensorsGeneric,
TupleOrTensorOrBoolGeneric,
)
from captum.attr._utils.approximation_methods import approximation_parameters
from torch import Tensor
def _batch_attribution(
attr_method,
num_examples,
internal_batch_size,
n_steps,
include_endpoint=False,
**kwargs,
):
"""
This method applies internal batching to given attribution method, dividing
the total steps into batches and running each independently and sequentially,
adding each result to compute the total attribution.
Step sizes and alphas are spliced for each batch and passed explicitly for each
call to _attribute.
kwargs include all argument necessary to pass to each attribute call, except
for n_steps, which is computed based on the number of steps for the batch.
include_endpoint ensures that one step overlaps between each batch, which
is necessary for some methods, particularly LayerConductance.
"""
if internal_batch_size < num_examples:
warnings.warn(
"Internal batch size cannot be less than the number of input examples. "
"Defaulting to internal batch size of %d equal to the number of examples."
% num_examples
)
# Number of steps for each batch
step_count = max(1, internal_batch_size // num_examples)
if include_endpoint:
if step_count < 2:
step_count = 2
warnings.warn(
"This method computes finite differences between evaluations at "
"consecutive steps, so internal batch size must be at least twice "
"the number of examples. Defaulting to internal batch size of %d"
" equal to twice the number of examples." % (2 * num_examples)
)
total_attr = None
cumulative_steps = 0
step_sizes_func, alphas_func = approximation_parameters(kwargs["method"])
full_step_sizes = step_sizes_func(n_steps)
full_alphas = alphas_func(n_steps)
while cumulative_steps < n_steps:
start_step = cumulative_steps
end_step = min(start_step + step_count, n_steps)
batch_steps = end_step - start_step
if include_endpoint:
batch_steps -= 1
step_sizes = full_step_sizes[start_step:end_step]
alphas = full_alphas[start_step:end_step]
current_attr = attr_method._attribute(
**kwargs, n_steps=batch_steps, step_sizes_and_alphas=(step_sizes, alphas)
)
if total_attr is None:
total_attr = current_attr
else:
if isinstance(total_attr, Tensor):
total_attr = total_attr + current_attr.detach()
else:
total_attr = tuple(
current.detach() + prev_total
for current, prev_total in zip(current_attr, total_attr)
)
if include_endpoint and end_step < n_steps:
cumulative_steps = end_step - 1
else:
cumulative_steps = end_step
return total_attr
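

# A small sketch (not part of the library) of the step-batching arithmetic used
# above: with 10 examples, an internal batch size of 30 and 7 total steps, each
# chunk evaluates at most internal_batch_size // num_examples = 3 steps.
if __name__ == "__main__":
    num_examples, internal_batch_size, n_steps = 10, 30, 7
    step_count = max(1, internal_batch_size // num_examples)
    cumulative_steps = 0
    while cumulative_steps < n_steps:
        end_step = min(cumulative_steps + step_count, n_steps)
        print("evaluating steps", list(range(cumulative_steps, end_step)))
        cumulative_steps = end_step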
@typing.overload
def _tuple_splice_range(inputs: None, start: int, end: int) -> None:
...
@typing.overload
def _tuple_splice_range(inputs: Tuple, start: int, end: int) -> Tuple:
...
def _tuple_splice_range(
inputs: Union[None, Tuple], start: int, end: int
) -> Union[None, Tuple]:
"""
Splices each tensor element of given tuple (inputs) from range start
(inclusive) to end (non-inclusive) on its first dimension. If element
is not a Tensor, it is left unchanged. It is assumed that all tensor elements
have the same first dimension (corresponding to number of examples).
The returned value is a tuple with the same length as inputs, with Tensors
spliced appropriately.
"""
assert start < end, "Start point must precede end point for batch splicing."
if inputs is None:
return None
return tuple(
inp[start:end] if isinstance(inp, torch.Tensor) else inp for inp in inputs
)
def _batched_generator(
inputs: TensorOrTupleOfTensorsGeneric,
additional_forward_args: Any = None,
target_ind: TargetType = None,
internal_batch_size: Union[None, int] = None,
) -> Iterator[Tuple[Tuple[Tensor, ...], Any, TargetType]]:
"""
Returns a generator which returns corresponding chunks of size internal_batch_size
for both inputs and additional_forward_args. If batch size is None,
generator only includes original inputs and additional args.
"""
assert internal_batch_size is None or (
isinstance(internal_batch_size, int) and internal_batch_size > 0
), "Batch size must be greater than 0."
inputs = _format_tensor_into_tuples(inputs)
additional_forward_args = _format_additional_forward_args(additional_forward_args)
num_examples = inputs[0].shape[0]
# TODO Reconsider this check if _batched_generator is used for non gradient-based
# attribution algorithms
if not (inputs[0] * 1).requires_grad:
warnings.warn(
"""It looks like that the attribution for a gradient-based method is
computed in a `torch.no_grad` block or perhaps the inputs have no
requires_grad."""
)
if internal_batch_size is None:
yield inputs, additional_forward_args, target_ind
else:
for current_total in range(0, num_examples, internal_batch_size):
with torch.autograd.set_grad_enabled(True):
inputs_splice = _tuple_splice_range(
inputs, current_total, current_total + internal_batch_size
)
yield inputs_splice, _tuple_splice_range(
additional_forward_args,
current_total,
current_total + internal_batch_size,
), target_ind[
current_total : current_total + internal_batch_size
] if isinstance(
target_ind, list
) or (
isinstance(target_ind, torch.Tensor) and target_ind.numel() > 1
) else target_ind
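

# A hedged usage sketch of ``_batched_generator`` with toy tensors: inputs and
# target indices are sliced into gradient-enabled chunks of at most two examples.
if __name__ == "__main__":
    toy_inputs = torch.rand(6, 3, requires_grad=True)
    toy_targets = torch.tensor([0, 1, 0, 1, 0, 1])
    for chunk, extra_args, chunk_targets in _batched_generator(
        toy_inputs, target_ind=toy_targets, internal_batch_size=2
    ):
        print(chunk[0].shape, chunk_targets)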
def _batched_operator(
operator: Callable[..., TupleOrTensorOrBoolGeneric],
inputs: TensorOrTupleOfTensorsGeneric,
additional_forward_args: Any = None,
target_ind: TargetType = None,
internal_batch_size: Union[None, int] = None,
**kwargs: Any,
) -> TupleOrTensorOrBoolGeneric:
"""
Batches the operation of the given operator, applying the given batch size
to inputs and additional forward arguments, and returning the concatenation
of the results of each batch.
"""
all_outputs = [
operator(
inputs=input,
additional_forward_args=additional,
target_ind=target,
**kwargs,
)
for input, additional, target in _batched_generator(
inputs, additional_forward_args, target_ind, internal_batch_size
)
]
return _reduce_list(all_outputs)
def _select_example(curr_arg: Any, index: int, bsz: int) -> Any:
if curr_arg is None:
return None
is_tuple = isinstance(curr_arg, tuple)
if not is_tuple:
curr_arg = (curr_arg,)
selected_arg = []
for i in range(len(curr_arg)):
if isinstance(curr_arg[i], (Tensor, list)) and len(curr_arg[i]) == bsz:
selected_arg.append(curr_arg[i][index : index + 1])
else:
selected_arg.append(curr_arg[i])
return _format_output(is_tuple, tuple(selected_arg))
def _batch_example_iterator(bsz: int, *args) -> Iterator:
"""
Batches the provided argument.
"""
for i in range(bsz):
curr_args = [_select_example(args[j], i, bsz) for j in range(len(args))]
yield tuple(curr_args)
|
#!/usr/bin/env python3
import torch.nn as nn
class Addition_Module(nn.Module):
"""Custom addition module that uses multiple inputs to assure correct relevance
propagation. Any addition in a forward function needs to be replaced with the
module before using LRP."""
def __init__(self) -> None:
super().__init__()
def forward(self, x1, x2):
return x1 + x2
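

# A hedged sketch with a hypothetical ``SkipBlock``: the residual addition is
# routed through ``Addition_Module`` so that LRP can attach a propagation rule
# to it, instead of relying on a bare ``x1 + x2`` inside ``forward``.
if __name__ == "__main__":
    import torch

    class SkipBlock(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = nn.Linear(4, 4)
            self.add = Addition_Module()

        def forward(self, x):
            return self.add(self.linear(x), x)

    print(SkipBlock()(torch.rand(2, 4)).shape)  # torch.Size([2, 4])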
|
#!/usr/bin/env python3
from enum import Enum
from typing import Callable, List, Tuple
import torch
class Riemann(Enum):
left = 1
right = 2
middle = 3
trapezoid = 4
SUPPORTED_RIEMANN_METHODS = [
"riemann_left",
"riemann_right",
"riemann_middle",
"riemann_trapezoid",
]
SUPPORTED_METHODS = SUPPORTED_RIEMANN_METHODS + ["gausslegendre"]
def approximation_parameters(
method: str,
) -> Tuple[Callable[[int], List[float]], Callable[[int], List[float]]]:
r"""Retrieves parameters for the input approximation `method`
Args:
        method (str): The name of the approximation method. Currently only
                    the Riemann methods and Gauss-Legendre are supported.
"""
if method in SUPPORTED_RIEMANN_METHODS:
return riemann_builders(method=Riemann[method.split("_")[-1]])
if method == "gausslegendre":
return gauss_legendre_builders()
raise ValueError("Invalid integral approximation method name: {}".format(method))
def riemann_builders(
method: Riemann = Riemann.trapezoid,
) -> Tuple[Callable[[int], List[float]], Callable[[int], List[float]]]:
r"""Step sizes are identical and alphas are scaled in [0, 1]
Args:
method (Riemann): `left`, `right`, `middle` and `trapezoid` riemann
Returns:
2-element tuple of **step_sizes**, **alphas**:
- **step_sizes** (*Callable*):
`step_sizes` takes the number of steps as an
            input argument and returns an array of step sizes whose
            sum is smaller than or equal to one.
- **alphas** (*Callable*):
`alphas` takes the number of steps as an input argument
and returns the multipliers/coefficients for the inputs
of integrand in the range of [0, 1]
"""
def step_sizes(n: int) -> List[float]:
assert n > 1, "The number of steps has to be larger than one"
deltas = [1 / n] * n
if method == Riemann.trapezoid:
deltas[0] /= 2
deltas[-1] /= 2
return deltas
def alphas(n: int) -> List[float]:
assert n > 1, "The number of steps has to be larger than one"
if method == Riemann.trapezoid:
return torch.linspace(0, 1, n).tolist()
elif method == Riemann.left:
return torch.linspace(0, 1 - 1 / n, n).tolist()
elif method == Riemann.middle:
return torch.linspace(1 / (2 * n), 1 - 1 / (2 * n), n).tolist()
elif method == Riemann.right:
return torch.linspace(1 / n, 1, n).tolist()
else:
raise AssertionError("Provided Reimann approximation method is not valid.")
# This is not a standard riemann method but in many cases it
# leades to faster approaximation. Test cases for small number of steps
# do not make sense but for larger number of steps the approximation is
# better therefore leaving this option available
# if method == 'riemann_include_endpoints':
# return [i / (n - 1) for i in range(n)]
return step_sizes, alphas
def gauss_legendre_builders() -> Tuple[
Callable[[int], List[float]], Callable[[int], List[float]]
]:
r"""Numpy's `np.polynomial.legendre` function helps to compute step sizes
and alpha coefficients using gauss-legendre quadrature rule.
Since numpy returns the integration parameters in different scales we need to
rescale them to adjust to the desired scale.
Gauss Legendre quadrature rule for approximating the integrals was originally
proposed by [Xue Feng and her intern Hauroun Habeeb]
(https://research.fb.com/people/feng-xue/).
Returns:
2-element tuple of **step_sizes**, **alphas**:
- **step_sizes** (*Callable*):
`step_sizes` takes the number of steps as an
            input argument and returns an array of step sizes whose
            sum is smaller than or equal to one.
- **alphas** (*Callable*):
`alphas` takes the number of steps as an input argument
and returns the multipliers/coefficients for the inputs
of integrand in the range of [0, 1]
"""
# allow using riemann even without np
import numpy as np
def step_sizes(n: int) -> List[float]:
assert n > 0, "The number of steps has to be larger than zero"
# Scaling from 2 to 1
return list(0.5 * np.polynomial.legendre.leggauss(n)[1])
def alphas(n: int) -> List[float]:
assert n > 0, "The number of steps has to be larger than zero"
# Scaling from [-1, 1] to [0, 1]
return list(0.5 * (1 + np.polynomial.legendre.leggauss(n)[0]))
return step_sizes, alphas
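

# A short sketch exercising ``approximation_parameters``: for every supported
# method the step sizes sum to at most one and the alphas lie in [0, 1], which
# is what the integral approximations built on top of them rely on.
if __name__ == "__main__":
    for name in ("riemann_trapezoid", "gausslegendre"):
        step_sizes, alphas = approximation_parameters(name)
        print(name, sum(step_sizes(5)), alphas(5))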
|
#!/usr/bin/env python3
import inspect
from typing import Any
import torch.nn as nn
class InputIdentity(nn.Module):
def __init__(self, input_name: str) -> None:
r"""
The identity operation
Args:
input_name (str)
The name of the input this layer is associated to. For debugging
purposes.
"""
super().__init__()
self.input_name = input_name
def forward(self, x):
return x
class ModelInputWrapper(nn.Module):
def __init__(self, module_to_wrap: nn.Module) -> None:
r"""
This is a convenience class. This wraps a model via first feeding the
model's inputs to separate layers (one for each input) and then feeding
the (unmodified) inputs to the underlying model (`module_to_wrap`). Each
input is fed through an `InputIdentity` layer/module. This class does
not change how you feed inputs to your model, so feel free to use your
model as you normally would.
To access a wrapped input layer, simply access it via the `input_maps`
        ModuleDict, e.g. to get the corresponding module for input "x", simply
        use `my_wrapped_module.input_maps["x"]`
        This is done so that one can use layer attribution methods on inputs,
        which allows mixing layer and input attributions with these
        attribution methods. This is especially useful for multimodal models
        whose inputs include discrete features (mapped to embeddings, such as
        text) as well as regular continuous feature vectors.
Notes:
- Since inputs are mapped with the identity, attributing to the
input/feature can be done with either the input or output of the
layer, e.g. attributing to an input/feature doesn't depend on whether
attribute_to_layer_input is True or False for
LayerIntegratedGradients.
- Please refer to the multimodal tutorial or unit tests
(test/attr/test_layer_wrapper.py) for an example.
Args:
module_to_wrap (nn.Module):
The model/module you want to wrap
"""
super().__init__()
self.module = module_to_wrap
# ignore self
self.arg_name_list = inspect.getfullargspec(module_to_wrap.forward).args[1:]
self.input_maps = nn.ModuleDict(
{arg_name: InputIdentity(arg_name) for arg_name in self.arg_name_list}
)
def forward(self, *args, **kwargs) -> Any:
args = list(args)
for idx, (arg_name, arg) in enumerate(zip(self.arg_name_list, args)):
args[idx] = self.input_maps[arg_name](arg)
for arg_name in kwargs.keys():
kwargs[arg_name] = self.input_maps[arg_name](kwargs[arg_name])
return self.module(*tuple(args), **kwargs)
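

# A hedged usage sketch with a hypothetical two-input model: after wrapping,
# each named input owns an ``InputIdentity`` layer (see ``input_maps``) that
# layer attribution methods such as LayerIntegratedGradients can target.
if __name__ == "__main__":
    import torch

    class TwoInputModel(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = nn.Linear(8, 1)

        def forward(self, x, y):
            return self.linear(torch.cat([x, y], dim=1))

    wrapped = ModelInputWrapper(TwoInputModel())
    out = wrapped(torch.rand(2, 4), y=torch.rand(2, 4))
    print(out.shape, list(wrapped.input_maps.keys()))  # torch.Size([2, 1]) ['x', 'y']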
|
#!/usr/bin/env python3
import warnings
from enum import Enum
from typing import Any, Iterable, List, Optional, Tuple, Union
import numpy as np
from matplotlib import cm, colors, pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.figure import Figure
from matplotlib.pyplot import axis, figure
from mpl_toolkits.axes_grid1 import make_axes_locatable
from numpy import ndarray
try:
from IPython.display import display, HTML
HAS_IPYTHON = True
except ImportError:
HAS_IPYTHON = False
class ImageVisualizationMethod(Enum):
heat_map = 1
blended_heat_map = 2
original_image = 3
masked_image = 4
alpha_scaling = 5
class TimeseriesVisualizationMethod(Enum):
overlay_individual = 1
overlay_combined = 2
colored_graph = 3
class VisualizeSign(Enum):
positive = 1
absolute_value = 2
negative = 3
all = 4
def _prepare_image(attr_visual: ndarray):
return np.clip(attr_visual.astype(int), 0, 255)
def _normalize_scale(attr: ndarray, scale_factor: float):
assert scale_factor != 0, "Cannot normalize by scale factor = 0"
if abs(scale_factor) < 1e-5:
warnings.warn(
"Attempting to normalize by value approximately 0, visualized results"
"may be misleading. This likely means that attribution values are all"
"close to 0."
)
attr_norm = attr / scale_factor
return np.clip(attr_norm, -1, 1)
def _cumulative_sum_threshold(values: ndarray, percentile: Union[int, float]):
# given values should be non-negative
assert percentile >= 0 and percentile <= 100, (
"Percentile for thresholding must be " "between 0 and 100 inclusive."
)
sorted_vals = np.sort(values.flatten())
cum_sums = np.cumsum(sorted_vals)
threshold_id = np.where(cum_sums >= cum_sums[-1] * 0.01 * percentile)[0][0]
return sorted_vals[threshold_id]
def _normalize_attr(
attr: ndarray,
sign: str,
outlier_perc: Union[int, float] = 2,
reduction_axis: Optional[int] = None,
):
attr_combined = attr
if reduction_axis is not None:
attr_combined = np.sum(attr, axis=reduction_axis)
# Choose appropriate signed values and rescale, removing given outlier percentage.
if VisualizeSign[sign] == VisualizeSign.all:
threshold = _cumulative_sum_threshold(np.abs(attr_combined), 100 - outlier_perc)
elif VisualizeSign[sign] == VisualizeSign.positive:
attr_combined = (attr_combined > 0) * attr_combined
threshold = _cumulative_sum_threshold(attr_combined, 100 - outlier_perc)
elif VisualizeSign[sign] == VisualizeSign.negative:
attr_combined = (attr_combined < 0) * attr_combined
threshold = -1 * _cumulative_sum_threshold(
np.abs(attr_combined), 100 - outlier_perc
)
elif VisualizeSign[sign] == VisualizeSign.absolute_value:
attr_combined = np.abs(attr_combined)
threshold = _cumulative_sum_threshold(attr_combined, 100 - outlier_perc)
else:
raise AssertionError("Visualize Sign type is not valid.")
return _normalize_scale(attr_combined, threshold)
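

# A brief sketch of ``_normalize_attr`` on toy data: with sign="all" and no
# outlier trimming, values are scaled by the absolute maximum into [-1, 1].
if __name__ == "__main__":
    toy_attr = np.array([[1.0, -2.0], [3.0, -4.0]])
    print(_normalize_attr(toy_attr, "all", outlier_perc=0))
    # approximately [[0.25, -0.5], [0.75, -1.0]]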
def visualize_image_attr(
attr: ndarray,
original_image: Union[None, ndarray] = None,
method: str = "heat_map",
sign: str = "absolute_value",
plt_fig_axis: Union[None, Tuple[figure, axis]] = None,
outlier_perc: Union[int, float] = 2,
cmap: Union[None, str] = None,
alpha_overlay: float = 0.5,
show_colorbar: bool = False,
title: Union[None, str] = None,
fig_size: Tuple[int, int] = (6, 6),
use_pyplot: bool = True,
):
r"""
Visualizes attribution for a given image by normalizing attribution values
of the desired sign (positive, negative, absolute value, or all) and displaying
them using the desired mode in a matplotlib figure.
Args:
attr (numpy.ndarray): Numpy array corresponding to attributions to be
visualized. Shape must be in the form (H, W, C), with
channels as last dimension. Shape must also match that of
the original image if provided.
original_image (numpy.ndarray, optional): Numpy array corresponding to
original image. Shape must be in the form (H, W, C), with
channels as the last dimension. Image can be provided either
with float values in range 0-1 or int values between 0-255.
This is a necessary argument for any visualization method
which utilizes the original image.
Default: None
method (str, optional): Chosen method for visualizing attribution.
Supported options are:
1. `heat_map` - Display heat map of chosen attributions
2. `blended_heat_map` - Overlay heat map over greyscale
version of original image. Parameter alpha_overlay
corresponds to alpha of heat map.
3. `original_image` - Only display original image.
4. `masked_image` - Mask image (pixel-wise multiply)
by normalized attribution values.
5. `alpha_scaling` - Sets alpha channel of each pixel
to be equal to normalized attribution value.
Default: `heat_map`
sign (str, optional): Chosen sign of attributions to visualize. Supported
options are:
1. `positive` - Displays only positive pixel attributions.
2. `absolute_value` - Displays absolute value of
attributions.
3. `negative` - Displays only negative pixel attributions.
4. `all` - Displays both positive and negative attribution
values. This is not supported for `masked_image` or
`alpha_scaling` modes, since signed information cannot
be represented in these modes.
Default: `absolute_value`
plt_fig_axis (tuple, optional): Tuple of matplotlib.pyplot.figure and axis
on which to visualize. If None is provided, then a new figure
and axis are created.
Default: None
outlier_perc (float or int, optional): Top attribution values which
correspond to a total of outlier_perc percentage of the
total attribution are set to 1 and scaling is performed
using the minimum of these values. For sign=`all`, outliers
and scale value are computed using absolute value of
attributions.
Default: 2
cmap (str, optional): String corresponding to desired colormap for
heatmap visualization. This defaults to "Reds" for negative
sign, "Blues" for absolute value, "Greens" for positive sign,
and a spectrum from red to green for all. Note that this
argument is only used for visualizations displaying heatmaps.
Default: None
alpha_overlay (float, optional): Alpha to set for heatmap when using
`blended_heat_map` visualization mode, which overlays the
heat map over the greyscaled original image.
Default: 0.5
show_colorbar (bool, optional): Displays colorbar for heatmap below
the visualization. If given method does not use a heatmap,
then a colormap axis is created and hidden. This is
necessary for appropriate alignment when visualizing
multiple plots, some with colorbars and some without.
Default: False
title (str, optional): Title string for plot. If None, no title is
set.
Default: None
fig_size (tuple, optional): Size of figure created.
Default: (6,6)
use_pyplot (bool, optional): If true, uses pyplot to create and show
figure and displays the figure after creating. If False,
uses Matplotlib object oriented API and simply returns a
figure object without showing.
Default: True.
Returns:
2-element tuple of **figure**, **axis**:
- **figure** (*matplotlib.pyplot.figure*):
Figure object on which visualization
is created. If plt_fig_axis argument is given, this is the
same figure provided.
- **axis** (*matplotlib.pyplot.axis*):
Axis object on which visualization
is created. If plt_fig_axis argument is given, this is the
same axis provided.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> ig = IntegratedGradients(net)
>>> # Computes integrated gradients for class 3 for a given image .
>>> attribution, delta = ig.attribute(orig_image, target=3)
>>> # Displays blended heat map visualization of computed attributions.
>>> _ = visualize_image_attr(attribution, orig_image, "blended_heat_map")
"""
# Create plot if figure, axis not provided
if plt_fig_axis is not None:
plt_fig, plt_axis = plt_fig_axis
else:
if use_pyplot:
plt_fig, plt_axis = plt.subplots(figsize=fig_size)
else:
plt_fig = Figure(figsize=fig_size)
plt_axis = plt_fig.subplots()
if original_image is not None:
if np.max(original_image) <= 1.0:
original_image = _prepare_image(original_image * 255)
elif ImageVisualizationMethod[method] != ImageVisualizationMethod.heat_map:
raise ValueError(
"Original Image must be provided for"
"any visualization other than heatmap."
)
# Remove ticks and tick labels from plot.
plt_axis.xaxis.set_ticks_position("none")
plt_axis.yaxis.set_ticks_position("none")
plt_axis.set_yticklabels([])
plt_axis.set_xticklabels([])
plt_axis.grid(visible=False)
heat_map = None
# Show original image
if ImageVisualizationMethod[method] == ImageVisualizationMethod.original_image:
assert (
original_image is not None
), "Original image expected for original_image method."
if len(original_image.shape) > 2 and original_image.shape[2] == 1:
original_image = np.squeeze(original_image, axis=2)
plt_axis.imshow(original_image)
else:
# Choose appropriate signed attributions and normalize.
norm_attr = _normalize_attr(attr, sign, outlier_perc, reduction_axis=2)
# Set default colormap and bounds based on sign.
if VisualizeSign[sign] == VisualizeSign.all:
default_cmap = LinearSegmentedColormap.from_list(
"RdWhGn", ["red", "white", "green"]
)
vmin, vmax = -1, 1
elif VisualizeSign[sign] == VisualizeSign.positive:
default_cmap = "Greens"
vmin, vmax = 0, 1
elif VisualizeSign[sign] == VisualizeSign.negative:
default_cmap = "Reds"
vmin, vmax = 0, 1
elif VisualizeSign[sign] == VisualizeSign.absolute_value:
default_cmap = "Blues"
vmin, vmax = 0, 1
else:
raise AssertionError("Visualize Sign type is not valid.")
cmap = cmap if cmap is not None else default_cmap
# Show appropriate image visualization.
if ImageVisualizationMethod[method] == ImageVisualizationMethod.heat_map:
heat_map = plt_axis.imshow(norm_attr, cmap=cmap, vmin=vmin, vmax=vmax)
elif (
ImageVisualizationMethod[method]
== ImageVisualizationMethod.blended_heat_map
):
assert (
original_image is not None
), "Original Image expected for blended_heat_map method."
plt_axis.imshow(np.mean(original_image, axis=2), cmap="gray")
heat_map = plt_axis.imshow(
norm_attr, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha_overlay
)
elif ImageVisualizationMethod[method] == ImageVisualizationMethod.masked_image:
assert VisualizeSign[sign] != VisualizeSign.all, (
"Cannot display masked image with both positive and negative "
"attributions, choose a different sign option."
)
plt_axis.imshow(
_prepare_image(original_image * np.expand_dims(norm_attr, 2))
)
elif ImageVisualizationMethod[method] == ImageVisualizationMethod.alpha_scaling:
assert VisualizeSign[sign] != VisualizeSign.all, (
"Cannot display alpha scaling with both positive and negative "
"attributions, choose a different sign option."
)
plt_axis.imshow(
np.concatenate(
[
original_image,
_prepare_image(np.expand_dims(norm_attr, 2) * 255),
],
axis=2,
)
)
else:
raise AssertionError("Visualize Method type is not valid.")
# Add colorbar. If given method is not a heatmap and no colormap is relevant,
# then a colormap axis is created and hidden. This is necessary for appropriate
# alignment when visualizing multiple plots, some with heatmaps and some
# without.
if show_colorbar:
axis_separator = make_axes_locatable(plt_axis)
colorbar_axis = axis_separator.append_axes("bottom", size="5%", pad=0.1)
if heat_map:
plt_fig.colorbar(heat_map, orientation="horizontal", cax=colorbar_axis)
else:
colorbar_axis.axis("off")
if title:
plt_axis.set_title(title)
if use_pyplot:
plt.show()
return plt_fig, plt_axis
def visualize_image_attr_multiple(
attr: ndarray,
original_image: Union[None, ndarray],
methods: List[str],
signs: List[str],
titles: Union[None, List[str]] = None,
fig_size: Tuple[int, int] = (8, 6),
use_pyplot: bool = True,
**kwargs: Any,
):
r"""
Visualizes attribution using multiple visualization methods displayed
in a 1 x k grid, where k is the number of desired visualizations.
Args:
attr (numpy.ndarray): Numpy array corresponding to attributions to be
visualized. Shape must be in the form (H, W, C), with
channels as last dimension. Shape must also match that of
the original image if provided.
original_image (numpy.ndarray, optional): Numpy array corresponding to
original image. Shape must be in the form (H, W, C), with
channels as the last dimension. Image can be provided either
with values in range 0-1 or 0-255. This is a necessary
argument for any visualization method which utilizes
the original image.
methods (list[str]): List of strings of length k, defining method
for each visualization. Each method must be a valid
string argument for method to visualize_image_attr.
signs (list[str]): List of strings of length k, defining signs for
each visualization. Each sign must be a valid
string argument for sign to visualize_image_attr.
titles (list[str], optional): List of strings of length k, providing
a title string for each plot. If None is provided, no titles
are added to subplots.
Default: None
fig_size (tuple, optional): Size of figure created.
Default: (8, 6)
use_pyplot (bool, optional): If true, uses pyplot to create and show
figure and displays the figure after creating. If False,
uses Matplotlib object oriented API and simply returns a
figure object without showing.
Default: True.
**kwargs (Any, optional): Any additional arguments which will be passed
to every individual visualization. Such arguments include
`show_colorbar`, `alpha_overlay`, `cmap`, etc.
Returns:
2-element tuple of **figure**, **axis**:
- **figure** (*matplotlib.pyplot.figure*):
Figure object on which visualization
is created. If plt_fig_axis argument is given, this is the
same figure provided.
- **axis** (*matplotlib.pyplot.axis*):
Axis object on which visualization
is created. If plt_fig_axis argument is given, this is the
same axis provided.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> ig = IntegratedGradients(net)
>>> # Computes integrated gradients for class 3 for a given image .
>>> attribution, delta = ig.attribute(orig_image, target=3)
>>> # Displays original image and heat map visualization of
>>> # computed attributions side by side.
>>> _ = visualize_image_attr_multiple(attribution, orig_image,
>>> ["original_image", "heat_map"], ["all", "positive"])
"""
assert len(methods) == len(signs), "Methods and signs array lengths must match."
if titles is not None:
assert len(methods) == len(titles), (
"If titles list is given, length must " "match that of methods list."
)
if use_pyplot:
plt_fig = plt.figure(figsize=fig_size)
else:
plt_fig = Figure(figsize=fig_size)
plt_axis = plt_fig.subplots(1, len(methods))
    # When only one method is visualized, plt_fig.subplots returns a single
    # axis rather than an array, so wrap it in a list for uniform handling.
if len(methods) == 1:
plt_axis = [plt_axis]
for i in range(len(methods)):
visualize_image_attr(
attr,
original_image=original_image,
method=methods[i],
sign=signs[i],
plt_fig_axis=(plt_fig, plt_axis[i]),
use_pyplot=False,
title=titles[i] if titles else None,
**kwargs,
)
plt_fig.tight_layout()
if use_pyplot:
plt.show()
return plt_fig, plt_axis
def visualize_timeseries_attr(
attr: ndarray,
data: ndarray,
x_values: Optional[ndarray] = None,
method: str = "individual_channels",
sign: str = "absolute_value",
channel_labels: Optional[List[str]] = None,
channels_last: bool = True,
plt_fig_axis: Union[None, Tuple[figure, axis]] = None,
outlier_perc: Union[int, float] = 2,
cmap: Union[None, str] = None,
alpha_overlay: float = 0.7,
show_colorbar: bool = False,
title: Union[None, str] = None,
fig_size: Tuple[int, int] = (6, 6),
use_pyplot: bool = True,
**pyplot_kwargs,
):
r"""
Visualizes attribution for a given timeseries data by normalizing
attribution values of the desired sign (positive, negative, absolute value,
or all) and displaying them using the desired mode in a matplotlib figure.
Args:
attr (numpy.ndarray): Numpy array corresponding to attributions to be
visualized. Shape must be in the form (N, C) with channels
                    as the last dimension, unless `channels_last` is set to False.
Shape must also match that of the timeseries data.
data (numpy.ndarray): Numpy array corresponding to the original,
equidistant timeseries data. Shape must be in the form
(N, C) with channels as last dimension, unless
                    `channels_last` is set to False.
x_values (numpy.ndarray, optional): Numpy array corresponding to the
points on the x-axis. Shape must be in the form (N, ). If
not provided, integers from 0 to N-1 are used.
Default: None
method (str, optional): Chosen method for visualizing attributions
overlaid onto data. Supported options are:
1. `overlay_individual` - Plot each channel individually in
a separate panel, and overlay the attributions for each
channel as a heat map. The `alpha_overlay` parameter
controls the alpha of the heat map.
2. `overlay_combined` - Plot all channels in the same panel,
and overlay the average attributions as a heat map.
3. `colored_graph` - Plot each channel in a separate panel,
and color the graphs according to the attribution
                        values. Works best with color maps that do not contain
white or very bright colors.
Default: `overlay_individual`
sign (str, optional): Chosen sign of attributions to visualize.
Supported options are:
1. `positive` - Displays only positive pixel attributions.
2. `absolute_value` - Displays absolute value of
attributions.
3. `negative` - Displays only negative pixel attributions.
4. `all` - Displays both positive and negative attribution
values.
Default: `absolute_value`
channel_labels (list[str], optional): List of labels
corresponding to each channel in data.
Default: None
channels_last (bool, optional): If True, data is expected to have
channels as the last dimension, i.e. (N, C). If False, data
is expected to have channels first, i.e. (C, N).
Default: True
plt_fig_axis (tuple, optional): Tuple of matplotlib.pyplot.figure and axis
on which to visualize. If None is provided, then a new figure
and axis are created.
Default: None
outlier_perc (float or int, optional): Top attribution values which
correspond to a total of outlier_perc percentage of the
total attribution are set to 1 and scaling is performed
using the minimum of these values. For sign=`all`, outliers
and scale value are computed using absolute value of
attributions.
Default: 2
cmap (str, optional): String corresponding to desired colormap for
heatmap visualization. This defaults to "Reds" for negative
sign, "Blues" for absolute value, "Greens" for positive sign,
and a spectrum from red to green for all. Note that this
argument is only used for visualizations displaying heatmaps.
Default: None
        alpha_overlay (float, optional): Alpha to set for the attribution heat
                    map in the overlay visualization modes, which draw the
                    heat map over the plotted time series.
Default: 0.7
show_colorbar (bool): Displays colorbar for heat map below
the visualization.
title (str, optional): Title string for plot. If None, no title is
set.
Default: None
fig_size (tuple, optional): Size of figure created.
Default: (6,6)
use_pyplot (bool): If true, uses pyplot to create and show
figure and displays the figure after creating. If False,
uses Matplotlib object oriented API and simply returns a
figure object without showing.
Default: True.
pyplot_kwargs: Keyword arguments forwarded to plt.plot, for example
`linewidth=3`, `color='black'`, etc
Returns:
2-element tuple of **figure**, **axis**:
- **figure** (*matplotlib.pyplot.figure*):
Figure object on which visualization
is created. If plt_fig_axis argument is given, this is the
same figure provided.
- **axis** (*matplotlib.pyplot.axis*):
Axis object on which visualization
is created. If plt_fig_axis argument is given, this is the
same axis provided.
Examples::
>>> # Classifier takes input of shape (batch, length, channels)
>>> model = Classifier()
>>> dl = DeepLift(model)
>>> attribution = dl.attribute(data, target=0)
>>> # Pick the first sample and plot each channel in data in a separate
>>> # panel, with attributions overlaid
        >>> visualize_timeseries_attr(attribution[0], data[0],
        >>>                           method="overlay_individual")
"""
# Check input dimensions
assert len(attr.shape) == 2, "Expected attr of shape (N, C), got {}".format(
attr.shape
)
assert len(data.shape) == 2, "Expected data of shape (N, C), got {}".format(
        data.shape
)
# Convert to channels-first
if channels_last:
attr = np.transpose(attr)
data = np.transpose(data)
num_channels = attr.shape[0]
timeseries_length = attr.shape[1]
if num_channels > timeseries_length:
warnings.warn(
"Number of channels ({}) greater than time series length ({}), "
"please verify input format".format(num_channels, timeseries_length)
)
num_subplots = num_channels
if (
TimeseriesVisualizationMethod[method]
== TimeseriesVisualizationMethod.overlay_combined
):
num_subplots = 1
attr = np.sum(attr, axis=0) # Merge attributions across channels
if x_values is not None:
assert (
x_values.shape[0] == timeseries_length
), "x_values must have same length as data"
else:
x_values = np.arange(timeseries_length)
# Create plot if figure, axis not provided
if plt_fig_axis is not None:
plt_fig, plt_axis = plt_fig_axis
else:
if use_pyplot:
plt_fig, plt_axis = plt.subplots(
figsize=fig_size, nrows=num_subplots, sharex=True
)
else:
plt_fig = Figure(figsize=fig_size)
plt_axis = plt_fig.subplots(nrows=num_subplots, sharex=True)
if not isinstance(plt_axis, ndarray):
plt_axis = np.array([plt_axis])
norm_attr = _normalize_attr(attr, sign, outlier_perc, reduction_axis=None)
# Set default colormap and bounds based on sign.
if VisualizeSign[sign] == VisualizeSign.all:
default_cmap = LinearSegmentedColormap.from_list(
"RdWhGn", ["red", "white", "green"]
)
vmin, vmax = -1, 1
elif VisualizeSign[sign] == VisualizeSign.positive:
default_cmap = "Greens"
vmin, vmax = 0, 1
elif VisualizeSign[sign] == VisualizeSign.negative:
default_cmap = "Reds"
vmin, vmax = 0, 1
elif VisualizeSign[sign] == VisualizeSign.absolute_value:
default_cmap = "Blues"
vmin, vmax = 0, 1
else:
raise AssertionError("Visualize Sign type is not valid.")
cmap = cmap if cmap is not None else default_cmap
cmap = cm.get_cmap(cmap)
cm_norm = colors.Normalize(vmin, vmax)
def _plot_attrs_as_axvspan(attr_vals, x_vals, ax):
        half_col_width = (x_vals[1] - x_vals[0]) / 2.0
for icol, col_center in enumerate(x_vals):
left = col_center - half_col_width
right = col_center + half_col_width
ax.axvspan(
xmin=left,
xmax=right,
facecolor=(cmap(cm_norm(attr_vals[icol]))),
edgecolor=None,
alpha=alpha_overlay,
)
if (
TimeseriesVisualizationMethod[method]
== TimeseriesVisualizationMethod.overlay_individual
):
for chan in range(num_channels):
plt_axis[chan].plot(x_values, data[chan, :], **pyplot_kwargs)
if channel_labels is not None:
plt_axis[chan].set_ylabel(channel_labels[chan])
_plot_attrs_as_axvspan(norm_attr[chan], x_values, plt_axis[chan])
plt.subplots_adjust(hspace=0)
elif (
TimeseriesVisualizationMethod[method]
== TimeseriesVisualizationMethod.overlay_combined
):
# Dark colors are better in this case
cycler = plt.cycler("color", cm.Dark2.colors)
plt_axis[0].set_prop_cycle(cycler)
for chan in range(num_channels):
label = channel_labels[chan] if channel_labels else None
plt_axis[0].plot(x_values, data[chan, :], label=label, **pyplot_kwargs)
_plot_attrs_as_axvspan(norm_attr, x_values, plt_axis[0])
plt_axis[0].legend(loc="best")
elif (
TimeseriesVisualizationMethod[method]
== TimeseriesVisualizationMethod.colored_graph
):
for chan in range(num_channels):
points = np.array([x_values, data[chan, :]]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, cmap=cmap, norm=cm_norm, **pyplot_kwargs)
lc.set_array(norm_attr[chan, :])
plt_axis[chan].add_collection(lc)
plt_axis[chan].set_ylim(
1.2 * np.min(data[chan, :]), 1.2 * np.max(data[chan, :])
)
if channel_labels is not None:
plt_axis[chan].set_ylabel(channel_labels[chan])
plt.subplots_adjust(hspace=0)
else:
raise AssertionError("Invalid visualization method: {}".format(method))
plt.xlim([x_values[0], x_values[-1]])
if show_colorbar:
axis_separator = make_axes_locatable(plt_axis[-1])
colorbar_axis = axis_separator.append_axes("bottom", size="5%", pad=0.4)
colorbar_alpha = alpha_overlay
if (
TimeseriesVisualizationMethod[method]
== TimeseriesVisualizationMethod.colored_graph
):
colorbar_alpha = 1.0
plt_fig.colorbar(
cm.ScalarMappable(cm_norm, cmap),
orientation="horizontal",
cax=colorbar_axis,
alpha=colorbar_alpha,
)
if title:
plt_axis[0].set_title(title)
if use_pyplot:
plt.show()
return plt_fig, plt_axis
# These visualization methods are for text and are partially copied from
# experiments conducted by Davide Testuggine at Facebook.
class VisualizationDataRecord:
r"""
A data record for storing attribution relevant information
"""
__slots__ = [
"word_attributions",
"pred_prob",
"pred_class",
"true_class",
"attr_class",
"attr_score",
"raw_input_ids",
"convergence_score",
]
def __init__(
self,
word_attributions,
pred_prob,
pred_class,
true_class,
attr_class,
attr_score,
raw_input_ids,
convergence_score,
) -> None:
self.word_attributions = word_attributions
self.pred_prob = pred_prob
self.pred_class = pred_class
self.true_class = true_class
self.attr_class = attr_class
self.attr_score = attr_score
self.raw_input_ids = raw_input_ids
self.convergence_score = convergence_score
def _get_color(attr):
# clip values to prevent CSS errors (Values should be from [-1,1])
attr = max(-1, min(1, attr))
if attr > 0:
hue = 120
sat = 75
lig = 100 - int(50 * attr)
else:
hue = 0
sat = 75
lig = 100 - int(-40 * attr)
return "hsl({}, {}%, {}%)".format(hue, sat, lig)
def format_classname(classname):
return '<td><text style="padding-right:2em"><b>{}</b></text></td>'.format(classname)
def format_special_tokens(token):
if token.startswith("<") and token.endswith(">"):
return "#" + token.strip("<>")
return token
def format_tooltip(item, text):
return '<div class="tooltip">{item}\
<span class="tooltiptext">{text}</span>\
</div>'.format(
item=item, text=text
)
def format_word_importances(words, importances):
if importances is None or len(importances) == 0:
return "<td></td>"
assert len(words) <= len(importances)
tags = ["<td>"]
for word, importance in zip(words, importances[: len(words)]):
word = format_special_tokens(word)
color = _get_color(importance)
unwrapped_tag = '<mark style="background-color: {color}; opacity:1.0; \
line-height:1.75"><font color="black"> {word}\
</font></mark>'.format(
color=color, word=word
)
tags.append(unwrapped_tag)
tags.append("</td>")
return "".join(tags)
def visualize_text(
datarecords: Iterable[VisualizationDataRecord], legend: bool = True
) -> "HTML": # In quotes because this type doesn't exist in standalone mode
assert HAS_IPYTHON, (
"IPython must be available to visualize text. "
"Please run 'pip install ipython'."
)
dom = ["<table width: 100%>"]
rows = [
"<tr><th>True Label</th>"
"<th>Predicted Label</th>"
"<th>Attribution Label</th>"
"<th>Attribution Score</th>"
"<th>Word Importance</th>"
]
for datarecord in datarecords:
rows.append(
"".join(
[
"<tr>",
format_classname(datarecord.true_class),
format_classname(
"{0} ({1:.2f})".format(
datarecord.pred_class, datarecord.pred_prob
)
),
format_classname(datarecord.attr_class),
format_classname("{0:.2f}".format(datarecord.attr_score)),
format_word_importances(
datarecord.raw_input_ids, datarecord.word_attributions
),
"<tr>",
]
)
)
if legend:
dom.append(
'<div style="border-top: 1px solid; margin-top: 5px; \
padding-top: 5px; display: inline-block">'
)
dom.append("<b>Legend: </b>")
for value, label in zip([-1, 0, 1], ["Negative", "Neutral", "Positive"]):
dom.append(
'<span style="display: inline-block; width: 10px; height: 10px; \
border: 1px solid; background-color: \
{value}"></span> {label} '.format(
value=_get_color(value), label=label
)
)
dom.append("</div>")
dom.append("".join(rows))
dom.append("</table>")
html = HTML("".join(dom))
display(html)
return html
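

# A hedged sketch of the text visualization pipeline above: one
# ``VisualizationDataRecord`` is built for a toy sentence and rendered. The
# attribution numbers are made up for illustration; rendering needs IPython.
if __name__ == "__main__":
    record = VisualizationDataRecord(
        word_attributions=[0.6, -0.3, 0.1],
        pred_prob=0.92,
        pred_class="positive",
        true_class="positive",
        attr_class="positive",
        attr_score=0.4,
        raw_input_ids=["a", "great", "movie"],
        convergence_score=0.01,
    )
    if HAS_IPYTHON:
        visualize_text([record])
    else:
        print(format_word_importances(record.raw_input_ids, record.word_attributions))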
|
#!/usr/bin/env python3
from typing import Any, Callable, cast, Generic, List, Tuple, Type, Union
import torch
import torch.nn.functional as F
from captum._utils.common import (
_format_additional_forward_args,
_format_tensor_into_tuples,
_run_forward,
_validate_target,
)
from captum._utils.gradient import compute_gradients
from captum._utils.typing import ModuleOrModuleList, TargetType
from captum.attr._utils.common import (
_format_input_baseline,
_sum_rows,
_tensorize_baseline,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class Attribution:
r"""
All attribution algorithms extend this class. It enforces its child classes
to extend and override core `attribute` method.
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
"""
self.forward_func = forward_func
attribute: Callable
r"""
This method computes and returns the attribution values for each input tensor.
Deriving classes are responsible for implementing its logic accordingly.
Specific attribution algorithms that extend this class take relevant
arguments.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which attribution
is computed. It can be provided as a single tensor or
a tuple of multiple tensors. If multiple input tensors
are provided, the batch sizes must be aligned across all
tensors.
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution values for each
input tensor. The `attributions` have the same shape and
dimensionality as the inputs.
If a single tensor is provided as inputs, a single tensor
is returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
"""
@property
def multiplies_by_inputs(self):
return False
def has_convergence_delta(self) -> bool:
r"""
This method informs the user whether the attribution algorithm provides
a convergence delta (aka an approximation error) or not. Convergence
delta may serve as a proxy of correctness of attribution algorithm's
approximation. If deriving attribution class provides a
`compute_convergence_delta` method, it should
override both `compute_convergence_delta` and `has_convergence_delta` methods.
Returns:
bool:
Returns whether the attribution algorithm
provides a convergence delta (aka approximation error) or not.
"""
return False
compute_convergence_delta: Callable
r"""
The attribution algorithms which derive `Attribution` class and provide
convergence delta (aka approximation error) should implement this method.
Convergence delta can be computed based on certain properties of the
    attribution algorithms.
Args:
attributions (Tensor or tuple[Tensor, ...]): Attribution scores that
are precomputed by an attribution algorithm.
Attributions can be provided in form of a single tensor
or a tuple of those. It is assumed that attribution
tensor's dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
        *args (Any, optional): Additional arguments that are used by the
sub-classes depending on the specific implementation
of `compute_convergence_delta`.
Returns:
*Tensor* of **deltas**:
- **deltas** (*Tensor*):
                Depending on the specific implementation of the
                sub-classes, convergence delta can be returned per
                sample in the form of a tensor or it can be aggregated
                across multiple samples and returned in the form of a
single floating point tensor.
"""
@classmethod
def get_name(cls: Type["Attribution"]) -> str:
r"""
Create readable class name by inserting a space before any capital
characters besides the very first.
Returns:
str: a readable class name
Example:
for a class called IntegratedGradients, we return the string
'Integrated Gradients'
"""
return "".join(
[
char if char.islower() or idx == 0 else " " + char
for idx, char in enumerate(cls.__name__)
]
)
class GradientAttribution(Attribution):
r"""
All gradient based attribution algorithms extend this class. It requires a
forward function, which most commonly is the forward function of the model
that we want to interpret or the model itself.
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
"""
Attribution.__init__(self, forward_func)
self.gradient_func = compute_gradients
@log_usage()
def compute_convergence_delta(
self,
attributions: Union[Tensor, Tuple[Tensor, ...]],
start_point: Union[
None, int, float, Tensor, Tuple[Union[int, float, Tensor], ...]
],
end_point: Union[Tensor, Tuple[Tensor, ...]],
target: TargetType = None,
additional_forward_args: Any = None,
) -> Tensor:
r"""
Here we provide a specific implementation for `compute_convergence_delta`
which is based on a common property among gradient-based attribution algorithms.
        In the literature it is sometimes also called the completeness axiom. The
        completeness axiom states that the sum of the attributions must be equal to the
        difference of the model's outputs at its end and start points. In other words:
sum(attributions) - (F(end_point) - F(start_point)) is close to zero.
Returned delta of this method is defined as above stated difference.
This implementation assumes that both the `start_point` and `end_point` have
the same shape and dimensionality. It also assumes that the target must have
the same number of examples as the `start_point` and the `end_point` in case
it is provided in form of a list or a non-singleton tensor.
Args:
attributions (Tensor or tuple[Tensor, ...]): Precomputed attribution
scores. The user can compute those using any attribution
algorithm. It is assumed the shape and the
dimensionality of attributions must match the shape and
the dimensionality of `start_point` and `end_point`.
It also assumes that the attribution tensor's
dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
start_point (Tensor or tuple[Tensor, ...], optional): `start_point`
is passed as an input to model's forward function. It
is the starting point of attributions' approximation.
It is assumed that both `start_point` and `end_point`
have the same shape and dimensionality.
end_point (Tensor or tuple[Tensor, ...]): `end_point`
is passed as an input to model's forward function. It
is the end point of attributions' approximation.
It is assumed that both `start_point` and `end_point`
have the same shape and dimensionality.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples.
`additional_forward_args` is used both for `start_point`
and `end_point` when computing the forward pass.
Default: None
Returns:
*Tensor* of **deltas**:
- **deltas** (*Tensor*):
This implementation returns convergence delta per
sample. Deriving sub-classes may do any type of aggregation
of those values, if necessary.
"""
end_point, start_point = _format_input_baseline(end_point, start_point)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# tensorizing start_point in case it is a scalar or one example baseline
# If the batch size is large we could potentially also tensorize only one
# sample and expand the output to the rest of the elements in the batch
start_point = _tensorize_baseline(end_point, start_point)
attributions = _format_tensor_into_tuples(attributions)
# verify that the attributions and end_point match on 1st dimension
for attribution, end_point_tnsr in zip(attributions, end_point):
assert end_point_tnsr.shape[0] == attribution.shape[0], (
"Attributions tensor and the end_point must match on the first"
" dimension but found attribution: {} and end_point: {}".format(
attribution.shape[0], end_point_tnsr.shape[0]
)
)
num_samples = end_point[0].shape[0]
_validate_input(end_point, start_point)
_validate_target(num_samples, target)
with torch.no_grad():
start_out_sum = _sum_rows(
_run_forward(
self.forward_func, start_point, target, additional_forward_args
)
)
end_out_sum = _sum_rows(
_run_forward(
self.forward_func, end_point, target, additional_forward_args
)
)
row_sums = [_sum_rows(attribution) for attribution in attributions]
attr_sum = torch.stack(
[cast(Tensor, sum(row_sum)) for row_sum in zip(*row_sums)]
)
_delta = attr_sum - (end_out_sum - start_out_sum)
return _delta
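

# A hedged sanity check of the completeness property implemented above: for a
# linear model f(x) = x @ w, the attributions (x - baseline) * w sum exactly to
# f(x) - f(baseline), so the returned per-sample deltas are (numerically) zero.
if __name__ == "__main__":
    weights = torch.tensor([[1.0], [2.0], [3.0]])

    def linear_model(inp: Tensor) -> Tensor:
        return inp @ weights

    grad_attr = GradientAttribution(linear_model)
    end_point = torch.rand(4, 3)
    start_point = torch.zeros_like(end_point)
    attributions = (end_point - start_point) * weights.squeeze(1)
    print(grad_attr.compute_convergence_delta(attributions, start_point, end_point))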
class PerturbationAttribution(Attribution):
r"""
All perturbation based attribution algorithms extend this class. It requires a
forward function, which most commonly is the forward function of the model
that we want to interpret or the model itself.
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
"""
Attribution.__init__(self, forward_func)
@property
def multiplies_by_inputs(self):
return True
class InternalAttribution(Attribution, Generic[ModuleOrModuleList]):
r"""
    Shared base class for LayerAttribution and NeuronAttribution,
attribution types that require a model and a particular layer.
"""
layer: ModuleOrModuleList
def __init__(
self,
forward_func: Callable,
layer: ModuleOrModuleList,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
layer (torch.nn.Module): Layer for which output attributions are computed.
Output size of attribute matches that of layer output.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model, which allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
Attribution.__init__(self, forward_func)
self.layer = layer
self.device_ids = device_ids
class LayerAttribution(InternalAttribution):
r"""
Layer attribution provides attribution values for the given layer, quantifying
the importance of each neuron within the given layer's output. The output
attribution of calling attribute on a LayerAttribution object always matches
the size of the layer output.
"""
def __init__(
self,
forward_func: Callable,
layer: ModuleOrModuleList,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
layer (torch.nn.Module): Layer for which output attributions are computed.
Output size of attribute matches that of layer output.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model, which allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
InternalAttribution.__init__(self, forward_func, layer, device_ids)
@staticmethod
def interpolate(
layer_attribution: Tensor,
interpolate_dims: Union[int, Tuple[int, ...]],
interpolate_mode: str = "nearest",
) -> Tensor:
r"""
Interpolates given 3D, 4D or 5D layer attribution to given dimensions.
This is often utilized to upsample the attribution of a convolutional layer
to the size of an input, which allows visualizing in the input space.
Args:
layer_attribution (Tensor): Tensor of given layer attributions.
interpolate_dims (int or tuple): Upsampled dimensions. The
number of elements must be the number of dimensions
of layer_attribution - 2, since the first dimension
corresponds to number of examples and the second is
assumed to correspond to the number of channels.
interpolate_mode (str): Method for interpolation, which
must be a valid input interpolation mode for
torch.nn.functional. These methods are
"nearest", "area", "linear" (3D-only), "bilinear"
(4D-only), "bicubic" (4D-only), "trilinear" (5D-only)
based on the number of dimensions of the given layer
attribution.
Returns:
*Tensor* of upsampled **attributions**:
- **attributions** (*Tensor*):
Upsampled layer attributions with first 2 dimensions matching
                layer_attribution and remaining dimensions given by
interpolate_dims.
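        A rough usage sketch, assuming a hypothetical 2 x 3 x 8 x 8 attribution
        from a convolutional layer upsampled to a 32 x 32 input size:
        Examples::
            >>> layer_attr = torch.randn(2, 3, 8, 8)
            >>> upsampled = LayerAttribution.interpolate(
            >>>     layer_attr, (32, 32), interpolate_mode="bilinear"
            >>> )
            >>> upsampled.shape   # torch.Size([2, 3, 32, 32])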
"""
return F.interpolate(layer_attribution, interpolate_dims, mode=interpolate_mode)
class NeuronAttribution(InternalAttribution):
r"""
Neuron attribution provides input attribution for a given neuron, quantifying
the importance of each input feature in the activation of a particular neuron.
Calling attribute on a NeuronAttribution object requires also providing
the index of the neuron in the output of the given layer for which attributions
are required.
The output attribution of calling attribute on a NeuronAttribution object
always matches the size of the input.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
layer (torch.nn.Module): Layer for which output attributions are computed.
Output size of attribute matches that of layer output.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model, which allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
InternalAttribution.__init__(self, forward_func, layer, device_ids)
attribute: Callable
r"""
This method computes and returns the neuron attribution values for each
input tensor. Deriving classes are responsible for implementing
its logic accordingly.
Specific attribution algorithms that extend this class take relevant
arguments.
Args:
inputs: A single high dimensional input tensor or a tuple of them.
neuron_selector (int or tuple): Tuple providing index of neuron in output
of given layer for which attribution is desired. Length of
this tuple must be one less than the number of
dimensions in the output of the given layer (since
dimension 0 corresponds to number of examples).
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution values for
each input vector. The `attributions` have the
dimensionality of inputs.
"""
|
#!/usr/bin/env python3
import typing
from inspect import signature
from typing import Any, Callable, List, Tuple, TYPE_CHECKING, Union
import torch
from captum._utils.common import (
_format_baseline,
_format_output,
_format_tensor_into_tuples,
_validate_input as _validate_input_basic,
)
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._utils.approximation_methods import SUPPORTED_METHODS
from torch import Tensor
if TYPE_CHECKING:
from captum.attr._utils.attribution import GradientAttribution
def _sum_rows(input: Tensor) -> Tensor:
return input.reshape(input.shape[0], -1).sum(1)
def _validate_target(num_samples: int, target: TargetType) -> None:
if isinstance(target, list) or (
isinstance(target, torch.Tensor) and torch.numel(target) > 1
):
assert num_samples == len(target), (
"The number of samples provied in the"
"input {} does not match with the number of targets. {}".format(
num_samples, len(target)
)
)
def _validate_input(
inputs: Tuple[Tensor, ...],
baselines: Tuple[Union[Tensor, int, float], ...],
n_steps: int = 50,
method: str = "riemann_trapezoid",
draw_baseline_from_distrib: bool = False,
) -> None:
_validate_input_basic(inputs, baselines, draw_baseline_from_distrib)
assert (
n_steps >= 0
), "The number of steps must be a positive integer. " "Given: {}".format(n_steps)
assert (
method in SUPPORTED_METHODS
), "Approximation method must be one for the following {}. " "Given {}".format(
SUPPORTED_METHODS, method
)
def _validate_noise_tunnel_type(
nt_type: str, supported_noise_tunnel_types: List[str]
) -> None:
assert nt_type in supported_noise_tunnel_types, (
"Noise types must be either `smoothgrad`, `smoothgrad_sq` or `vargrad`. "
"Given {}".format(nt_type)
)
@typing.overload
def _format_input_baseline(
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: Union[Tensor, Tuple[Tensor, ...]],
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def _format_input_baseline(
inputs: Union[Tensor, Tuple[Tensor, ...]], baselines: BaselineType
) -> Tuple[Tuple[Tensor, ...], Tuple[Union[Tensor, int, float], ...]]:
...
def _format_input_baseline(
inputs: Union[Tensor, Tuple[Tensor, ...]], baselines: BaselineType
) -> Tuple[Tuple[Tensor, ...], Tuple[Union[Tensor, int, float], ...]]:
inputs = _format_tensor_into_tuples(inputs)
baselines = _format_baseline(baselines, inputs)
return inputs, baselines
# This function can potentially be merged with the `format_baseline` function
# however, since currently not all algorithms support baselines of type
# callable this will be kept in a separate function.
@typing.overload
def _format_callable_baseline(
baselines: Union[
None,
Callable[..., Union[Tensor, Tuple[Tensor, ...]]],
Tensor,
Tuple[Tensor, ...],
],
inputs: Union[Tensor, Tuple[Tensor, ...]],
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _format_callable_baseline(
baselines: Union[
None,
Callable[..., Union[Tensor, Tuple[Tensor, ...]]],
Tensor,
int,
float,
Tuple[Union[Tensor, int, float], ...],
],
inputs: Union[Tensor, Tuple[Tensor, ...]],
) -> Tuple[Union[Tensor, int, float], ...]:
...
def _format_callable_baseline(
baselines: Union[
None,
Callable[..., Union[Tensor, Tuple[Tensor, ...]]],
Tensor,
int,
float,
Tuple[Union[Tensor, int, float], ...],
],
inputs: Union[Tensor, Tuple[Tensor, ...]],
) -> Tuple[Union[Tensor, int, float], ...]:
if callable(baselines):
# Note: this assumes that if baselines is a function and if it takes
# arguments, then the first argument is the `inputs`.
# This can be expanded in the future with better type checks
baseline_parameters = signature(baselines).parameters
if len(baseline_parameters) == 0:
baselines = baselines()
else:
baselines = baselines(inputs)
return _format_baseline(baselines, _format_tensor_into_tuples(inputs))
def _format_and_verify_strides(
strides: Union[None, int, Tuple[int, ...], Tuple[Union[int, Tuple[int, ...]], ...]],
inputs: Tuple[Tensor, ...],
) -> Tuple[Union[int, Tuple[int, ...]], ...]:
# Formats strides, which are necessary for occlusion
# Assumes inputs are already formatted (in tuple)
if strides is None:
strides = tuple(1 for input in inputs)
if len(inputs) == 1 and not (isinstance(strides, tuple) and len(strides) == 1):
strides = (strides,) # type: ignore
assert isinstance(strides, tuple) and len(strides) == len(
inputs
), "Strides must be provided for each input tensor."
for i in range(len(inputs)):
assert isinstance(strides[i], int) or (
isinstance(strides[i], tuple)
and len(strides[i]) == len(inputs[i].shape) - 1 # type: ignore
), (
"Stride for input index {} is {}, which is invalid for input with "
"shape {}. It must be either an int or a tuple with length equal to "
"len(input_shape) - 1."
).format(
i, strides[i], inputs[i].shape
)
return strides
def _format_and_verify_sliding_window_shapes(
sliding_window_shapes: Union[Tuple[int, ...], Tuple[Tuple[int, ...], ...]],
inputs: Tuple[Tensor, ...],
) -> Tuple[Tuple[int, ...], ...]:
# Formats shapes of sliding windows, which is necessary for occlusion
# Assumes inputs is already formatted (in tuple)
if isinstance(sliding_window_shapes[0], int):
sliding_window_shapes = (sliding_window_shapes,) # type: ignore
sliding_window_shapes: Tuple[Tuple[int, ...], ...]
assert len(sliding_window_shapes) == len(
inputs
), "Must provide sliding window dimensions for each input tensor."
for i in range(len(inputs)):
assert (
isinstance(sliding_window_shapes[i], tuple)
and len(sliding_window_shapes[i]) == len(inputs[i].shape) - 1
), (
"Occlusion shape for input index {} is {} but should be a tuple with "
"{} dimensions."
).format(
i, sliding_window_shapes[i], len(inputs[i].shape) - 1
)
return sliding_window_shapes
@typing.overload
def _compute_conv_delta_and_format_attrs(
attr_algo: "GradientAttribution",
return_convergence_delta: bool,
attributions: Tuple[Tensor, ...],
start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],
end_point: Union[Tensor, Tuple[Tensor, ...]],
additional_forward_args: Any,
target: TargetType,
is_inputs_tuple: Literal[False] = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
...
@typing.overload
def _compute_conv_delta_and_format_attrs(
attr_algo: "GradientAttribution",
return_convergence_delta: bool,
attributions: Tuple[Tensor, ...],
start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],
end_point: Union[Tensor, Tuple[Tensor, ...]],
additional_forward_args: Any,
target: TargetType,
is_inputs_tuple: Literal[True],
) -> Union[Tuple[Tensor, ...], Tuple[Tuple[Tensor, ...], Tensor]]:
...
# FIXME: GradientAttribution is provided as a string due to a circular import.
# This should be fixed when common is refactored into separate files.
def _compute_conv_delta_and_format_attrs(
attr_algo: "GradientAttribution",
return_convergence_delta: bool,
attributions: Tuple[Tensor, ...],
start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],
end_point: Union[Tensor, Tuple[Tensor, ...]],
additional_forward_args: Any,
target: TargetType,
is_inputs_tuple: bool = False,
) -> Union[
Tensor, Tuple[Tensor, ...], Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]
]:
if return_convergence_delta:
# computes convergence error
delta = attr_algo.compute_convergence_delta(
attributions,
start_point,
end_point,
additional_forward_args=additional_forward_args,
target=target,
)
return _format_output(is_inputs_tuple, attributions), delta
else:
return _format_output(is_inputs_tuple, attributions)
def _tensorize_baseline(
inputs: Tuple[Tensor, ...], baselines: Tuple[Union[int, float, Tensor], ...]
) -> Tuple[Tensor, ...]:
def _tensorize_single_baseline(baseline, input):
if isinstance(baseline, (int, float)):
return torch.full_like(input, baseline)
if input.shape[0] > baseline.shape[0] and baseline.shape[0] == 1:
return torch.cat([baseline] * input.shape[0])
return baseline
assert isinstance(inputs, tuple) and isinstance(baselines, tuple), (
"inputs and baselines must"
"have tuple type but found baselines: {} and inputs: {}".format(
type(baselines), type(inputs)
)
)
return tuple(
_tensorize_single_baseline(baseline, input)
for baseline, input in zip(baselines, inputs)
)
def _reshape_and_sum(
tensor_input: Tensor, num_steps: int, num_examples: int, layer_size: Tuple[int, ...]
) -> Tensor:
# Used for attribution methods which perform integration
# Sums across integration steps by reshaping tensor to
# (num_steps, num_examples, (layer_size)) and summing over
# dimension 0. Returns a tensor of size (num_examples, (layer_size))
return torch.sum(
tensor_input.reshape((num_steps, num_examples) + layer_size), dim=0
)
def _call_custom_attribution_func(
custom_attribution_func: Callable[..., Tuple[Tensor, ...]],
multipliers: Tuple[Tensor, ...],
inputs: Tuple[Tensor, ...],
baselines: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
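    # For illustration only, a valid three-argument custom attribution function
    # could (as a sketch) look like:
    #     def custom_attr(multipliers, inputs, baselines):
    #         return tuple(
    #             (inp - base) * mult
    #             for inp, base, mult in zip(inputs, baselines, multipliers)
    #         )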
assert callable(custom_attribution_func), (
"`custom_attribution_func`"
" must be a callable function but {} provided".format(
type(custom_attribution_func)
)
)
custom_attr_func_params = signature(custom_attribution_func).parameters
if len(custom_attr_func_params) == 1:
return custom_attribution_func(multipliers)
elif len(custom_attr_func_params) == 2:
return custom_attribution_func(multipliers, inputs)
elif len(custom_attr_func_params) == 3:
return custom_attribution_func(multipliers, inputs, baselines)
else:
raise AssertionError(
"`custom_attribution_func` must take at least one and at most 3 arguments."
)
def _find_output_mode_and_verify(
initial_eval: Union[int, float, Tensor],
num_examples: int,
perturbations_per_eval: int,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric],
) -> bool:
"""
This method identifies whether the model outputs a single output for a batch
(agg_output_mode = True) or whether it outputs a single output per example
(agg_output_mode = False) and returns agg_output_mode. The method also
verifies that perturbations_per_eval is 1 in the case that agg_output_mode is True
    and also verifies that the first dimension of each feature mask is 1 if the
    model returns a single output for a batch.
"""
if isinstance(initial_eval, (int, float)) or (
isinstance(initial_eval, torch.Tensor)
and (
len(initial_eval.shape) == 0
or (num_examples > 1 and initial_eval.numel() == 1)
)
):
agg_output_mode = True
assert (
perturbations_per_eval == 1
), "Cannot have perturbations_per_eval > 1 when function returns scalar."
if feature_mask is not None:
for single_mask in feature_mask:
assert single_mask.shape[0] == 1, (
"Cannot provide different masks for each example when function "
"returns a scalar."
)
else:
agg_output_mode = False
assert (
isinstance(initial_eval, torch.Tensor) and initial_eval[0].numel() == 1
), "Target should identify a single element in the model output."
return agg_output_mode
def _construct_default_feature_mask(
inputs: Tuple[Tensor, ...]
) -> Tuple[Tuple[Tensor, ...], int]:
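    # As a sketch, for two inputs with per-example shapes (2, 2) and (3,), the
    # returned masks are tensor([[[0, 1], [2, 3]]]) and tensor([[4, 5, 6]]), and
    # total_features is 7.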
feature_mask = []
current_num_features = 0
for i in range(len(inputs)):
num_features = torch.numel(inputs[i][0])
feature_mask.append(
current_num_features
+ torch.reshape(
torch.arange(num_features, device=inputs[i].device),
inputs[i][0:1].shape,
)
)
current_num_features += num_features
total_features = current_num_features
feature_mask = tuple(feature_mask)
return feature_mask, total_features
|
#!/usr/bin/env python3
from collections import defaultdict
from typing import Any, Dict, List, Optional, Union
from captum._utils.common import _format_tensor_into_tuples
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.stat import Stat
from captum.attr._utils.summarizer import Summarizer
from captum.log import log_usage
from torch import Tensor
class ClassSummarizer(Summarizer):
r"""
Used to keep track of summaries for associated classes. The
classes/labels can be of any type that are supported by `dict`.
This also keeps track of an aggregate of all class summaries.
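    A rough usage sketch, assuming per-example attributions and integer labels
    (``Mean`` and ``StdDev`` come from ``captum.attr._utils.stat``):
        >>> summ = ClassSummarizer(stats=[Mean(), StdDev()])
        >>> summ.update(torch.randn(4, 3), labels=[0, 1, 0, 1])
        >>> summ.class_summaries[0]   # summary restricted to label 0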
"""
@log_usage()
def __init__(self, stats: List[Stat]) -> None:
Summarizer.__init__.__wrapped__(self, stats)
self.summaries: Dict[Any, Summarizer] = defaultdict(
lambda: Summarizer(stats=stats)
)
def update( # type: ignore
self,
x: TensorOrTupleOfTensorsGeneric,
labels: TargetType = None,
):
r"""
Updates the stats of the summarizer, optionally associated to classes.
This accepts either a single tensor to summarise or a tuple of tensors.
Args:
x (Tensor or tuple[Tensor, ...]):
The input tensor to be summarised. The first
dimension of this input must be associated to
the batch size of the inputs.
labels (int, tuple, Tensor, or list, optional):
                The associated labels for `x`. If a single label is provided
                (i.e. not a list or tensor), we assume it is the label for all
                examples in `x`.
If this is None we simply aggregate the total summary.
"""
if labels is None:
super().update(x)
return
x = _format_tensor_into_tuples(x)
num_labels = 1
labels_typed: Union[List[Any], Tensor]
if isinstance(labels, list) or isinstance(labels, Tensor):
labels_typed = labels
num_labels = len(labels) # = labels.size(0) if tensor
else:
labels_typed = [labels]
        # mypy does not narrow the type after the scalar label is wrapped in a list
if len(labels_typed) > 1:
for x_i in x:
assert x_i.size(0) == num_labels, (
"batch size does not equal amount of labels; "
"please ensure length of labels is equal to 1 "
"or to the `batch_size` corresponding to the "
"number of examples in the input(s)"
)
batch_size = x[0].size(0)
for i in range(batch_size):
tensors_to_summarize = tuple(tensor[i] for tensor in x)
tensors_to_summarize_copy = tuple(tensor[i].clone() for tensor in x)
label = labels_typed[0] if len(labels_typed) == 1 else labels_typed[i]
self.summaries[label].update(tensors_to_summarize)
super().update(tensors_to_summarize_copy)
@property
def class_summaries(
self,
) -> Dict[
Any, Union[None, Dict[str, Optional[Tensor]], List[Dict[str, Optional[Tensor]]]]
]:
r"""
Returns:
The summaries for each class.
"""
return {key: value.summary for key, value in self.summaries.items()}
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Optional, TYPE_CHECKING
import torch
from torch import Tensor
if TYPE_CHECKING:
from captum.attr._utils.summarizer import SummarizerSingleTensor
class Stat:
"""
The Stat class represents a statistic that can be updated and retrieved
at any point in time.
The basic functionality this class provides is:
1. A update/get method to actually compute the statistic
2. A statistic store/cache to retrieve dependent information
(e.g. other stat values that are required for computation)
3. The name of the statistic that is used for the user to refer to
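    As a sketch, a minimal custom statistic tracking the largest absolute value
    seen across updates could look like:
        >>> class AbsMax(Stat):
        >>>     def __init__(self, name=None):
        >>>         super().__init__(name=name)
        >>>         self.result = None
        >>>     def update(self, x):
        >>>         cur = x.abs().max()
        >>>         self.result = cur if self.result is None else torch.max(self.result, cur)
        >>>     def get(self):
        >>>         return self.result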
"""
def __init__(self, name: Optional[str] = None, **kwargs: Any) -> None:
"""
Args:
name (str, optional):
The name of the statistic. If not provided,
                the class name will be used alongside its parameters
kwargs (Any):
Additional arguments used to construct the statistic
"""
self.params = kwargs
self._name = name
self._other_stats: Optional[SummarizerSingleTensor] = None
def init(self):
pass
def _get_stat(self, stat: "Stat") -> Optional["Stat"]:
assert self._other_stats is not None
return self._other_stats.get(stat)
def update(self, x: Tensor):
raise NotImplementedError()
def get(self) -> Optional[Tensor]:
raise NotImplementedError()
def __hash__(self):
return hash((self.__class__, frozenset(self.params.items())))
def __eq__(self, other: object) -> bool:
if isinstance(other, Stat):
return self.__class__ == other.__class__ and frozenset(
self.params.items()
) == frozenset(other.params.items())
else:
return False
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
@property
def name(self):
"""
        The name of the statistic, i.e. the key in a .summary dict.
        This will be the class name or a custom name if one was provided.
See Summarizer or SummarizerSingleTensor
"""
default_name = self.__class__.__name__.lower()
if len(self.params) > 0:
default_name += f"({self.params})"
return default_name if self._name is None else self._name
class Count(Stat):
"""
Counts the number of elements, i.e. the
number of `update`'s called
"""
def __init__(self, name: Optional[str] = None) -> None:
super().__init__(name=name)
self.n = None
def get(self):
return self.n
def update(self, x):
if self.n is None:
self.n = 0
self.n += 1
class Mean(Stat):
"""
Calculates the average of a tensor
"""
def __init__(self, name: Optional[str] = None) -> None:
super().__init__(name=name)
self.rolling_mean: Optional[Tensor] = None
self.n: Optional[Count] = None
def get(self) -> Optional[Tensor]:
return self.rolling_mean
def init(self):
self.n = self._get_stat(Count())
def update(self, x):
n = self.n.get()
if self.rolling_mean is None:
# Ensures rolling_mean is a float tensor
self.rolling_mean = x.clone() if x.is_floating_point() else x.double()
else:
delta = x - self.rolling_mean
self.rolling_mean += delta / n
class MSE(Stat):
"""
Calculates the mean squared error of a tensor
"""
def __init__(self, name: Optional[str] = None) -> None:
super().__init__(name=name)
self.prev_mean = None
self.mse = None
def init(self):
self.mean = self._get_stat(Mean())
def get(self) -> Optional[Tensor]:
if self.mse is None and self.prev_mean is not None:
return torch.zeros_like(self.prev_mean)
return self.mse
def update(self, x: Tensor):
mean = self.mean.get()
if mean is not None and self.prev_mean is not None:
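            # Welford-style online update: accumulate (x - prev_mean) * (x - new_mean),
            # the running sum of squared deviations, without a second pass over the data.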
rhs = (x - self.prev_mean) * (x - mean)
if self.mse is None:
self.mse = rhs
else:
self.mse += rhs
        # clone so that prev_mean does not alias the running mean tensor
self.prev_mean = mean.clone()
class Var(Stat):
"""
Calculates the variance of a tensor, with an order. e.g.
if `order = 1` then it will calculate sample variance.
This is equal to mse / (n - order)
"""
def __init__(self, name: Optional[str] = None, order: int = 0) -> None:
if name is None:
if order == 0:
name = "variance"
elif order == 1:
name = "sample_variance"
else:
name = f"variance({order})"
super().__init__(name=name, order=order)
self.order = order
def init(self):
self.mse = self._get_stat(MSE())
self.n = self._get_stat(Count())
def update(self, x: Tensor):
pass
def get(self) -> Optional[Tensor]:
mse = self.mse.get()
n = self.n.get()
if mse is None:
return None
if n <= self.order:
return torch.zeros_like(mse)
# NOTE: The following ensures mse is a float tensor.
# torch.true_divide is available in PyTorch 1.5 and later.
# This is for compatibility with 1.4.
return mse.to(torch.float64) / (n - self.order)
class StdDev(Stat):
"""
The standard deviation, with an associated order.
"""
def __init__(self, name: Optional[str] = None, order: int = 0) -> None:
if name is None:
if order == 0:
name = "std_dev"
elif order == 1:
name = "sample_std_dev"
else:
name = f"std_dev{order})"
super().__init__(name=name, order=order)
self.order = order
def init(self):
self.var = self._get_stat(Var(order=self.order))
def update(self, x: Tensor):
pass
def get(self) -> Optional[Tensor]:
var = self.var.get()
return var**0.5 if var is not None else None
class GeneralAccumFn(Stat):
"""
Performs update(x): result = fn(result, x)
where fn is a custom function
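    For example (a sketch), a running product can be tracked with:
        >>> prod = GeneralAccumFn(fn=torch.mul, name="product")
        >>> prod.update(torch.tensor(2.0))
        >>> prod.update(torch.tensor(3.0))
        >>> prod.get()   # tensor(6.)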
"""
def __init__(self, fn: Callable, name: Optional[str] = None) -> None:
super().__init__(name=name)
self.result = None
self.fn = fn
def get(self) -> Optional[Tensor]:
return self.result
def update(self, x):
if self.result is None:
self.result = x
else:
self.result = self.fn(self.result, x)
class Min(GeneralAccumFn):
def __init__(
self, name: Optional[str] = None, min_fn: Callable = torch.min
) -> None:
super().__init__(name=name, fn=min_fn)
class Max(GeneralAccumFn):
def __init__(
self, name: Optional[str] = None, max_fn: Callable = torch.max
) -> None:
super().__init__(name=name, fn=max_fn)
class Sum(GeneralAccumFn):
def __init__(
self, name: Optional[str] = None, add_fn: Callable = torch.add
) -> None:
super().__init__(name=name, fn=add_fn)
def CommonStats() -> List[Stat]:
r"""
Returns common summary statistics, specifically:
Mean, Sample Variance, Sample Std Dev, Min, Max
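    A quick usage sketch (``Summarizer`` is assumed to be imported from
    ``captum.attr``):
        >>> summ = Summarizer(CommonStats())
        >>> summ.update(torch.tensor([1.0, 2.0]))
        >>> summ.update(torch.tensor([3.0, 4.0]))
        >>> summ.summary["mean"]   # tensor([2., 3.])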
"""
return [Mean(), Var(order=1), StdDev(order=1), Min(), Max()]
|
#!/usr/bin/env python3
from typing import Dict, List, Optional, Tuple, Type, Union
import torch
from captum.attr._utils.stat import Count, Max, Mean, Min, MSE, Stat, StdDev, Sum, Var
from captum.log import log_usage
from torch import Tensor
class Summarizer:
r"""
This class simply wraps over a given a set of SummarizerSingleTensor's in order
to summarise multiple input tensors.
Basic usage:
    >>> from captum.attr import Summarizer
    >>> from captum.attr._utils.stat import Mean, StdDev
    >>>
    >>> attrib = torch.tensor([1, 2, 3, 4, 5])
    >>>
    >>> summ = Summarizer([Mean(), StdDev(order=0)])
    >>> summ.update(attrib)
    >>>
    >>> print(summ.summary['mean'])
"""
@log_usage()
def __init__(self, stats: List[Stat]) -> None:
r"""
Args:
stats (List[Stat]):
The list of statistics you wish to track
"""
self._summarizers: List[SummarizerSingleTensor] = []
self._is_inputs_tuple: Optional[bool] = None
        self._stats, self._summary_stats_indices = _reorder_stats(stats)
def _copy_stats(self):
import copy
return copy.deepcopy(self._stats)
def update(self, x: Union[float, Tensor, Tuple[Union[float, Tensor], ...]]):
r"""
Calls `update` on each `Stat` object within the summarizer
Args:
x (Tensor or Tuple[Tensor, ...]):
The input(s) you wish to summarize
"""
if self._is_inputs_tuple is None:
self._is_inputs_tuple = isinstance(x, tuple)
else:
# we want input to be consistently a single input or a tuple
assert not (self._is_inputs_tuple ^ isinstance(x, tuple))
from captum._utils.common import _format_float_or_tensor_into_tuples
x = _format_float_or_tensor_into_tuples(x)
for i, inp in enumerate(x):
if i >= len(self._summarizers):
# _summarizers[i] is a new SummarizerSingleTensor, which
# aims to summarize input i (i.e. x[i])
#
# Thus, we must copy our stats, as otherwise
# in the best case the statistics for each input will be mangled
# and in the worst case we will run into an error due to different
                # dimensionality in the input tensors (i.e.
# x[i].shape != x[j].shape for some pair i, j)
stats = self._copy_stats()
self._summarizers.append(
SummarizerSingleTensor(
                        stats=stats, summary_stats_indices=self._summary_stats_indices
)
)
if not isinstance(inp, torch.Tensor):
inp = torch.tensor(inp, dtype=torch.float)
self._summarizers[i].update(inp)
@property
def summary(
self,
) -> Optional[
Union[Dict[str, Optional[Tensor]], List[Dict[str, Optional[Tensor]]]]
]:
r"""
Effectively calls `get` on each `Stat` object within this object for each input
Returns:
A dict or list of dict: mapping from the Stat
object's `name` to the associated value of `get`
"""
if len(self._summarizers) == 0:
return None
temp = [summ.summary for summ in self._summarizers]
return temp if self._is_inputs_tuple else temp[0]
def _reorder_stats(stats: List[Stat]) -> Tuple[List[Stat], List[int]]:
    # We want to store two things:
# 1. A mapping from a Stat to Stat object (self._stat_to_stat):
# This is to retrieve an existing Stat object for dependency
# resolution, e.g. Mean needs the Count stat - we want to
# retrieve it in O(1)
#
# 2. All of the necessary stats, in the correct order,
# to perform an update for each Stat (self.stats) trivially
# As a reference, the dependency graph for our stats is as follows:
# StdDev(x) -> Var(x) -> MSE -> Mean -> Count, for all valid x
#
# Step 1:
# Ensure we have all the necessary stats
# i.e. ensure we have the dependencies
# Step 2:
# Figure out the order to update them
dep_order = [StdDev, Var, MSE, Mean, Count]
# remove dupe stats
stats = set(stats)
summary_stats = set(stats)
from collections import defaultdict
stats_by_module: Dict[Type, List[Stat]] = defaultdict(list)
for stat in stats:
stats_by_module[stat.__class__].append(stat)
# StdDev is an odd case since it is parameterized, thus
# for each StdDev(order) we must ensure there is an associated Var(order)
for std_dev in stats_by_module[StdDev]:
stat_to_add = Var(order=std_dev.order) # type: ignore
stats.add(stat_to_add)
stats_by_module[stat_to_add.__class__].append(stat_to_add)
# For the other modules (deps[1:n-1]): if i exists =>
# we want to ensure i...n-1 exists
for i, dep in enumerate(dep_order[1:]):
if dep in stats_by_module:
stats.update([mod() for mod in dep_order[i + 1 :]])
break
# Step 2: get the correct order
# NOTE: we are sorting via a given topological order
sort_order = {mod: i for i, mod in enumerate(dep_order)}
sort_order[Min] = -1
sort_order[Max] = -1
sort_order[Sum] = -1
stats = list(stats)
stats.sort(key=lambda x: sort_order[x.__class__], reverse=True)
# get the summary stat indices
    summary_stat_indices = []
    for i, stat in enumerate(stats):
        if stat in summary_stats:
            summary_stat_indices.append(i)
    return stats, summary_stat_indices
class SummarizerSingleTensor:
r"""
A simple class that summarizes a single tensor. The basic functionality
of this class is two operations .update and .summary
If possible use `Summarizer` instead.
"""
def __init__(self, stats: List[Stat], summary_stats_indices: List[int]) -> None:
r"""
Args:
stats (list[Stat]): A list of all the Stat objects that
need to be updated. This must be in the appropriate order for
updates (see `_reorder_stats`)
            summary_stats_indices (list[int]): A list of indices, referencing `stats`,
which are the stats you want to show in the .summary property. This
does not require any specific order.
"""
self._stats = stats
self._stat_to_stat = {stat: stat for stat in self._stats}
self._summary_stats = [stats[i] for i in summary_stats_indices]
for stat in stats:
stat._other_stats = self
stat.init()
def update(self, x: Tensor):
r"""
Updates the summary of a given tensor `x`
Args:
x (Tensor):
The tensor to summarize
"""
for stat in self._stats:
stat.update(x)
def get(self, stat: Stat) -> Optional[Stat]:
r"""
Retrieves `stat` from cache if this summarizer contains it.
        Note that `Stat` has its hash/equality method overridden, such
that an object with the same class and parameters will have the
same hash. Thus, if you call `get` with a `Stat`, an associated
`Stat` with the same class and parameters belonging to this object
will be retrieved if it exists.
If no such object is retrieved then `None` is returned.
Args:
stat (Stat):
The stat to retrieve
Returns:
Stat
The cached stat object or `None`
"""
if stat not in self._stat_to_stat:
return None
return self._stat_to_stat[stat]
@property
def summary(self) -> Dict[str, Optional[Tensor]]:
"""
Returns:
            Dict[str, Optional[Tensor]]
                A mapping from each summary stat's name to its current value
"""
return {stat.name: stat.get() for stat in self._summary_stats}
|
#!/usr/bin/env python3
from abc import ABC, abstractmethod
import torch
from ..._utils.common import _format_tensor_into_tuples
class PropagationRule(ABC):
"""
Base class for all propagation rule classes, also called Z-Rule.
    STABILITY_FACTOR is used to assure that no zero division occurs.
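    Rules are typically assigned to individual modules before running LRP, e.g.
    (a sketch assuming ``model`` has ``conv1`` and ``fc`` submodules):
        >>> model.conv1.rule = GammaRule(gamma=0.25)
        >>> model.fc.rule = EpsilonRule(epsilon=1e-9)
        >>> attr = LRP(model).attribute(inputs, target=target)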
"""
STABILITY_FACTOR = 1e-9
def forward_hook(self, module, inputs, outputs):
"""Register backward hooks on input and output
tensors of linear layers in the model."""
inputs = _format_tensor_into_tuples(inputs)
self._has_single_input = len(inputs) == 1
self._handle_input_hooks = []
for input in inputs:
if not hasattr(input, "hook_registered"):
input_hook = self._create_backward_hook_input(input.data)
self._handle_input_hooks.append(input.register_hook(input_hook))
input.hook_registered = True
output_hook = self._create_backward_hook_output(outputs.data)
self._handle_output_hook = outputs.register_hook(output_hook)
return outputs.clone()
@staticmethod
def backward_hook_activation(module, grad_input, grad_output):
"""Backward hook to propagate relevance over non-linear activations."""
# replace_out is set in _backward_hook_input, this is necessary
# due to 2 tensor hooks on the same tensor
if hasattr(grad_output, "replace_out"):
hook_out = grad_output.replace_out
del grad_output.replace_out
return hook_out
return grad_output
def _create_backward_hook_input(self, inputs):
def _backward_hook_input(grad):
relevance = grad * inputs
device = grad.device
if self._has_single_input:
self.relevance_input[device] = relevance.data
else:
self.relevance_input[device].append(relevance.data)
# replace_out is needed since two hooks are set on the same tensor
# The output of this hook is needed in backward_hook_activation
grad.replace_out = relevance
return relevance
return _backward_hook_input
def _create_backward_hook_output(self, outputs):
def _backward_hook_output(grad):
sign = torch.sign(outputs)
sign[sign == 0] = 1
relevance = grad / (outputs + sign * self.STABILITY_FACTOR)
self.relevance_output[grad.device] = grad.data
return relevance
return _backward_hook_output
def forward_hook_weights(self, module, inputs, outputs):
"""Save initial activations a_j before modules are changed"""
device = inputs[0].device if isinstance(inputs, tuple) else inputs.device
if hasattr(module, "activations") and device in module.activations:
raise RuntimeError(
"Module {} is being used more than once in the network, which "
"is not supported by LRP. "
"Please ensure that module is being used only once in the "
"network.".format(module)
)
module.activations[device] = tuple(input.data for input in inputs)
self._manipulate_weights(module, inputs, outputs)
@abstractmethod
def _manipulate_weights(self, module, inputs, outputs):
raise NotImplementedError
def forward_pre_hook_activations(self, module, inputs):
"""Pass initial activations to graph generation pass"""
device = inputs[0].device if isinstance(inputs, tuple) else inputs.device
for input, activation in zip(inputs, module.activations[device]):
input.data = activation
return inputs
class EpsilonRule(PropagationRule):
"""
Rule for relevance propagation using a small value of epsilon
to avoid numerical instabilities and remove noise.
Use for middle layers.
Args:
        epsilon (int or float): Value added to the denominator during
                    relevance propagation to avoid numerical instabilities.
"""
def __init__(self, epsilon=1e-9) -> None:
self.STABILITY_FACTOR = epsilon
def _manipulate_weights(self, module, inputs, outputs):
pass
class GammaRule(PropagationRule):
"""
Gamma rule for relevance propagation, gives more importance to
positive relevance.
Use for lower layers.
Args:
gamma (float): The gamma parameter determines by how much
the positive relevance is increased.
"""
def __init__(self, gamma=0.25, set_bias_to_zero=False) -> None:
self.gamma = gamma
self.set_bias_to_zero = set_bias_to_zero
def _manipulate_weights(self, module, inputs, outputs):
if hasattr(module, "weight"):
module.weight.data = (
module.weight.data + self.gamma * module.weight.data.clamp(min=0)
)
if self.set_bias_to_zero and hasattr(module, "bias"):
if module.bias is not None:
module.bias.data = torch.zeros_like(module.bias.data)
class Alpha1_Beta0_Rule(PropagationRule):
"""
Alpha1_Beta0 rule for relevance backpropagation, also known
as Deep-Taylor. Only positive relevance is propagated, resulting
in stable results, therefore recommended as the initial choice.
Warning: Does not work for BatchNorm modules because weight and bias
are defined differently.
Use for lower layers.
"""
def __init__(self, set_bias_to_zero=False) -> None:
self.set_bias_to_zero = set_bias_to_zero
def _manipulate_weights(self, module, inputs, outputs):
if hasattr(module, "weight"):
module.weight.data = module.weight.data.clamp(min=0)
if self.set_bias_to_zero and hasattr(module, "bias"):
if module.bias is not None:
module.bias.data = torch.zeros_like(module.bias.data)
class IdentityRule(EpsilonRule):
"""
Identity rule for skipping layer manipulation and propagating the
relevance over a layer. Only valid for modules with same dimensions for
inputs and outputs.
Can be used for BatchNorm2D.
"""
def _create_backward_hook_input(self, inputs):
def _backward_hook_input(grad):
return self.relevance_output[grad.device]
return _backward_hook_input
|
#!/usr/bin/env python3
import math
from typing import Any, Callable, cast, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_feature_mask,
_format_output,
_is_tuple,
_run_forward,
)
from captum._utils.progress import progress
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import PerturbationAttribution
from captum.attr._utils.common import _format_input_baseline
from captum.log import log_usage
from torch import dtype, Tensor
class FeatureAblation(PerturbationAttribution):
r"""
A perturbation based approach to computing attribution, involving
replacing each input feature with a given baseline / reference, and
computing the difference in output. By default, each scalar value within
each input tensor is taken as a feature and replaced independently. Passing
    a feature mask allows grouping features to be ablated together. This can
be used in cases such as images, where an entire segment or region
can be ablated, measuring the importance of the segment (feature group).
Each input scalar in the group will be given the same attribution value
equal to the change in target as a result of ablating the entire feature
group.
    The forward function can either return a scalar per example, or a tensor
    of fixed size (or a scalar value) for the full batch, i.e. the output does
    not grow as the batch size increases. If the output is fixed we consider
    this model to be an "aggregation" of the inputs. In the fixed-size output
    mode we require `perturbations_per_eval == 1` and the `feature_mask` to be
    either `None` or to have 1 as the first dimension of every mask tensor
    (i.e. each feature group must span all examples in the batch).
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
"""
PerturbationAttribution.__init__(self, forward_func)
self.use_weights = False
        # Only used when perturbations_per_eval > 1, where the 1st dim of
        # forward_func's output must grow with the input batch size. If forward's
        # output is aggregated, we cannot expand the input to include more
        # perturbations in one call. While this flag is False, we validate this
        # assumption by comparing the output of the original input with the output
        # of a modified input whose batch size has been expanded based on
        # perturbations_per_eval. The flag is set to True once the output of the
        # modified input grows as expected; after that we assume the model's
        # behavior stays consistent and no longer check.
self._is_output_shape_valid = False
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
perturbations_per_eval: int = 1,
show_progress: bool = False,
**kwargs: Any,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which ablation
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference value which replaces each
feature when ablated.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or
broadcastable to match the dimensions of inputs
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which should be ablated together. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Each tensor
should contain integers in the range 0 to num_features
- 1, and indices corresponding to the same feature should
have the same value.
Note that features within each input tensor are ablated
independently (not across tensors).
If the forward function returns a single scalar per batch,
we enforce that the first dimension of each mask must be 1,
since attributions are returned batch-wise rather than per
example, so the attributions must correspond to the
same features (indices) in each input example.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature, which
is ablated independently.
Default: None
perturbations_per_eval (int, optional): Allows ablation of multiple
features to be processed simultaneously in one call to
forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function's number of outputs does not
change as the batch size grows (e.g. if it outputs a
scalar value), you must set perturbations_per_eval to 1
and use a single feature mask to describe the features
for all examples in the batch.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
**kwargs (Any, optional): Any additional arguments used by child
classes of FeatureAblation (such as Occlusion) to construct
ablations. These arguments are ignored when using
FeatureAblation directly.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If the forward function returns
a scalar value per example, attributions will be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If the forward function returns a scalar per batch, then
attribution tensor(s) will have first dimension 1 and
the remaining dimensions will match the input.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple of tensors is provided for inputs, a
tuple of corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # Defining FeatureAblation interpreter
>>> ablator = FeatureAblation(net)
>>> # Computes ablation attribution, ablating each of the 16
>>> # scalar input independently.
>>> attr = ablator.attribute(input, target=1)
>>> # Alternatively, we may want to ablate features in groups, e.g.
>>> # grouping each 2x2 square of the inputs and ablating them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are ablated
>>> # simultaneously, and the attribution for each input in the same
>>> # group (0, 1, 2, and 3) per example are the same.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> attr = ablator.attribute(input, target=1, feature_mask=feature_mask)
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs, baselines = _format_input_baseline(inputs, baselines)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
num_examples = inputs[0].shape[0]
feature_mask = _format_feature_mask(feature_mask, inputs)
assert (
isinstance(perturbations_per_eval, int) and perturbations_per_eval >= 1
), "Perturbations per evaluation must be an integer and at least 1."
with torch.no_grad():
if show_progress:
feature_counts = self._get_feature_counts(
inputs, feature_mask, **kwargs
)
total_forwards = (
sum(
math.ceil(count / perturbations_per_eval)
for count in feature_counts
)
+ 1
) # add 1 for the initial eval
attr_progress = progress(
desc=f"{self.get_name()} attribution", total=total_forwards
)
attr_progress.update(0)
# Computes initial evaluation with all features, which is compared
# to each ablated result.
initial_eval = self._strict_run_forward(
self.forward_func, inputs, target, additional_forward_args
)
if show_progress:
attr_progress.update()
# number of elements in the output of forward_func
n_outputs = initial_eval.numel() if isinstance(initial_eval, Tensor) else 1
# flatten eval outputs into 1D (n_outputs)
# add the leading dim for n_feature_perturbed
flattened_initial_eval = initial_eval.reshape(1, -1)
# Initialize attribution totals and counts
attrib_type = cast(dtype, flattened_initial_eval.dtype)
total_attrib = [
# attribute w.r.t each output element
torch.zeros(
(n_outputs,) + input.shape[1:],
dtype=attrib_type,
device=input.device,
)
for input in inputs
]
# Weights are used in cases where ablations may be overlapping.
if self.use_weights:
weights = [
torch.zeros(
(n_outputs,) + input.shape[1:], device=input.device
).float()
for input in inputs
]
# Iterate through each feature tensor for ablation
for i in range(len(inputs)):
# Skip any empty input tensors
if torch.numel(inputs[i]) == 0:
continue
for (
current_inputs,
current_add_args,
current_target,
current_mask,
) in self._ith_input_ablation_generator(
i,
inputs,
additional_forward_args,
target,
baselines,
feature_mask,
perturbations_per_eval,
**kwargs,
):
# modified_eval has (n_feature_perturbed * n_outputs) elements
# shape:
# agg mode: (*initial_eval.shape)
# non-agg mode:
# (feature_perturbed * batch_size, *initial_eval.shape[1:])
modified_eval = self._strict_run_forward(
self.forward_func,
current_inputs,
current_target,
current_add_args,
)
if show_progress:
attr_progress.update()
# if perturbations_per_eval > 1, the output shape must grow with
# input and not be aggregated
if perturbations_per_eval > 1 and not self._is_output_shape_valid:
current_batch_size = current_inputs[0].shape[0]
# number of perturbation, which is not the same as
# perturbations_per_eval when not enough features to perturb
n_perturb = current_batch_size / num_examples
current_output_shape = modified_eval.shape
# use initial_eval as the forward of perturbations_per_eval = 1
initial_output_shape = initial_eval.shape
assert (
# check if the output is not a scalar
current_output_shape
and initial_output_shape
# check if the output grow in same ratio, i.e., not agg
and current_output_shape[0]
== n_perturb * initial_output_shape[0]
), (
"When perturbations_per_eval > 1, forward_func's output "
"should be a tensor whose 1st dim grow with the input "
f"batch size: when input batch size is {num_examples}, "
f"the output shape is {initial_output_shape}; "
f"when input batch size is {current_batch_size}, "
f"the output shape is {current_output_shape}"
)
self._is_output_shape_valid = True
# reshape the leading dim for n_feature_perturbed
# flatten each feature's eval outputs into 1D of (n_outputs)
modified_eval = modified_eval.reshape(-1, n_outputs)
# eval_diff in shape (n_feature_perturbed, n_outputs)
eval_diff = flattened_initial_eval - modified_eval
# append the shape of one input example
# to make it broadcastable to mask
eval_diff = eval_diff.reshape(
eval_diff.shape + (inputs[i].dim() - 1) * (1,)
)
eval_diff = eval_diff.to(total_attrib[i].device)
if self.use_weights:
weights[i] += current_mask.float().sum(dim=0)
total_attrib[i] += (eval_diff * current_mask.to(attrib_type)).sum(
dim=0
)
if show_progress:
attr_progress.close()
# Divide total attributions by counts and return formatted attributions
if self.use_weights:
attrib = tuple(
single_attrib.float() / weight
for single_attrib, weight in zip(total_attrib, weights)
)
else:
attrib = tuple(total_attrib)
_result = _format_output(is_inputs_tuple, attrib)
return _result
def _ith_input_ablation_generator(
self,
i,
inputs,
additional_args,
target,
baselines,
input_mask,
perturbations_per_eval,
**kwargs,
):
"""
This method returns a generator of ablation perturbations of the i-th input
Returns:
ablation_iter (Generator): yields each perturbation to be evaluated
as a tuple (inputs, additional_forward_args, targets, mask).
"""
extra_args = {}
for key, value in kwargs.items():
# For any tuple argument in kwargs, we choose index i of the tuple.
if isinstance(value, tuple):
extra_args[key] = value[i]
else:
extra_args[key] = value
input_mask = input_mask[i] if input_mask is not None else None
min_feature, num_features, input_mask = self._get_feature_range_and_mask(
inputs[i], input_mask, **extra_args
)
num_examples = inputs[0].shape[0]
perturbations_per_eval = min(perturbations_per_eval, num_features)
baseline = baselines[i] if isinstance(baselines, tuple) else baselines
if isinstance(baseline, torch.Tensor):
baseline = baseline.reshape((1,) + baseline.shape)
if perturbations_per_eval > 1:
# Repeat features and additional args for batch size.
all_features_repeated = [
torch.cat([inputs[j]] * perturbations_per_eval, dim=0)
for j in range(len(inputs))
]
additional_args_repeated = (
_expand_additional_forward_args(additional_args, perturbations_per_eval)
if additional_args is not None
else None
)
target_repeated = _expand_target(target, perturbations_per_eval)
else:
all_features_repeated = list(inputs)
additional_args_repeated = additional_args
target_repeated = target
num_features_processed = min_feature
while num_features_processed < num_features:
current_num_ablated_features = min(
perturbations_per_eval, num_features - num_features_processed
)
# Store appropriate inputs and additional args based on batch size.
if current_num_ablated_features != perturbations_per_eval:
current_features = [
feature_repeated[0 : current_num_ablated_features * num_examples]
for feature_repeated in all_features_repeated
]
current_additional_args = (
_expand_additional_forward_args(
additional_args, current_num_ablated_features
)
if additional_args is not None
else None
)
current_target = _expand_target(target, current_num_ablated_features)
else:
current_features = all_features_repeated
current_additional_args = additional_args_repeated
current_target = target_repeated
# Store existing tensor before modifying
original_tensor = current_features[i]
# Construct ablated batch for features in range num_features_processed
# to num_features_processed + current_num_ablated_features and return
# mask with same size as ablated batch. ablated_features has dimension
# (current_num_ablated_features, num_examples, inputs[i].shape[1:])
# Note that in the case of sparse tensors, the second dimension
            # may not necessarily be num_examples and will match the first
# dimension of this tensor.
current_reshaped = current_features[i].reshape(
(current_num_ablated_features, -1) + current_features[i].shape[1:]
)
ablated_features, current_mask = self._construct_ablated_input(
current_reshaped,
input_mask,
baseline,
num_features_processed,
num_features_processed + current_num_ablated_features,
**extra_args,
)
# current_features[i] has dimension
# (current_num_ablated_features * num_examples, inputs[i].shape[1:]),
# which can be provided to the model as input.
current_features[i] = ablated_features.reshape(
(-1,) + ablated_features.shape[2:]
)
yield tuple(
current_features
), current_additional_args, current_target, current_mask
# Replace existing tensor at index i.
current_features[i] = original_tensor
num_features_processed += current_num_ablated_features
def _construct_ablated_input(
self, expanded_input, input_mask, baseline, start_feature, end_feature, **kwargs
):
r"""
Ablates given expanded_input tensor with given feature mask, feature range,
and baselines. expanded_input shape is (`num_features`, `num_examples`, ...)
with remaining dimensions corresponding to remaining original tensor
dimensions and `num_features` = `end_feature` - `start_feature`.
input_mask has same number of dimensions as original input tensor (one less
than `expanded_input`), and can have first dimension either 1, applying same
feature mask to all examples, or `num_examples`. baseline is expected to
be broadcastable to match `expanded_input`.
This method returns the ablated input tensor, which has the same
dimensionality as `expanded_input` as well as the corresponding mask with
either the same dimensionality as `expanded_input` or second dimension
being 1. This mask contains 1s in locations which have been ablated (and
thus counted towards ablations for that feature) and 0s otherwise.
"""
current_mask = torch.stack(
[input_mask == j for j in range(start_feature, end_feature)], dim=0
).long()
ablated_tensor = (
expanded_input * (1 - current_mask).to(expanded_input.dtype)
) + (baseline * current_mask.to(expanded_input.dtype))
return ablated_tensor, current_mask
def _get_feature_range_and_mask(self, input, input_mask, **kwargs):
if input_mask is None:
# Obtain feature mask for selected input tensor, matches size of
# 1 input example, (1 x inputs[i].shape[1:])
input_mask = torch.reshape(
torch.arange(torch.numel(input[0]), device=input.device),
input[0:1].shape,
).long()
return (
torch.min(input_mask).item(),
torch.max(input_mask).item() + 1,
input_mask,
)
def _get_feature_counts(self, inputs, feature_mask, **kwargs):
"""return the numbers of input features"""
if not feature_mask:
return tuple(inp[0].numel() if inp.numel() else 0 for inp in inputs)
return tuple(
(mask.max() - mask.min()).item() + 1
if mask is not None
else (inp[0].numel() if inp.numel() else 0)
for inp, mask in zip(inputs, feature_mask)
)
def _strict_run_forward(self, *args, **kwargs) -> Tensor:
"""
A temp wrapper for global _run_forward util to force forward output
type assertion & conversion.
Remove after the strict logic is supported by all attr classes
"""
forward_output = _run_forward(*args, **kwargs)
if isinstance(forward_output, Tensor):
return forward_output
output_type = type(forward_output)
assert output_type is int or output_type is float, (
"the return of forward_func must be a tensor, int, or float,"
f" received: {forward_output}"
)
# using python built-in type as torch dtype
# int -> torch.int64, float -> torch.float64
# ref: https://github.com/pytorch/pytorch/pull/21215
return torch.tensor(forward_output, dtype=output_type)
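# The following is an illustrative sketch (not part of the library) of the
# mask-based ablation performed in `_construct_ablated_input` above: for each
# feature id in the processed range, a binary mask selects the positions that
# belong to that feature, and those positions are replaced by the baseline.
# Shapes and values below are assumptions chosen only for demonstration.
if __name__ == "__main__":
    # Two ablation groups, one example with four scalar positions.
    expanded_input = torch.arange(8, dtype=torch.float).reshape(2, 1, 4)
    input_mask = torch.tensor([[0, 0, 1, 1]])  # feature id of each position
    baseline = 0.0
    current_mask = torch.stack(
        [input_mask == j for j in range(0, 2)], dim=0
    ).long()
    ablated = (expanded_input * (1 - current_mask).to(expanded_input.dtype)) + (
        baseline * current_mask.to(expanded_input.dtype)
    )
    # Row 0 ablates feature 0 (first two positions); row 1 ablates feature 1.
    print(ablated)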
|
#!/usr/bin/env python3
from typing import Any, Callable, Tuple, Union
import torch
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.feature_ablation import FeatureAblation
from captum.log import log_usage
from torch import Tensor
def _permute_feature(x: Tensor, feature_mask: Tensor) -> Tensor:
n = x.size(0)
assert n > 1, "cannot permute features with batch_size = 1"
perm = torch.randperm(n)
no_perm = torch.arange(n)
while (perm == no_perm).all():
perm = torch.randperm(n)
return (x[perm] * feature_mask.to(dtype=x.dtype)) + (
x * feature_mask.bitwise_not().to(dtype=x.dtype)
)
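# Illustrative sketch (an assumption for demonstration, not library code): the
# helper above shuffles only the masked positions across the batch, leaving
# unmasked positions untouched.
def _demo_permute_feature() -> Tensor:
    x = torch.arange(6, dtype=torch.float).reshape(3, 2)  # batch of 3 examples
    mask = torch.tensor([True, False])  # permute only the first column
    # Column 0 becomes a non-identity permutation of [0, 2, 4]; column 1 stays.
    return _permute_feature(x, mask)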
class FeaturePermutation(FeatureAblation):
r"""
A perturbation based approach to compute attribution, which
takes each input feature, permutes the feature values within a batch,
and computes the difference between original and shuffled outputs for
the given batch. This difference signifies the feature importance
for the permuted feature.
Example pseudocode for the algorithm is as follows::
perm_feature_importance(batch):
importance = dict()
baseline_error = error_metric(model(batch), batch_labels)
for each feature:
permute this feature across the batch
error = error_metric(model(permuted_batch), batch_labels)
importance[feature] = baseline_error - error
"un-permute" the feature across the batch
return importance
It should be noted that the `error_metric` must be called in the
`forward_func`. You do not need to have an error metric, e.g. you
could simply return the logits (the model output), but this may or may
not provide a meaningful attribution.
This method, unlike other attribution methods, requires a batch
of examples to compute attributions and cannot be performed on a single example.
By default, each scalar value within
each input tensor is taken as a feature and shuffled independently. Passing
a feature mask allows features to be grouped and shuffled together.
Each input scalar in the group will be given the same attribution value
equal to the change in target as a result of shuffling the entire feature
group.
The forward function can either return a scalar per example, or a single
scalar for the full batch. If a single scalar is returned for the batch,
`perturbations_per_eval` must be 1, and the returned attributions will have
first dimension 1, corresponding to feature importance across all
examples in the batch.
More information can be found in the permutation feature
importance algorithm description here:
https://christophm.github.io/interpretable-ml-book/feature-importance.html
"""
def __init__(
self, forward_func: Callable, perm_func: Callable = _permute_feature
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
perm_func (Callable, optional): A function that accepts a batch of
inputs and a feature mask, and "permutes" the feature using
feature mask across the batch. This defaults to a function
which applies a random permutation; this argument only needs
to be provided if custom permutation behavior is desired.
Default: `_permute_feature`
"""
FeatureAblation.__init__(self, forward_func=forward_func)
self.perm_func = perm_func
# suppressing error caused by the child class not having a matching
# signature to the parent
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
perturbations_per_eval: int = 1,
show_progress: bool = False,
**kwargs: Any,
) -> TensorOrTupleOfTensorsGeneric:
r"""
This function is almost equivalent to
:func:`FeatureAblation.attribute <captum.attr.FeatureAblation.attribute>`. The
main difference is the way ablated examples are generated. Specifically they
are generated through the ``perm_func``, as we set the baselines for
:func:`FeatureAblation.attribute <captum.attr.FeatureAblation.attribute>` to
``None``.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
permutation attributions are computed. If
forward_func takes a single tensor as input, a
single input tensor should be provided. If
forward_func takes multiple tensors as input, a
tuple of the input tensors should be provided. It is
assumed that for all given input tensors, dimension
0 corresponds to the number of examples (aka batch
size), and if multiple input tensors are provided,
the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which difference is computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which should be ablated together. feature_mask
should contain the same number of tensors as inputs.
Each tensor should be the same size as the
corresponding input or broadcastable to match the
input tensor. Each tensor should contain integers in
the range 0 to num_features - 1, and indices
corresponding to the same feature should have the
same value. Note that features within each input
tensor are ablated independently (not across
tensors).
The first dimension of each mask must be 1, since the same
grouping of features must be used for each input sample.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature, which
is permuted independently.
Default: None
perturbations_per_eval (int, optional): Allows permutations
of multiple features to be processed simultaneously
in one call to forward_fn. Each forward pass will
contain a maximum of perturbations_per_eval * #examples
samples. For DataParallel models, each batch is
split among the available devices, so evaluations on
each available device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
**kwargs (Any, optional): Any additional arguments used by child
classes of :class:`.FeatureAblation` (such as
:class:`.Occlusion`) to construct ablations. These
arguments are ignored when using FeatureAblation directly.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If the forward function returns
a scalar value per example, attributions will be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If the forward function returns a scalar per batch, then
attribution tensor(s) will have first dimension 1 and
the remaining dimensions will match the input.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple of tensors is provided for inputs,
a tuple of corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 10 x 4 x 4
>>> input = torch.randn(10, 4, 4)
>>> # Defining FeaturePermutation interpreter
>>> feature_perm = FeaturePermutation(net)
>>> # Computes permutation attribution, shuffling each of the 16
>>> # scalar inputs independently.
>>> attr = feature_perm.attribute(input, target=1)
>>> # Alternatively, we may want to permute features in groups, e.g.
>>> # grouping each 2x2 square of the inputs and shuffling them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are shuffled
>>> # simultaneously, and the attribution for each input in the same
>>> # group (0, 1, 2, and 3) per example are the same.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> attr = feature_perm.attribute(input, target=1,
>>> feature_mask=feature_mask)
"""
return FeatureAblation.attribute.__wrapped__(
self,
inputs,
baselines=None,
target=target,
additional_forward_args=additional_forward_args,
feature_mask=feature_mask,
perturbations_per_eval=perturbations_per_eval,
show_progress=show_progress,
**kwargs,
)
def _construct_ablated_input(
self,
expanded_input: Tensor,
input_mask: Tensor,
baseline: Union[int, float, Tensor],
start_feature: int,
end_feature: int,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""
This function permutes the features of `expanded_input` with a given
feature mask and feature range. Permutation occurs via calling
`self.perm_func` across each batch within `expanded_input`. As with
`FeatureAblation._construct_ablated_input`:
- `expanded_input.shape = (num_features, num_examples, ...)`
- `num_features = end_feature - start_feature` (i.e. start and end is a
half-closed interval)
- `input_mask` is a tensor of the same shape as one input, which
describes the locations of each feature via their "index"
Since `baselines` is set to None for `FeatureAblation.attribute`, this
will be the zero tensor; however, it is not used.
"""
assert input_mask.shape[0] == 1, (
"input_mask.shape[0] != 1: pass in one mask in order to permute"
"the same features for each input"
)
current_mask = torch.stack(
[input_mask == j for j in range(start_feature, end_feature)], dim=0
).bool()
output = torch.stack(
[
self.perm_func(x, mask.squeeze(0))
for x, mask in zip(expanded_input, current_mask)
]
)
return output, current_mask
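# Minimal runnable sketch (assumptions: a tiny linear model standing in for the
# `SimpleClassifier` used in the docstring example above) showing grouped
# feature permutation end to end.
if __name__ == "__main__":
    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(16, 3))
    inputs = torch.randn(10, 4, 4)
    feature_mask = torch.tensor(
        [[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]]
    )
    feature_perm = FeaturePermutation(model)
    attr = feature_perm.attribute(inputs, target=1, feature_mask=feature_mask)
    print(attr.shape)  # expected: torch.Size([10, 4, 4])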
|
#!/usr/bin/env python3
from typing import Any, Callable, Tuple, Union
import numpy as np
import torch
from captum._utils.common import _format_tensor_into_tuples
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._utils.common import (
_format_and_verify_sliding_window_shapes,
_format_and_verify_strides,
)
from captum.log import log_usage
from torch import Tensor
class Occlusion(FeatureAblation):
r"""
A perturbation based approach to compute attribution, involving
replacing each contiguous rectangular region with a given baseline /
reference, and computing the difference in output. For features located
in multiple regions (hyperrectangles), the corresponding output differences
are averaged to compute the attribution for that feature.
The first patch is applied with the corner aligned with all indices 0,
and strides are applied until the entire dimension range is covered. Note
that this may cause the final patch applied in a direction to be cut-off
and thus smaller than the target occlusion shape.
More details regarding the occlusion (or grey-box / sliding window)
method can be found in the original paper and in the DeepExplain
implementation.
https://arxiv.org/abs/1311.2901
https://github.com/marcoancona/DeepExplain/blob/master/deepexplain\
/tensorflow/methods.py#L401
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
"""
FeatureAblation.__init__(self, forward_func)
self.use_weights = True
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
sliding_window_shapes: Union[Tuple[int, ...], Tuple[Tuple[int, ...], ...]],
strides: Union[
None, int, Tuple[int, ...], Tuple[Union[int, Tuple[int, ...]], ...]
] = None,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
perturbations_per_eval: int = 1,
show_progress: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which occlusion
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
sliding_window_shapes (tuple or tuple[tuple]): Shape of patch
(hyperrectangle) to occlude each input. For a single
input tensor, this must be a tuple of length equal to the
number of dimensions of the input tensor - 1, defining
the dimensions of the patch. If the input tensor is 1-d,
this should be an empty tuple. For multiple input tensors,
this must be a tuple containing one tuple for each input
tensor defining the dimensions of the patch for that
input tensor, as described for the single tensor case.
strides (int, tuple, tuple[int], or tuple[tuple], optional):
This defines the step by which the occlusion hyperrectangle
should be shifted by in each direction for each iteration.
For a single tensor input, this can be either a single
integer, which is used as the step size in each direction,
or a tuple of integers matching the number of dimensions
in the occlusion shape, defining the step size in the
corresponding dimension. For multiple tensor inputs, this
can be either a tuple of integers, one for each input
tensor (used for all dimensions of the corresponding
tensor), or a tuple of tuples, providing the stride per
dimension for each tensor.
To ensure that all inputs are covered by at least one
sliding window, the stride for any dimension must be
<= the corresponding sliding window dimension if the
sliding window dimension is less than the input
dimension.
If None is provided, a stride of 1 is used for each
dimension of each input tensor.
Default: None
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference value which replaces each
feature when occluded.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or
broadcastable to match the dimensions of inputs
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which difference is computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
perturbations_per_eval (int, optional): Allows multiple occlusions
to be included in one batch (one call to forward_fn).
By default, perturbations_per_eval is 1, so each occlusion
is processed individually.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # Defining Occlusion interpreter
>>> ablator = Occlusion(net)
>>> # Computes occlusion attribution, ablating each 3x3 patch,
>>> # shifting in each direction by the default of 1.
>>> attr = ablator.attribute(input, target=1, sliding_window_shapes=(3,3))
"""
formatted_inputs = _format_tensor_into_tuples(inputs)
# Formatting strides
strides = _format_and_verify_strides(strides, formatted_inputs)
# Formatting sliding window shapes
sliding_window_shapes = _format_and_verify_sliding_window_shapes(
sliding_window_shapes, formatted_inputs
)
# Construct tensors from sliding window shapes
sliding_window_tensors = tuple(
torch.ones(window_shape, device=formatted_inputs[i].device)
for i, window_shape in enumerate(sliding_window_shapes)
)
# Construct counts, defining number of steps to make of occlusion block in
# each dimension.
shift_counts = []
for i, inp in enumerate(formatted_inputs):
current_shape = np.subtract(inp.shape[1:], sliding_window_shapes[i])
# Verify sliding window doesn't exceed input dimensions.
assert (np.array(current_shape) >= 0).all(), (
"Sliding window dimensions {} cannot exceed input dimensions" "{}."
).format(sliding_window_shapes[i], tuple(inp.shape[1:]))
# Stride cannot be larger than sliding window for any dimension where
# the sliding window doesn't cover the entire input.
assert np.logical_or(
np.array(current_shape) == 0,
np.array(strides[i]) <= sliding_window_shapes[i],
).all(), (
"Stride dimension {} cannot be larger than sliding window "
"shape dimension {}."
).format(
strides[i], sliding_window_shapes[i]
)
shift_counts.append(
tuple(
np.add(np.ceil(np.divide(current_shape, strides[i])).astype(int), 1)
)
)
# Use ablation attribute method
return super().attribute.__wrapped__(
self,
inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
perturbations_per_eval=perturbations_per_eval,
sliding_window_tensors=sliding_window_tensors,
shift_counts=tuple(shift_counts),
strides=strides,
show_progress=show_progress,
)
def _construct_ablated_input(
self,
expanded_input: Tensor,
input_mask: Union[None, Tensor],
baseline: Union[Tensor, int, float],
start_feature: int,
end_feature: int,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""
Ablates given expanded_input tensor with given feature mask, feature range,
and baselines, and any additional arguments.
expanded_input shape is (num_features, num_examples, ...)
with remaining dimensions corresponding to remaining original tensor
dimensions and num_features = end_feature - start_feature.
input_mask is None for occlusion, and the mask is constructed
using sliding_window_tensors, strides, and shift counts, which are provided in
kwargs. baseline is expected to
be broadcastable to match expanded_input.
This method returns the ablated input tensor, which has the same
dimensionality as expanded_input as well as the corresponding mask with
either the same dimensionality as expanded_input or second dimension
being 1. This mask contains 1s in locations which have been ablated (and
thus counted towards ablations for that feature) and 0s otherwise.
"""
input_mask = torch.stack(
[
self._occlusion_mask(
expanded_input,
j,
kwargs["sliding_window_tensors"],
kwargs["strides"],
kwargs["shift_counts"],
)
for j in range(start_feature, end_feature)
],
dim=0,
).long()
ablated_tensor = (
expanded_input
* (
torch.ones(1, dtype=torch.long, device=expanded_input.device)
- input_mask
).to(expanded_input.dtype)
) + (baseline * input_mask.to(expanded_input.dtype))
return ablated_tensor, input_mask
def _occlusion_mask(
self,
expanded_input: Tensor,
ablated_feature_num: int,
sliding_window_tsr: Tensor,
strides: Union[int, Tuple[int, ...]],
shift_counts: Tuple[int, ...],
) -> Tensor:
"""
This constructs the current occlusion mask, which is the appropriate
shift of the sliding window tensor based on the ablated feature number.
The feature number ranges between 0 and the product of the shift counts
(# of times the sliding window should be shifted in each dimension).
First, the ablated feature number is converted to the number of steps in
each dimension from the origin, based on shift counts. This procedure
is similar to a base conversion, with the position values equal to shift
counts. The feature number is first taken modulo shift_counts[0] to
get the number of shifts in the first dimension (each shift moves the
window by the stride in that dimension), and is then integer-divided
by shift_counts[0]. The procedure is then continued for each element
of shift_counts. This computes the total shift in each direction for
the sliding window.
We then need to compute the padding required after the window in each
dimension, which is equal to the total input dimension minus the sliding
window dimension minus the (left) shift amount. We construct the
array pad_values which contains the left and right pad values for each
dimension, in reverse order of dimensions, starting from the last one.
Once these padding values are computed, we pad the sliding window tensor
of 1s with 0s appropriately, which is the corresponding mask,
and the result will match the input shape.
"""
remaining_total = ablated_feature_num
current_index = []
for i, shift_count in enumerate(shift_counts):
stride = strides[i] if isinstance(strides, tuple) else strides
current_index.append((remaining_total % shift_count) * stride)
remaining_total = remaining_total // shift_count
remaining_padding = np.subtract(
expanded_input.shape[2:], np.add(current_index, sliding_window_tsr.shape)
)
pad_values = [
val for pair in zip(remaining_padding, current_index) for val in pair
]
pad_values.reverse()
padded_tensor = torch.nn.functional.pad(
sliding_window_tsr, tuple(pad_values) # type: ignore
)
return padded_tensor.reshape((1,) + padded_tensor.shape)
def _get_feature_range_and_mask(
self, input: Tensor, input_mask: Tensor, **kwargs: Any
) -> Tuple[int, int, None]:
feature_max = np.prod(kwargs["shift_counts"])
return 0, feature_max, None
def _get_feature_counts(self, inputs, feature_mask, **kwargs):
"""return the numbers of possible input features"""
return tuple(np.prod(counts).astype(int) for counts in kwargs["shift_counts"])
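# Illustrative sketch (not library code) of the base-conversion step described
# in `_occlusion_mask`: an ablated feature number is decomposed into
# per-dimension window offsets, each step moving the window by the stride.
# The shift counts and strides below are assumptions for demonstration only.
if __name__ == "__main__":
    shift_counts = (3, 3)  # window positions available per dimension
    strides = (1, 2)
    for feature_num in range(int(np.prod(shift_counts))):
        remaining = feature_num
        offsets = []
        for shift_count, stride in zip(shift_counts, strides):
            offsets.append((remaining % shift_count) * stride)
            remaining //= shift_count
        print(feature_num, offsets)  # e.g. feature 4 -> offsets [1, 2]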
|
#!/usr/bin/env python3
import typing
from typing import Any, Callable, Tuple, Union
import numpy as np
import torch
from captum._utils.common import _is_tuple
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
Tensor,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._utils.attribution import GradientAttribution
from captum.attr._utils.common import (
_compute_conv_delta_and_format_attrs,
_format_callable_baseline,
_format_input_baseline,
)
from captum.log import log_usage
class GradientShap(GradientAttribution):
r"""
Implements gradient SHAP based on the implementation from SHAP's primary
author. For reference, please view the original
`implementation
<https://github.com/slundberg/shap#deep-learning-example-with-gradientexplainer-tensorflowkeraspytorch-models>`_
and the paper: `A Unified Approach to Interpreting Model Predictions
<https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions>`_
GradientShap approximates SHAP values by computing the expectations of
gradients by randomly sampling from the distribution of baselines/references.
It adds white noise to each input sample `n_samples` times, selects a
random baseline from baselines' distribution and a random point along the
path between the baseline and the input, and computes the gradient of outputs
with respect to those selected random points. The final SHAP values represent
the expected values of gradients * (inputs - baselines).
GradientShap makes an assumption that the input features are independent
and that the explanation model is linear, meaning that the explanations
are modeled through the additive composition of feature effects.
Under those assumptions, SHAP values can be approximated as the expectation
of gradients computed for `n_samples` randomly generated input samples,
obtained by adding gaussian noise to each input `n_samples` times, for
different baselines/references.
In some sense it can be viewed as an approximation of integrated gradients
by computing the expectations of gradients for different baselines.
The current implementation uses Smoothgrad from :class:`.NoiseTunnel` in order to
randomly draw samples from the distribution of baselines, add noise to input
samples and compute the expectation (smoothgrad).
"""
def __init__(self, forward_func: Callable, multiply_by_inputs: bool = True) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then this type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In the case of gradient shap, if `multiply_by_inputs`
is set to True, the sensitivity scores of the scaled inputs
are multiplied by (inputs - baselines).
"""
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
n_samples: int = 5,
stdevs: Union[float, Tuple[float, ...]] = 0.0,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
n_samples: int = 5,
stdevs: Union[float, Tuple[float, ...]] = 0.0,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
) -> TensorOrTupleOfTensorsGeneric:
...
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
n_samples: int = 5,
stdevs: Union[float, Tuple[float, ...]] = 0.0,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which SHAP attribution
values are computed. If `forward_func` takes a single
tensor as input, a single input tensor should be provided.
If `forward_func` takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (Tensor, tuple[Tensor, ...], or Callable):
Baselines define the starting point from which expectation
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
the first dimension equal to the number of examples
in the baselines' distribution. The remaining dimensions
must match with input tensor's dimension starting from
the second dimension.
- a tuple of tensors, if inputs is a tuple of tensors,
with the first dimension of any tensor inside the tuple
equal to the number of examples in the baseline's
distribution. The remaining dimensions must match
the dimensions of the corresponding input tensor
starting from the second dimension.
- callable function, optionally takes `inputs` as an
argument and either returns a single tensor
or a tuple of those.
It is recommended that the number of samples in the baselines'
tensors is larger than one.
n_samples (int, optional): The number of randomly generated examples
per sample in the input batch. Random examples are
generated by adding gaussian random noise to each sample.
Default: `5` if `n_samples` is not provided.
stdevs (float or tuple of float, optional): The standard deviation
of gaussian noise with zero mean that is added to each
input in the batch. If `stdevs` is a single float value
then that same value is used for all inputs. If it is
a tuple, then it must have the same length as the inputs
tuple. In this case, each stdev value in the stdevs tuple
corresponds to the input with the same index in the inputs
tuple.
Default: 0.0
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It can contain a tuple of ND tensors or
any arbitrary python type of any shape.
In case of the ND tensor the first dimension of the
tensor must correspond to the batch size. It will be
repeated for each of the `n_samples` randomly generated
input samples.
Note that the gradients are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution score computed based on GradientSHAP with respect
to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
This is computed using the property that the total
sum of forward_func(inputs) - forward_func(baselines)
must be very close to the total sum of the attributions
based on GradientSHAP.
Delta is calculated for each example in the input after adding
`n_samples` times gaussian noise to each of them. Therefore,
the dimensionality of the deltas tensor is equal to the
`number of examples in the input` * `n_samples`
The deltas are ordered by each input example and `n_samples`
noisy samples generated for it.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> gradient_shap = GradientShap(net)
>>> input = torch.randn(3, 3, 32, 32, requires_grad=True)
>>> # choosing baselines randomly
>>> baselines = torch.randn(20, 3, 32, 32)
>>> # Computes gradient shap for the input
>>> # Attribution size matches input size: 3x3x32x32
>>> attribution = gradient_shap.attribute(input, baselines,
target=5)
"""
# since `baselines` is a distribution, we can generate it using a function
# rather than passing it as an input argument
baselines = _format_callable_baseline(baselines, inputs)
assert isinstance(baselines[0], torch.Tensor), (
"Baselines distribution has to be provided in a form "
"of a torch.Tensor {}.".format(baselines[0])
)
input_min_baseline_x_grad = InputBaselineXGradient(
self.forward_func, self.multiplies_by_inputs
)
input_min_baseline_x_grad.gradient_func = self.gradient_func
nt = NoiseTunnel(input_min_baseline_x_grad)
# NOTE: using attribute.__wrapped__ to not log
attributions = nt.attribute.__wrapped__(
nt, # self
inputs,
nt_type="smoothgrad",
nt_samples=n_samples,
stdevs=stdevs,
draw_baseline_from_distrib=True,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
return_convergence_delta=return_convergence_delta,
)
return attributions
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
class InputBaselineXGradient(GradientAttribution):
def __init__(self, forward_func: Callable, multiply_by_inputs=True) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then this type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In the case of gradient shap, if `multiply_by_inputs`
is set to True, the sensitivity scores of the scaled inputs
are multiplied by (inputs - baselines).
"""
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
) -> TensorOrTupleOfTensorsGeneric:
...
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs, baselines = _format_input_baseline(inputs, baselines)
rand_coefficient = torch.tensor(
np.random.uniform(0.0, 1.0, inputs[0].shape[0]),
device=inputs[0].device,
dtype=inputs[0].dtype,
)
input_baseline_scaled = tuple(
_scale_input(input, baseline, rand_coefficient)
for input, baseline in zip(inputs, baselines)
)
grads = self.gradient_func(
self.forward_func, input_baseline_scaled, target, additional_forward_args
)
if self.multiplies_by_inputs:
input_baseline_diffs = tuple(
input - baseline for input, baseline in zip(inputs, baselines)
)
attributions = tuple(
input_baseline_diff * grad
for input_baseline_diff, grad in zip(input_baseline_diffs, grads)
)
else:
attributions = grads
return _compute_conv_delta_and_format_attrs(
self,
return_convergence_delta,
attributions,
baselines,
inputs,
additional_forward_args,
target,
is_inputs_tuple,
)
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
def _scale_input(
input: Tensor, baseline: Union[Tensor, int, float], rand_coefficient: Tensor
) -> Tensor:
# batch size
bsz = input.shape[0]
inp_shape_wo_bsz = input.shape[1:]
inp_shape = (bsz,) + tuple([1] * len(inp_shape_wo_bsz))
# expand and reshape the indices
rand_coefficient = rand_coefficient.view(inp_shape)
input_baseline_scaled = (
rand_coefficient * input + (1.0 - rand_coefficient) * baseline
).requires_grad_()
return input_baseline_scaled
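# Minimal sketch (not library code) of what a single GradientShap sample
# computes: interpolate between the input and a baseline via `_scale_input`,
# take gradients of the output at that point, and scale by (input - baseline).
# The tiny model below is an assumption chosen only for demonstration.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 1)
    inp = torch.randn(2, 4)
    baseline = torch.zeros(2, 4)
    rand_coefficient = torch.tensor(
        np.random.uniform(0.0, 1.0, inp.shape[0]), dtype=inp.dtype
    )
    scaled = _scale_input(inp, baseline, rand_coefficient)
    grads = torch.autograd.grad(model(scaled).sum(), scaled)[0]
    single_sample_attribution = grads * (inp - baseline)
    print(single_sample_attribution.shape)  # torch.Size([2, 4])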
|
#!/usr/bin/env python3
import inspect
import math
import typing
import warnings
from typing import Any, Callable, cast, List, Optional, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_flatten_tensor_or_tuple,
_format_output,
_format_tensor_into_tuples,
_get_max_feature_index,
_is_tuple,
_reduce_list,
_run_forward,
)
from captum._utils.models.linear_model import SkLearnLasso
from captum._utils.models.model import Model
from captum._utils.progress import progress
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._utils.attribution import PerturbationAttribution
from captum.attr._utils.batching import _batch_example_iterator
from captum.attr._utils.common import (
_construct_default_feature_mask,
_format_input_baseline,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import CosineSimilarity
from torch.utils.data import DataLoader, TensorDataset
class LimeBase(PerturbationAttribution):
r"""
Lime is an interpretability method that trains an interpretable surrogate model
by sampling points around a specified input example and using model evaluations
at these points to train a simpler interpretable 'surrogate' model, such as a
linear model.
LimeBase provides a generic framework to train a surrogate interpretable model.
This differs from most other attribution methods, since the method returns a
representation of the interpretable model (e.g. coefficients of the linear model).
For a similar interface to other perturbation-based attribution methods, please use
the Lime child class, which defines specific transformations for the interpretable
model.
LimeBase allows sampling points in either the interpretable space or the original
input space to train the surrogate model. The interpretable space is a feature
vector used to train the surrogate interpretable model; this feature space is often
of smaller dimensionality than the original feature space in order for the surrogate
model to be more interpretable.
If sampling in the interpretable space, a transformation function must be provided
to define how a vector sampled in the interpretable space can be transformed into
an example in the original input space. If sampling in the original input space, a
transformation function must be provided to define how the input can be transformed
into its interpretable vector representation.
More details regarding LIME can be found in the original paper:
https://arxiv.org/abs/1602.04938
"""
def __init__(
self,
forward_func: Callable,
interpretable_model: Model,
similarity_func: Callable,
perturb_func: Callable,
perturb_interpretable_space: bool,
from_interp_rep_transform: Optional[Callable],
to_interp_rep_transform: Optional[Callable],
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it. If a batch is provided as input for
attribution, it is expected that forward_func returns a scalar
representing the entire batch.
interpretable_model (Model): Model object to train interpretable model.
A Model object provides a `fit` method to train the model,
given a dataloader, with batches containing three tensors:
- interpretable_inputs: Tensor
[2D num_samples x num_interp_features],
- expected_outputs: Tensor [1D num_samples],
- weights: Tensor [1D num_samples]
The model object must also provide a `representation` method to
access the appropriate coefficients or representation of the
interpretable model after fitting.
Some predefined interpretable linear models are provided in
captum._utils.models.linear_model including wrappers around
SkLearn linear models as well as SGD-based PyTorch linear
models.
Note that calling fit multiple times should retrain the
interpretable model; each attribution call reuses
the same given interpretable model object.
similarity_func (Callable): Function which takes a single sample
along with its corresponding interpretable representation
and returns the weight of the interpretable sample for
training interpretable model. Weight is generally
determined based on similarity to the original input.
The original paper refers to this as a similarity kernel.
The expected signature of this callable is:
>>> similarity_func(
>>> original_input: Tensor or tuple[Tensor, ...],
>>> perturbed_input: Tensor or tuple[Tensor, ...],
>>> perturbed_interpretable_input:
>>> Tensor [2D 1 x num_interp_features],
>>> **kwargs: Any
>>> ) -> float or Tensor containing float scalar
perturbed_input and original_input will be the same type and
contain tensors of the same shape (regardless of whether or not
the sampling function returns inputs in the interpretable
space). original_input is the same as the input provided
when calling attribute.
All kwargs passed to the attribute method are
provided as keyword arguments (kwargs) to this callable.
perturb_func (Callable): Function which returns a single
sampled input, generally a perturbation of the original
input, which is used to train the interpretable surrogate
model. Function can return samples in either
the original input space (matching type and tensor shapes
of original input) or in the interpretable input space,
which is a vector containing the interpretable features.
Alternatively, this function can return a generator
yielding samples to train the interpretable surrogate
model, and n_samples perturbations will be sampled
from this generator.
The expected signature of this callable is:
>>> perturb_func(
>>> original_input: Tensor or tuple[Tensor, ...],
>>> **kwargs: Any
>>> ) -> Tensor, tuple[Tensor, ...], or
>>> generator yielding tensor or tuple[Tensor, ...]
All kwargs passed to the attribute method are
provided as keyword arguments (kwargs) to this callable.
Returned sampled input should match the input type (Tensor
or Tuple of Tensor and corresponding shapes) if
perturb_interpretable_space = False. If
perturb_interpretable_space = True, the return type should
be a single tensor of shape 1 x num_interp_features,
corresponding to the representation of the
sample to train the interpretable model.
perturb_interpretable_space (bool): Indicates whether
perturb_func returns a sample in the interpretable space
(tensor of shape 1 x num_interp_features) or a sample
in the original space, matching the format of the original
input. Once sampled, inputs can be converted to / from
the interpretable representation with either
to_interp_rep_transform or from_interp_rep_transform.
from_interp_rep_transform (Callable): Function which takes a
single sampled interpretable representation (tensor
of shape 1 x num_interp_features) and returns
the corresponding representation in the input space
(matching shapes of original input to attribute).
This argument is necessary if perturb_interpretable_space
is True, otherwise None can be provided for this argument.
The expected signature of this callable is:
>>> from_interp_rep_transform(
>>> curr_sample: Tensor [2D 1 x num_interp_features]
>>> original_input: Tensor or Tuple of Tensors,
>>> **kwargs: Any
>>> ) -> Tensor or tuple[Tensor, ...]
Returned sampled input should match the type of original_input
and corresponding tensor shapes.
All kwargs passed to the attribute method are
provided as keyword arguments (kwargs) to this callable.
to_interp_rep_transform (Callable): Function which takes a
sample in the original input space and converts to
its interpretable representation (tensor
of shape 1 x num_interp_features).
This argument is necessary if perturb_interpretable_space
is False, otherwise None can be provided for this argument.
The expected signature of this callable is:
>>> to_interp_rep_transform(
>>> curr_sample: Tensor or Tuple of Tensors,
>>> original_input: Tensor or Tuple of Tensors,
>>> **kwargs: Any
>>> ) -> Tensor [2D 1 x num_interp_features]
curr_sample will match the type of original_input
and corresponding tensor shapes.
All kwargs passed to the attribute method are
provided as keyword arguments (kwargs) to this callable.
"""
PerturbationAttribution.__init__(self, forward_func)
self.interpretable_model = interpretable_model
self.similarity_func = similarity_func
self.perturb_func = perturb_func
self.perturb_interpretable_space = perturb_interpretable_space
self.from_interp_rep_transform = from_interp_rep_transform
self.to_interp_rep_transform = to_interp_rep_transform
if self.perturb_interpretable_space:
assert (
self.from_interp_rep_transform is not None
), "Must provide transform from interpretable space to original input space"
" when sampling from interpretable space."
else:
assert (
self.to_interp_rep_transform is not None
), "Must provide transform from original input space to interpretable space"
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
n_samples: int = 50,
perturbations_per_eval: int = 1,
show_progress: bool = False,
**kwargs,
) -> Tensor:
r"""
This method attributes the output of the model with given target index
(in case it is provided, otherwise it assumes that output is a
scalar) to the inputs of the model using the approach described above.
It trains an interpretable model and returns a representation of the
interpretable model.
It is recommended to only provide a single example as input (tensors
with first dimension or batch size = 1). This is because LIME is generally
used for sample-based interpretability, training a separate interpretable
model to explain a model's prediction on each individual example.
A batch of inputs can be provided as inputs only if forward_func
returns a single value per batch (e.g. loss).
The interpretable feature representation should still have shape
1 x num_interp_features, corresponding to the interpretable
representation for the full batch, and perturbations_per_eval
must be set to 1.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which LIME
is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which surrogate model is trained
(for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_samples (int, optional): The number of samples of the original
model used to train the surrogate interpretable model.
Default: `50` if `n_samples` is not provided.
perturbations_per_eval (int, optional): Allows multiple samples
to be processed simultaneously in one call to forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
**kwargs (Any, optional): Any additional arguments necessary for
sampling and transformation functions (provided to
constructor).
Default: None
Returns:
**interpretable model representation**:
- **interpretable model representation** (*Any*):
A representation of the interpretable model trained. The return
type matches the return type of train_interpretable_model_func.
For example, this could contain coefficients of a
linear surrogate model.
Examples::
>>> # SimpleClassifier takes a single input tensor of
>>> # float features with size N x 5,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>>
>>> # We will train an interpretable model with the same
>>> # features by simply sampling with added Gaussian noise
>>> # to the inputs and training a model to predict the
>>> # score of the target class.
>>>
>>> # For interpretable model training, we will use sklearn
>>> # linear model in this example. We have provided wrappers
>>> # around sklearn linear models to fit the Model interface.
>>> # Any arguments provided to the sklearn constructor can also
>>> # be provided to the wrapper, e.g.:
>>> # SkLearnLinearModel("linear_model.Ridge", alpha=2.0)
>>> from captum._utils.models.linear_model import SkLearnLinearModel
>>>
>>>
>>> # Define similarity kernel (exponential kernel based on L2 norm)
>>> def similarity_kernel(
>>> original_input: Tensor,
>>> perturbed_input: Tensor,
>>> perturbed_interpretable_input: Tensor,
>>> **kwargs)->Tensor:
>>> # kernel_width will be provided to attribute as a kwarg
>>> kernel_width = kwargs["kernel_width"]
>>> l2_dist = torch.norm(original_input - perturbed_input)
>>> return torch.exp(- (l2_dist**2) / (kernel_width**2))
>>>
>>>
>>> # Define sampling function
>>> # This function samples in original input space
>>> def perturb_func(
>>> original_input: Tensor,
>>> **kwargs)->Tensor:
>>> return original_input + torch.randn_like(original_input)
>>>
>>> # For this example, we are setting the interpretable input to
>>> # match the model input, so the to_interp_rep_transform
>>> # function simply returns the input. In most cases, the interpretable
>>> # input will be different and may have a smaller feature set, so
>>> # an appropriate transformation function should be provided.
>>>
>>> def to_interp_transform(curr_sample, original_inp,
>>> **kwargs):
>>> return curr_sample
>>>
>>> # Generating random input with size 1 x 5
>>> input = torch.randn(1, 5)
>>> # Defining LimeBase interpreter
>>> lime_attr = LimeBase(net,
SkLearnLinearModel("linear_model.Ridge"),
similarity_func=similarity_kernel,
perturb_func=perturb_func,
perturb_interpretable_space=False,
from_interp_rep_transform=None,
to_interp_rep_transform=to_interp_transform)
>>> # Computes interpretable model, returning coefficients of linear
>>> # model.
>>> attr_coefs = lime_attr.attribute(input, target=1, kernel_width=1.1)
"""
with torch.no_grad():
inp_tensor = (
cast(Tensor, inputs) if isinstance(inputs, Tensor) else inputs[0]
)
device = inp_tensor.device
interpretable_inps = []
similarities = []
outputs = []
curr_model_inputs = []
expanded_additional_args = None
expanded_target = None
perturb_generator = None
if inspect.isgeneratorfunction(self.perturb_func):
perturb_generator = self.perturb_func(inputs, **kwargs)
if show_progress:
attr_progress = progress(
total=math.ceil(n_samples / perturbations_per_eval),
desc=f"{self.get_name()} attribution",
)
attr_progress.update(0)
batch_count = 0
for _ in range(n_samples):
if perturb_generator:
try:
curr_sample = next(perturb_generator)
except StopIteration:
warnings.warn(
"Generator completed prior to given n_samples iterations!"
)
break
else:
curr_sample = self.perturb_func(inputs, **kwargs)
batch_count += 1
if self.perturb_interpretable_space:
interpretable_inps.append(curr_sample)
curr_model_inputs.append(
self.from_interp_rep_transform( # type: ignore
curr_sample, inputs, **kwargs
)
)
else:
curr_model_inputs.append(curr_sample)
interpretable_inps.append(
self.to_interp_rep_transform( # type: ignore
curr_sample, inputs, **kwargs
)
)
curr_sim = self.similarity_func(
inputs, curr_model_inputs[-1], interpretable_inps[-1], **kwargs
)
similarities.append(
curr_sim.flatten()
if isinstance(curr_sim, Tensor)
else torch.tensor([curr_sim], device=device)
)
if len(curr_model_inputs) == perturbations_per_eval:
if expanded_additional_args is None:
expanded_additional_args = _expand_additional_forward_args(
additional_forward_args, len(curr_model_inputs)
)
if expanded_target is None:
expanded_target = _expand_target(target, len(curr_model_inputs))
model_out = self._evaluate_batch(
curr_model_inputs,
expanded_target,
expanded_additional_args,
device,
)
if show_progress:
attr_progress.update()
outputs.append(model_out)
curr_model_inputs = []
if len(curr_model_inputs) > 0:
expanded_additional_args = _expand_additional_forward_args(
additional_forward_args, len(curr_model_inputs)
)
expanded_target = _expand_target(target, len(curr_model_inputs))
model_out = self._evaluate_batch(
curr_model_inputs,
expanded_target,
expanded_additional_args,
device,
)
if show_progress:
attr_progress.update()
outputs.append(model_out)
if show_progress:
attr_progress.close()
combined_interp_inps = torch.cat(interpretable_inps).float()
combined_outputs = (
torch.cat(outputs)
if len(outputs[0].shape) > 0
else torch.stack(outputs)
).float()
combined_sim = (
torch.cat(similarities)
if len(similarities[0].shape) > 0
else torch.stack(similarities)
).float()
dataset = TensorDataset(
combined_interp_inps, combined_outputs, combined_sim
)
self.interpretable_model.fit(DataLoader(dataset, batch_size=batch_count))
return self.interpretable_model.representation()
def _evaluate_batch(
self,
curr_model_inputs: List[TensorOrTupleOfTensorsGeneric],
expanded_target: TargetType,
expanded_additional_args: Any,
device: torch.device,
):
model_out = _run_forward(
self.forward_func,
_reduce_list(curr_model_inputs),
expanded_target,
expanded_additional_args,
)
if isinstance(model_out, Tensor):
assert model_out.numel() == len(curr_model_inputs), (
"Number of outputs is not appropriate, must return "
"one output per perturbed input"
)
if isinstance(model_out, Tensor):
return model_out.flatten()
return torch.tensor([model_out], device=device)
def has_convergence_delta(self) -> bool:
return False
@property
def multiplies_by_inputs(self):
return False
# Default transformations and methods
# for Lime child implementation.
def default_from_interp_rep_transform(curr_sample, original_inputs, **kwargs):
assert (
"feature_mask" in kwargs
), "Must provide feature_mask to use default interpretable representation transform"
assert (
"baselines" in kwargs
), "Must provide baselines to use default interpretable representation transform"
feature_mask = kwargs["feature_mask"]
if isinstance(feature_mask, Tensor):
binary_mask = curr_sample[0][feature_mask].bool()
return (
binary_mask.to(original_inputs.dtype) * original_inputs
+ (~binary_mask).to(original_inputs.dtype) * kwargs["baselines"]
)
else:
binary_mask = tuple(
curr_sample[0][feature_mask[j]].bool() for j in range(len(feature_mask))
)
return tuple(
binary_mask[j].to(original_inputs[j].dtype) * original_inputs[j]
+ (~binary_mask[j]).to(original_inputs[j].dtype) * kwargs["baselines"][j]
for j in range(len(feature_mask))
)
def get_exp_kernel_similarity_function(
distance_mode: str = "cosine", kernel_width: float = 1.0
) -> Callable:
r"""
This method constructs an appropriate similarity function to compute
weights for perturbed samples in LIME. Distance between the original
and perturbed inputs is computed based on the provided distance mode,
and the distance is passed through an exponential kernel with given
kernel width to convert to a range between 0 and 1.
The callable returned can be provided as the similarity_func for
Lime or LimeBase.
Args:
distance_mode (str, optional): Distance mode can be either "cosine" or
"euclidean" corresponding to either cosine distance
or Euclidean distance respectively. Distance is computed
by flattening the original inputs and perturbed inputs
(concatenating tuples of inputs if necessary) and computing
distances between the resulting vectors.
Default: "cosine"
kernel_width (float, optional):
Kernel width for exponential kernel applied to distance.
Default: 1.0
Returns:
*Callable*:
- **similarity_func** (*Callable*):
Similarity function. This callable can be provided as the
similarity_func for Lime or LimeBase.
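Examples::
>>> # A usage sketch; `net` and `input` are assumed to be an arbitrary
>>> # PyTorch model and a matching input tensor, neither defined here.
>>> sim_fn = get_exp_kernel_similarity_function("euclidean", kernel_width=0.75)
>>> lime = Lime(net, similarity_func=sim_fn)
>>> attr = lime.attribute(input, target=1)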
"""
def default_exp_kernel(original_inp, perturbed_inp, __, **kwargs):
flattened_original_inp = _flatten_tensor_or_tuple(original_inp).float()
flattened_perturbed_inp = _flatten_tensor_or_tuple(perturbed_inp).float()
if distance_mode == "cosine":
cos_sim = CosineSimilarity(dim=0)
distance = 1 - cos_sim(flattened_original_inp, flattened_perturbed_inp)
elif distance_mode == "euclidean":
distance = torch.norm(flattened_original_inp - flattened_perturbed_inp)
else:
raise ValueError("distance_mode must be either cosine or euclidean.")
return math.exp(-1 * (distance**2) / (2 * (kernel_width**2)))
return default_exp_kernel
def default_perturb_func(original_inp, **kwargs):
assert (
"num_interp_features" in kwargs
), "Must provide num_interp_features to use default interpretable sampling function"
if isinstance(original_inp, Tensor):
device = original_inp.device
else:
device = original_inp[0].device
probs = torch.ones(1, kwargs["num_interp_features"]) * 0.5
return torch.bernoulli(probs).to(device=device).long()
def construct_feature_mask(feature_mask, formatted_inputs):
if feature_mask is None:
feature_mask, num_interp_features = _construct_default_feature_mask(
formatted_inputs
)
else:
feature_mask = _format_tensor_into_tuples(feature_mask)
min_interp_features = int(
min(
torch.min(single_mask).item()
for single_mask in feature_mask
if single_mask.numel()
)
)
if min_interp_features != 0:
warnings.warn(
"Minimum element in feature mask is not 0, shifting indices to"
" start at 0."
)
feature_mask = tuple(
single_mask - min_interp_features for single_mask in feature_mask
)
num_interp_features = _get_max_feature_index(feature_mask) + 1
return feature_mask, num_interp_features
class Lime(LimeBase):
r"""
Lime is an interpretability method that trains an interpretable surrogate model
by sampling points around a specified input example and using model evaluations
at these points to train a simpler interpretable 'surrogate' model, such as a
linear model.
Lime provides a more specific implementation than LimeBase in order to expose
a consistent API with other perturbation-based algorithms. For more general
use of the LIME framework, consider using the LimeBase class directly and
defining custom sampling and transformation to / from interpretable
representation functions.
Lime assumes that the interpretable representation is a binary vector,
corresponding to some elements in the input being set to their baseline value
if the corresponding binary interpretable feature value is 0 or being set
to the original input value if the corresponding binary interpretable
feature value is 1. Input values can be grouped to correspond to the same
binary interpretable feature using a feature mask provided when calling
attribute, similar to other perturbation-based attribution methods.
One example of this setting is when applying Lime to an image classifier.
Pixels in an image can be grouped into super-pixels or segments, which
correspond to interpretable features, provided as a feature_mask when
calling attribute. Sampled binary vectors convey whether a super-pixel
is on (retains the original input values) or off (set to the corresponding
baseline value, e.g. black image). An interpretable linear model is trained
with input being the binary vectors and outputs as the corresponding scores
of the image classifier with the appropriate super-pixels masked based on the
binary vector. Coefficients of the trained surrogate
linear model convey the importance of each super-pixel.
More details regarding LIME can be found in the original paper:
https://arxiv.org/abs/1602.04938
"""
def __init__(
self,
forward_func: Callable,
interpretable_model: Optional[Model] = None,
similarity_func: Optional[Callable] = None,
perturb_func: Optional[Callable] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
interpretable_model (Model, optional): Model object to train
interpretable model.
This argument is optional and defaults to SkLearnLasso(alpha=0.01),
which is a wrapper around the Lasso linear model in SkLearn.
This requires having sklearn version >= 0.23 available.
Other predefined interpretable linear models are provided in
captum._utils.models.linear_model.
Alternatively, a custom model object must provide a `fit` method to
train the model, given a dataloader, with batches containing
three tensors:
- interpretable_inputs: Tensor
[2D num_samples x num_interp_features],
- expected_outputs: Tensor [1D num_samples],
- weights: Tensor [1D num_samples]
The model object must also provide a `representation` method to
access the appropriate coefficients or representation of the
interpretable model after fitting.
Note that calling fit multiple times should retrain the
interpretable model, since each attribution call reuses
the same given interpretable model object.
similarity_func (Callable, optional): Function which takes a single sample
along with its corresponding interpretable representation
and returns the weight of the interpretable sample for
training the interpretable model.
This is often referred to as a similarity kernel.
This argument is optional and defaults to a function which
applies an exponential kernel to the cosine distance between
the original input and perturbed input, with a kernel width
of 1.0.
A similarity function applying an exponential
kernel to cosine / euclidean distances can be constructed
using the provided get_exp_kernel_similarity_function in
captum.attr._core.lime.
Alternatively, a custom callable can also be provided.
The expected signature of this callable is:
>>> def similarity_func(
>>> original_input: Tensor or tuple[Tensor, ...],
>>> perturbed_input: Tensor or tuple[Tensor, ...],
>>> perturbed_interpretable_input:
>>> Tensor [2D 1 x num_interp_features],
>>> **kwargs: Any
>>> ) -> float or Tensor containing float scalar
perturbed_input and original_input will be the same type and
contain tensors of the same shape, with original_input
being the same as the input provided when calling attribute.
kwargs includes baselines, feature_mask, num_interp_features
(integer, determined from feature mask).
perturb_func (Callable, optional): Function which returns a single
sampled input, which is a binary vector of length
num_interp_features, or a generator of such tensors.
This function is optional; the default function returns
a binary vector where each element is selected
independently and uniformly at random. Custom
logic for selecting sampled binary vectors can
be implemented by providing a function with the
following expected signature:
>>> perturb_func(
>>> original_input: Tensor or tuple[Tensor, ...],
>>> **kwargs: Any
>>> ) -> Tensor [Binary 2D Tensor 1 x num_interp_features]
>>> or generator yielding such tensors
kwargs includes baselines, feature_mask, num_interp_features
(integer, determined from feature mask).
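Examples::
>>> # A sketch of a custom perturb_func, assuming single-tensor inputs
>>> # and that num_interp_features is received through kwargs as in the
>>> # default implementation. Each interpretable feature is kept with
>>> # probability 0.7 instead of 0.5. `net` is an assumed model.
>>> def biased_perturb_func(original_input, **kwargs):
>>>     probs = torch.ones(1, kwargs["num_interp_features"]) * 0.7
>>>     return torch.bernoulli(probs).to(original_input.device).long()
>>> lime = Lime(net, perturb_func=biased_perturb_func)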
"""
if interpretable_model is None:
interpretable_model = SkLearnLasso(alpha=0.01)
if similarity_func is None:
similarity_func = get_exp_kernel_similarity_function()
if perturb_func is None:
perturb_func = default_perturb_func
LimeBase.__init__(
self,
forward_func,
interpretable_model,
similarity_func,
perturb_func,
True,
default_from_interp_rep_transform,
None,
)
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
n_samples: int = 25,
perturbations_per_eval: int = 1,
return_input_shape: bool = True,
show_progress: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
This method attributes the output of the model with given target index
(in case it is provided, otherwise it assumes that output is a
scalar) to the inputs of the model using the approach described above,
training an interpretable model and returning a representation of the
interpretable model.
It is recommended to only provide a single example as input (tensors
with first dimension or batch size = 1). This is because LIME is generally
used for sample-based interpretability, training a separate interpretable
model to explain a model's prediction on each individual example.
A batch of inputs can also be provided as inputs, similar to
other perturbation-based attribution methods. In this case, if forward_func
returns a scalar per example, attributions will be computed for each
example independently, with a separate interpretable model trained for each
example. Note that the provided similarity and perturbation functions will be
given each example separately (first dimension = 1) in this case.
If forward_func returns a scalar per batch (e.g. loss), attributions will
still be computed using a single interpretable model for the full batch.
In this case, the similarity and perturbation functions will be given the
same original input containing the full batch.
The number of interpretable features is determined from the provided
feature mask, or if none is provided, from the default feature mask,
which considers each scalar input as a separate feature. It is
generally recommended to provide a feature mask which groups features
into a small number of interpretable features / components (e.g.
superpixels in images).
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which LIME
is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference value which replaces each
feature when the corresponding interpretable feature
is set to 0.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which surrogate model is trained
(for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other
types, the given argument is used for all forward
evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which correspond to the same
interpretable feature. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Values across
all tensors should be integers in the range 0 to
num_interp_features - 1, and indices corresponding to the
same feature should have the same value.
Note that features are grouped across tensors
(unlike feature ablation and occlusion), so
if the same index is used in different tensors, those
features are still grouped and added simultaneously.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature.
Default: None
n_samples (int, optional): The number of samples of the original
model used to train the surrogate interpretable model.
Default: `25` if `n_samples` is not provided.
perturbations_per_eval (int, optional): Allows multiple samples
to be processed simultaneously in one call to forward_func.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
return_input_shape (bool, optional): Determines whether the returned
tensor(s) only contain the coefficients for each interpretable
feature from the trained surrogate model, or
whether the returned attributions match the input shape.
When return_input_shape is True, the return type of attribute
matches the input shape, with each element containing the
coefficient of the corresponding interpretable feature.
All elements with the same value in the feature mask
will contain the same coefficient in the returned
attributions. If return_input_shape is False, a 1D
tensor is returned, containing only the coefficients
of the trained interpretable models, with length
num_interp_features.
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If return_input_shape = True, attributions will be
the same size as the provided inputs, with each value
providing the coefficient of the corresponding
interpretable feature.
If return_input_shape is False, a 1D
tensor is returned, containing only the coefficients
of the trained interpretable models, with length
num_interp_features.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 1 x 4 x 4
>>> input = torch.randn(1, 4, 4)
>>> # Defining Lime interpreter
>>> lime = Lime(net)
>>> # Computes attribution, with each of the 4 x 4 = 16
>>> # features as a separate interpretable feature
>>> attr = lime.attribute(input, target=1, n_samples=200)
>>> # Alternatively, we can group each 2x2 square of the inputs
>>> # as one 'interpretable' feature and perturb them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are set to their
>>> # baseline value, when the corresponding binary interpretable
>>> # feature is set to 0.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> # Computes the interpretable model, returning attributions
>>> # matching input shape.
>>> attr = lime.attribute(input, target=1, feature_mask=feature_mask)
"""
return self._attribute_kwargs(
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
feature_mask=feature_mask,
n_samples=n_samples,
perturbations_per_eval=perturbations_per_eval,
return_input_shape=return_input_shape,
show_progress=show_progress,
)
def _attribute_kwargs( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
n_samples: int = 25,
perturbations_per_eval: int = 1,
return_input_shape: bool = True,
show_progress: bool = False,
**kwargs,
) -> TensorOrTupleOfTensorsGeneric:
is_inputs_tuple = _is_tuple(inputs)
formatted_inputs, baselines = _format_input_baseline(inputs, baselines)
bsz = formatted_inputs[0].shape[0]
feature_mask, num_interp_features = construct_feature_mask(
feature_mask, formatted_inputs
)
if num_interp_features > 10000:
warnings.warn(
"Attempting to construct interpretable model with > 10000 features."
"This can be very slow or lead to OOM issues. Please provide a feature"
"mask which groups input features to reduce the number of interpretable"
"features. "
)
coefs: Tensor
if bsz > 1:
test_output = _run_forward(
self.forward_func, inputs, target, additional_forward_args
)
if isinstance(test_output, Tensor) and torch.numel(test_output) > 1:
if torch.numel(test_output) == bsz:
warnings.warn(
"You are providing multiple inputs for Lime / Kernel SHAP "
"attributions. This trains a separate interpretable model "
"for each example, which can be time consuming. It is "
"recommended to compute attributions for one example at a time."
)
output_list = []
for (
curr_inps,
curr_target,
curr_additional_args,
curr_baselines,
curr_feature_mask,
) in _batch_example_iterator(
bsz,
formatted_inputs,
target,
additional_forward_args,
baselines,
feature_mask,
):
coefs = super().attribute.__wrapped__(
self,
inputs=curr_inps if is_inputs_tuple else curr_inps[0],
target=curr_target,
additional_forward_args=curr_additional_args,
n_samples=n_samples,
perturbations_per_eval=perturbations_per_eval,
baselines=curr_baselines
if is_inputs_tuple
else curr_baselines[0],
feature_mask=curr_feature_mask
if is_inputs_tuple
else curr_feature_mask[0],
num_interp_features=num_interp_features,
show_progress=show_progress,
**kwargs,
)
if return_input_shape:
output_list.append(
self._convert_output_shape(
curr_inps,
curr_feature_mask,
coefs,
num_interp_features,
is_inputs_tuple,
)
)
else:
output_list.append(coefs.reshape(1, -1)) # type: ignore
return _reduce_list(output_list)
else:
raise AssertionError(
"Invalid number of outputs, forward function should return a"
"scalar per example or a scalar per input batch."
)
else:
assert perturbations_per_eval == 1, (
"Perturbations per eval must be 1 when forward function"
"returns single value per batch!"
)
coefs = super().attribute.__wrapped__(
self,
inputs=inputs,
target=target,
additional_forward_args=additional_forward_args,
n_samples=n_samples,
perturbations_per_eval=perturbations_per_eval,
baselines=baselines if is_inputs_tuple else baselines[0],
feature_mask=feature_mask if is_inputs_tuple else feature_mask[0],
num_interp_features=num_interp_features,
show_progress=show_progress,
**kwargs,
)
if return_input_shape:
return self._convert_output_shape(
formatted_inputs,
feature_mask,
coefs,
num_interp_features,
is_inputs_tuple,
)
else:
return coefs
@typing.overload
def _convert_output_shape(
self,
formatted_inp: Tuple[Tensor, ...],
feature_mask: Tuple[Tensor, ...],
coefs: Tensor,
num_interp_features: int,
is_inputs_tuple: Literal[True],
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _convert_output_shape(
self,
formatted_inp: Tuple[Tensor, ...],
feature_mask: Tuple[Tensor, ...],
coefs: Tensor,
num_interp_features: int,
is_inputs_tuple: Literal[False],
) -> Tensor:
...
def _convert_output_shape(
self,
formatted_inp: Tuple[Tensor, ...],
feature_mask: Tuple[Tensor, ...],
coefs: Tensor,
num_interp_features: int,
is_inputs_tuple: bool,
) -> Union[Tensor, Tuple[Tensor, ...]]:
coefs = coefs.flatten()
attr = [
torch.zeros_like(single_inp, dtype=torch.float)
for single_inp in formatted_inp
]
for tensor_ind in range(len(formatted_inp)):
for single_feature in range(num_interp_features):
attr[tensor_ind] += (
coefs[single_feature].item()
* (feature_mask[tensor_ind] == single_feature).float()
)
return _format_output(is_inputs_tuple, tuple(attr))
|
#!/usr/bin/env python3
from enum import Enum
from typing import Any, cast, List, Tuple, Union
import torch
from captum._utils.common import (
_expand_and_update_additional_forward_args,
_expand_and_update_baselines,
_expand_and_update_feature_mask,
_expand_and_update_target,
_format_output,
_format_tensor_into_tuples,
_is_tuple,
)
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import Attribution, GradientAttribution
from captum.attr._utils.common import _validate_noise_tunnel_type
from captum.log import log_usage
from torch import Tensor
class NoiseTunnelType(Enum):
smoothgrad = 1
smoothgrad_sq = 2
vargrad = 3
SUPPORTED_NOISE_TUNNEL_TYPES = list(NoiseTunnelType.__members__.keys())
class NoiseTunnel(Attribution):
r"""
Adds gaussian noise to each input in the batch `nt_samples` times
and applies the given attribution algorithm to each of the samples.
The attributions of the samples are combined based on the given noise
tunnel type (nt_type):
If nt_type is `smoothgrad`, the mean of the sampled attributions is
returned. This approximates smoothing the given attribution method
with a Gaussian Kernel.
If nt_type is `smoothgrad_sq`, the mean of the squared sample attributions
is returned.
If nt_type is `vargrad`, the variance of the sample attributions is
returned.
More details about adding noise can be found in the following papers:
* https://arxiv.org/abs/1810.03292
* https://arxiv.org/abs/1810.03307
* https://arxiv.org/abs/1706.03825
* https://arxiv.org/abs/1806.10758
This method currently also supports batches of multiple examples input,
however it can be computationally expensive depending on the model,
the dimensionality of the data and execution environment.
It is assumed that the batch size is the first dimension of input tensors.
"""
def __init__(self, attribution_method: Attribution) -> None:
r"""
Args:
attribution_method (Attribution): An instance of any attribution algorithm
of type `Attribution`. E.g. Integrated Gradients,
Conductance or Saliency.
"""
self.attribution_method = attribution_method
self.is_delta_supported = self.attribution_method.has_convergence_delta()
self._multiply_by_inputs = self.attribution_method.multiplies_by_inputs
self.is_gradient_method = isinstance(
self.attribution_method, GradientAttribution
)
Attribution.__init__(self, self.attribution_method.forward_func)
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
nt_type: str = "smoothgrad",
nt_samples: int = 5,
nt_samples_batch_size: Union[int, None] = None,
stdevs: Union[float, Tuple[float, ...]] = 1.0,
draw_baseline_from_distrib: bool = False,
**kwargs: Any,
) -> Union[
Tensor,
Tuple[Tensor, Tensor],
Tuple[Tensor, ...],
Tuple[Tuple[Tensor, ...], Tensor],
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
nt_type (str, optional): Smoothing type of the attributions.
`smoothgrad`, `smoothgrad_sq` or `vargrad`
Default: `smoothgrad` if `nt_type` is not provided.
nt_samples (int, optional): The number of randomly generated examples
per sample in the input batch. Random examples are
generated by adding gaussian random noise to each sample.
Default: `5` if `nt_samples` is not provided.
nt_samples_batch_size (int, optional): The number of the `nt_samples`
that will be processed together. With the help
of this parameter we can avoid out of memory situation and
reduce the number of randomly generated examples per sample
in each batch.
Default: None if `nt_samples_batch_size` is not provided. In
this case all `nt_samples` will be processed together.
stdevs (float or tuple of float, optional): The standard deviation
of gaussian noise with zero mean that is added to each
input in the batch. If `stdevs` is a single float value
then that same value is used for all inputs. If it is
a tuple, then it must have the same length as the inputs
tuple. In this case, each stdev value in the stdevs tuple
corresponds to the input with the same index in the inputs
tuple.
Default: `1.0` if `stdevs` is not provided.
draw_baseline_from_distrib (bool, optional): Indicates whether to
randomly draw baseline samples from the `baselines`
distribution provided as an input tensor.
Default: False
**kwargs (Any, optional): Contains a list of arguments that are passed
to `attribution_method` attribution algorithm.
Any additional arguments that should be used for the
chosen attribution method should be included here.
For instance, such arguments include
`additional_forward_args` and `baselines`.
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution with
respect to each input feature. attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*float*, returned if return_convergence_delta=True):
Approximation error computed by the
attribution algorithm. Not all attribution algorithms
return delta value. It is computed only for some
algorithms, e.g. integrated gradients.
Delta is computed for each input in the batch
and represents the arithmetic mean
across all `nt_samples` perturbed tensors for that input.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> ig = IntegratedGradients(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Creates noise tunnel
>>> nt = NoiseTunnel(ig)
>>> # Generates 10 perturbed input tensors per image.
>>> # Computes integrated gradients for class 3 for each generated
>>> # input and averages attributions across all 10
>>> # perturbed inputs per image
>>> attribution = nt.attribute(input, nt_type='smoothgrad',
>>> nt_samples=10, target=3)
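>>> # A further sketch (same `nt` and `input` as above): computes vargrad
>>> # over 20 samples, processing the perturbed samples in chunks of 5
>>> # to limit peak memory.
>>> attribution = nt.attribute(input, nt_type='vargrad', nt_samples=20,
>>>                            nt_samples_batch_size=5, target=3)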
"""
def add_noise_to_inputs(nt_samples_partition: int) -> Tuple[Tensor, ...]:
if isinstance(stdevs, tuple):
assert len(stdevs) == len(inputs), (
"The number of input tensors "
"in {} must be equal to the number of stdevs values {}".format(
len(inputs), len(stdevs)
)
)
else:
assert isinstance(
stdevs, float
), "stdevs must be type float. " "Given: {}".format(type(stdevs))
stdevs_ = (stdevs,) * len(inputs)
return tuple(
add_noise_to_input(input, stdev, nt_samples_partition).requires_grad_()
if self.is_gradient_method
else add_noise_to_input(input, stdev, nt_samples_partition)
for (input, stdev) in zip(inputs, stdevs_)
)
def add_noise_to_input(
input: Tensor, stdev: float, nt_samples_partition: int
) -> Tensor:
# batch size
bsz = input.shape[0]
# expand input size by the number of drawn samples
input_expanded_size = (bsz * nt_samples_partition,) + input.shape[1:]
# expand stdev for the shape of the input and number of drawn samples
stdev_expanded = torch.tensor(stdev, device=input.device).repeat(
input_expanded_size
)
# draws `np.prod(input_expanded_size)` samples from normal distribution
# with given input parametrization
# FIXME: it looks like it is very difficult to make torch.normal
# deterministic; this needs further investigation
noise = torch.normal(0, stdev_expanded)
return input.repeat_interleave(nt_samples_partition, dim=0) + noise
def update_sum_attribution_and_sq(
sum_attribution: List[Tensor],
sum_attribution_sq: List[Tensor],
attribution: Tensor,
i: int,
nt_samples_batch_size_inter: int,
) -> None:
bsz = attribution.shape[0] // nt_samples_batch_size_inter
attribution_shape = cast(
Tuple[int, ...], (bsz, nt_samples_batch_size_inter)
)
if len(attribution.shape) > 1:
attribution_shape += cast(Tuple[int, ...], tuple(attribution.shape[1:]))
attribution = attribution.view(attribution_shape)
current_attribution_sum = attribution.sum(dim=1, keepdim=False)
current_attribution_sq = torch.sum(attribution**2, dim=1, keepdim=False)
sum_attribution[i] = (
current_attribution_sum
if not isinstance(sum_attribution[i], torch.Tensor)
else sum_attribution[i] + current_attribution_sum
)
sum_attribution_sq[i] = (
current_attribution_sq
if not isinstance(sum_attribution_sq[i], torch.Tensor)
else sum_attribution_sq[i] + current_attribution_sq
)
def compute_partial_attribution(
inputs_with_noise_partition: Tuple[Tensor, ...], kwargs_partition: Any
) -> Tuple[Tuple[Tensor, ...], bool, Union[None, Tensor]]:
# smoothgrad_Attr(x) = 1 / n * sum(Attr(x + N(0, sigma^2)))
# NOTE: using __wrapped__ such that it does not log the inner logs
attributions = attr_func.__wrapped__( # type: ignore
self.attribution_method, # self
inputs_with_noise_partition
if is_inputs_tuple
else inputs_with_noise_partition[0],
**kwargs_partition,
)
delta = None
if self.is_delta_supported and return_convergence_delta:
attributions, delta = attributions
is_attrib_tuple = _is_tuple(attributions)
attributions = _format_tensor_into_tuples(attributions)
return (
cast(Tuple[Tensor, ...], attributions),
cast(bool, is_attrib_tuple),
delta,
)
def expand_partial(nt_samples_partition: int, kwargs_partial: dict) -> None:
# if the algorithm supports targets, baselines and/or
# additional_forward_args they will be expanded based
# on the nt_samples_partition and corresponding kwargs
# variables will be updated accordingly
_expand_and_update_additional_forward_args(
nt_samples_partition, kwargs_partial
)
_expand_and_update_target(nt_samples_partition, kwargs_partial)
_expand_and_update_baselines(
cast(Tuple[Tensor, ...], inputs),
nt_samples_partition,
kwargs_partial,
draw_baseline_from_distrib=draw_baseline_from_distrib,
)
_expand_and_update_feature_mask(nt_samples_partition, kwargs_partial)
def compute_smoothing(
expected_attributions: Tuple[Union[Tensor], ...],
expected_attributions_sq: Tuple[Union[Tensor], ...],
) -> Tuple[Tensor, ...]:
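# smoothgrad returns E[attr], smoothgrad_sq returns E[attr^2], and
# vargrad returns Var[attr] = E[attr^2] - (E[attr])^2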
if NoiseTunnelType[nt_type] == NoiseTunnelType.smoothgrad:
return expected_attributions
if NoiseTunnelType[nt_type] == NoiseTunnelType.smoothgrad_sq:
return expected_attributions_sq
vargrad = tuple(
expected_attribution_sq - expected_attribution * expected_attribution
for expected_attribution, expected_attribution_sq in zip(
expected_attributions, expected_attributions_sq
)
)
return cast(Tuple[Tensor, ...], vargrad)
def update_partial_attribution_and_delta(
attributions_partial: Tuple[Tensor, ...],
delta_partial: Tensor,
sum_attributions: List[Tensor],
sum_attributions_sq: List[Tensor],
delta_partial_list: List[Tensor],
nt_samples_partial: int,
) -> None:
for i, attribution_partial in enumerate(attributions_partial):
update_sum_attribution_and_sq(
sum_attributions,
sum_attributions_sq,
attribution_partial,
i,
nt_samples_partial,
)
if self.is_delta_supported and return_convergence_delta:
delta_partial_list.append(delta_partial)
return_convergence_delta: bool
return_convergence_delta = (
"return_convergence_delta" in kwargs and kwargs["return_convergence_delta"]
)
with torch.no_grad():
nt_samples_batch_size = (
nt_samples
if nt_samples_batch_size is None
else min(nt_samples, nt_samples_batch_size)
)
nt_samples_partition = nt_samples // nt_samples_batch_size
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = isinstance(inputs, tuple)
inputs = _format_tensor_into_tuples(inputs) # type: ignore
_validate_noise_tunnel_type(nt_type, SUPPORTED_NOISE_TUNNEL_TYPES)
kwargs_copy = kwargs.copy()
expand_partial(nt_samples_batch_size, kwargs_copy)
attr_func = self.attribution_method.attribute
sum_attributions: List[Union[None, Tensor]] = []
sum_attributions_sq: List[Union[None, Tensor]] = []
delta_partial_list: List[Tensor] = []
for _ in range(nt_samples_partition):
inputs_with_noise = add_noise_to_inputs(nt_samples_batch_size)
(
attributions_partial,
is_attrib_tuple,
delta_partial,
) = compute_partial_attribution(inputs_with_noise, kwargs_copy)
if len(sum_attributions) == 0:
sum_attributions = [None] * len(attributions_partial)
sum_attributions_sq = [None] * len(attributions_partial)
update_partial_attribution_and_delta(
cast(Tuple[Tensor, ...], attributions_partial),
cast(Tensor, delta_partial),
cast(List[Tensor], sum_attributions),
cast(List[Tensor], sum_attributions_sq),
delta_partial_list,
nt_samples_batch_size,
)
nt_samples_remaining = (
nt_samples - nt_samples_partition * nt_samples_batch_size
)
if nt_samples_remaining > 0:
inputs_with_noise = add_noise_to_inputs(nt_samples_remaining)
expand_partial(nt_samples_remaining, kwargs)
(
attributions_partial,
is_attrib_tuple,
delta_partial,
) = compute_partial_attribution(inputs_with_noise, kwargs)
update_partial_attribution_and_delta(
cast(Tuple[Tensor, ...], attributions_partial),
cast(Tensor, delta_partial),
cast(List[Tensor], sum_attributions),
cast(List[Tensor], sum_attributions_sq),
delta_partial_list,
nt_samples_remaining,
)
expected_attributions = tuple(
[
cast(Tensor, sum_attribution) * 1 / nt_samples
for sum_attribution in sum_attributions
]
)
expected_attributions_sq = tuple(
[
cast(Tensor, sum_attribution_sq) * 1 / nt_samples
for sum_attribution_sq in sum_attributions_sq
]
)
attributions = compute_smoothing(
cast(Tuple[Tensor, ...], expected_attributions),
cast(Tuple[Tensor, ...], expected_attributions_sq),
)
delta = None
if self.is_delta_supported and return_convergence_delta:
delta = torch.cat(delta_partial_list, dim=0)
return self._apply_checks_and_return_attributions(
attributions, is_attrib_tuple, return_convergence_delta, delta
)
def _apply_checks_and_return_attributions(
self,
attributions: Tuple[Tensor, ...],
is_attrib_tuple: bool,
return_convergence_delta: bool,
delta: Union[None, Tensor],
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
attributions = _format_output(is_attrib_tuple, attributions)
ret = (
(attributions, cast(Tensor, delta))
if self.is_delta_supported and return_convergence_delta
else attributions
)
ret = cast(
Union[
TensorOrTupleOfTensorsGeneric,
Tuple[TensorOrTupleOfTensorsGeneric, Tensor],
],
ret,
)
return ret
def has_convergence_delta(self) -> bool:
return self.is_delta_supported
|
#!/usr/bin/env python3
from collections import defaultdict
from copy import copy
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
from captum._utils.common import (
_format_baseline,
_format_feature_mask,
_format_output,
_format_tensor_into_tuples,
_get_max_feature_index,
_run_forward,
)
from captum._utils.typing import BaselineType
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._utils.attribution import Attribution
from torch import Tensor
class InputRole:
need_attr = 0
need_forward = 1
no_forward = 2
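# InputRole semantics, as used by _perturb_inputs and _forward_with_dataloader:
# need_attr (0): passed to forward_func, perturbed and attributed
# need_forward (1): passed to forward_func unchanged, not attributed
# no_forward (2): never passed to forward_func, but still visible to the
# reduce function through the perturbed inputs (e.g. labels)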
SUPPORTED_METHODS = {FeatureAblation}
# default reducer when reduce is None: simply concat the outputs along the batch dimension
def _concat_tensors(accum, cur_output, _):
return cur_output if accum is None else torch.cat([accum, cur_output])
def _create_perturbation_mask(
perturbed_feature_indices: Tensor, # 1D tensor of one-hot feature indices
feature_mask: Tuple[Tensor, ...],
feature_idx_to_mask_idx: Dict[int, List[int]],
) -> Tuple[Union[Tensor, None], ...]:
"""
Create binary mask for inputs based on perturbed one-hot feature indices
Use None if no perturbation is needed for the corresponding input
"""
# a set of input/mask indices that need perturbation
perturbation_mask_indices = set()
for i, v in enumerate(perturbed_feature_indices.tolist()):
# value 0 means the feature has been perturbed
if not v:
perturbation_mask_indices |= set(feature_idx_to_mask_idx[i])
# create binary mask for inputs & set it to None if no perturbation is needed
perturbation_mask = tuple(
perturbed_feature_indices[mask_elem] if i in perturbation_mask_indices else None
for i, mask_elem in enumerate(feature_mask)
)
return perturbation_mask
def _perturb_inputs(
inputs: Iterable[Any],
input_roles: Tuple[int],
baselines: Tuple[Union[int, float, Tensor], ...],
perturbation_mask: Tuple[Union[Tensor, None], ...],
) -> Tuple[Any, ...]:
"""
Perturb inputs based on perturbation mask and baselines
"""
perturbed_inputs = []
attr_inp_count = 0
for inp, role in zip(inputs, input_roles):
if role != InputRole.need_attr:
perturbed_inputs.append(inp)
continue
pert_mask = perturbation_mask[attr_inp_count]
# no perturbation is needed for this input
if pert_mask is None:
perturbed_inputs.append(inp)
else:
baseline = baselines[attr_inp_count]
perturbed_inp = inp * pert_mask + baseline * (1 - pert_mask)
perturbed_inputs.append(perturbed_inp)
attr_inp_count += 1
perturbed_inputs = tuple(perturbed_inputs)
return perturbed_inputs
def _convert_output_shape(
unique_attr: Tensor,
attr_inputs: Tuple[Tensor, ...],
feature_mask: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
"""
Convert the shape of a single tensor of unique feature attributions
to match the shape of the inputs returned by the dataloader
"""
# unique_attr in shape(*output_dims, n_features)
output_dims = unique_attr.shape[:-1]
n_features = unique_attr.shape[-1]
attr = []
for inp, mask in zip(attr_inputs, feature_mask):
# input in shape(batch_size, *inp_feature_dims)
# attribute in shape(*output_dims, *inp_feature_dims)
attr_shape = (*output_dims, *inp.shape[1:])
expanded_feature_indices = mask.expand(attr_shape)
if len(inp.shape) > 2:
# exclude the batch dim and the last dim of the input shape
extra_inp_dims = list(inp.shape[1:-1])
# unsqueeze unique_attr to have the same number of dims as inp
# (*output_dims, 1..., 1, n_features)
# then broadcast to (*output_dims, *inp.shape[1:-1], n_features)
n_extra_dims = len(extra_inp_dims)
unsqueezed_shape = (*output_dims, *(1,) * n_extra_dims, n_features)
expanded_shape = (*output_dims, *extra_inp_dims, n_features)
expanded_unique_attr = unique_attr.reshape(unsqueezed_shape).expand(
expanded_shape
)
else:
expanded_unique_attr = unique_attr
# gather from (*output_dims, *inp.shape[1:-1], n_features)
inp_attr = torch.gather(expanded_unique_attr, -1, expanded_feature_indices)
attr.append(inp_attr)
return tuple(attr)
class DataLoaderAttribution(Attribution):
r"""
Decorate a perturbation-based attribution algorithm to make it work with dataloaders.
The decorated instance will calculate attribution in the
same way as configured in the original attribution instance, but it will provide a
new "attribute" function which accepts a PyTorch "dataloader" instance as the input
instead of a single batched "tensor" and supports customizing a "reduce" function to
determine how the forward return of each iteration of the dataloader should be
aggregated into a single metric tensor to attribute against. This is
especially useful for attributing against corpus-wise metrics,
e.g., precision & recall.
"""
def __init__(self, attr_method: Attribution) -> None:
r"""
Args:
attr_method (Attribution): An instance of any attribution algorithm
of type `Attribution`. E.g. Integrated Gradients,
Conductance or Saliency.
"""
assert (
type(attr_method) in SUPPORTED_METHODS
), f"DataloaderAttribution does not support {type(attr_method)}"
super().__init__(attr_method.forward_func)
# shallow copy is enough to avoid modifying original instance
self.attr_method = copy(attr_method)
self.attr_method.forward_func = self._forward_with_dataloader
def _forward_with_dataloader(
self,
batched_perturbed_feature_indices: Tensor,
dataloader: torch.utils.data.DataLoader,
input_roles: Tuple[int],
baselines: Tuple[Union[int, float, Tensor], ...],
feature_mask: Tuple[Tensor, ...],
reduce: Callable,
to_metric: Optional[Callable],
show_progress: bool,
feature_idx_to_mask_idx: Dict[int, List[int]],
):
"""
Wrapper of the original given forward_func to be used in the attribution method
It iterates over the dataloader with the given forward_func
"""
# batched_perturbed_feature_indices in shape(n_perturb, n_features)
# n_perturb may be smaller than perturbations_per_pass when there are not enough perturbations left
perturbation_mask_list: List[Tuple[Union[Tensor, None], ...]] = [
_create_perturbation_mask(
perturbed_feature_indices,
feature_mask,
feature_idx_to_mask_idx,
)
for perturbed_feature_indices in batched_perturbed_feature_indices
]
# each perturbation needs an accum state
accum_states = [None for _ in range(len(perturbation_mask_list))]
# traverse the dataloader
for inputs in dataloader:
# for each batch read from the dataloader,
# apply every perturbation based on perturbations_per_pass
for i, perturbation_mask in enumerate(perturbation_mask_list):
perturbed_inputs = _perturb_inputs(
inputs, input_roles, baselines, perturbation_mask
)
# due to explicitly defined roles
# we can keep inputs in their original order
# regardless of whether they need attribution,
# instead of using additional_forward_args
forward_inputs = tuple(
_
for _, role in zip(perturbed_inputs, input_roles)
if role != InputRole.no_forward
)
output = _run_forward(
self.forward_func,
forward_inputs,
)
accum_states[i] = reduce(accum_states[i], output, perturbed_inputs)
accum_results = [
to_metric(accum) if to_metric else accum for accum in accum_states
]
assert all(type(r) is Tensor for r in accum_results), (
"Accumulated metrics for attribution must be a Tensor,"
f"received: {next(r for r in accum_results if type(r) is not Tensor)}"
)
# shape(n_perturb * output_dims[0], *output_dims[1:])
# the underlying attr method needs to support the forward_func output's
# 1st dim growing with perturbations_per_eval
batched_accum = torch.stack(accum_results, dim=0)
return batched_accum
def attribute(
self,
dataloader: torch.utils.data.DataLoader,
input_roles: Optional[Tuple[int, ...]] = None,
baselines: BaselineType = None,
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
reduce: Optional[Callable] = None,
to_metric: Optional[Callable] = None,
perturbations_per_pass: int = 1,
show_progress: bool = False,
return_input_shape: bool = True,
) -> Union[Tensor, Tuple[Tensor, ...]]:
r"""
Args:
dataloader (torch.utils.data.DataLoader): the dataloader to attribute, which should
return a tuple of consistent size for every iteration
input_roles (tuple[int, ...], optional): a tuple of integers to define the
role of each element returned from the dataloader. It should
have the same size as the return of the dataloader.
The available roles are:
0: the element is passed to forward_func and needs attribution.
It must be a tensor.
1: the element is passed to forward_func but does not need
attribution, like additional_forward_args.
2: the element is not passed to forward_func at all (it is still
available to the reduce function). A typical example is the label.
baselines (Union[Tensor, tuple[Tensor, ...]], optional): same as the
baseline in attribute. The same baseline will be
applied to the entire dataloader. The first dimension is
assumed to be batch size and it must be 1. Baselines should only
be specified for the dataloader's returns that need
attribution (role = 0)
feature_mask (Union[Tensor, tuple[Tensor, ...]], optional): same as the
feature_mask in attribute. The same feature_mask will be
applied to the entire dataloader. The first dimension is
assumed to be batch size and it must be 1. Mask should only
be specified for the dataloader's returns that need
attribution (role = 0)
reduce (Callable, optional): a function to accumulate the forward output of
each iteration of the dataloader. The function signature is:
``reduce(accum, current_output, current_inputs) -> accum``,
where:
accum (Any): accumulated states, can be any type
current_output (Tensor): current output tensor from forward_func
current_inputs (tuple[Any,...]): current inputs from dataloader
to_metric (Callable, optional): an optional function to further convert
accumulated results through "reduce" after tranversing the whole
dataloader to a single tensor of metrics to calculate
attribution against. The function signature is:
``to_metric(accum) -> metric``, where:
accum (Any): accumulated state from reduce function
metric (Tensor): final result to be attributed, must be a Tensor
If None, will directly attribute w.r.t the reduced ``accum``
perturbations_per_pass (int, optional): the number of perturbations to execute
concurrently in each traverse of the dataloader. The number of
traverses needed is
ceil(n_perturbations / perturbations_per_pass).
This argument offers control of the trade-off between memory
and efficiency. If the dataloader involves slow operations like
remote request or file I/O, multiple traversals can be
inefficient. On the other hand, each perturbation needs to
store its accumulated outputs of the reduce
function until the end of the data traverse.
return_input_shape (bool, optional): if True, returns the attribution
following the input shapes given by the dataloader.
Otherwise, returns a single tensor for the attributions of
all the features, where the last dimension
is the number of features.
Returns:
**attributions** :
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution with respect to each input feature.
if return_input_shape is True, attributions will be
the same size as the given dataloader's returns that need
attribution (role = 0), with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
If return_input_shape is False, a single tensor is returned
where each index of the last dimension represents a feature
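Examples::
>>> # A minimal sketch, assuming `model` maps a batch of inputs to
>>> # per-example scores and `dl` is a DataLoader yielding
>>> # (inputs, labels) batches; none of these are defined here.
>>> # Labels are kept out of forward_func (role 2) and the concatenated
>>> # outputs are collapsed into a single mean metric to attribute.
>>> fa = FeatureAblation(model)
>>> dl_attr = DataLoaderAttribution(fa)
>>> attr = dl_attr.attribute(
>>>     dl,
>>>     input_roles=(InputRole.need_attr, InputRole.no_forward),
>>>     to_metric=lambda accum: accum.mean().reshape(1),
>>> )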
"""
inputs = next(iter(dataloader))
is_inputs_tuple = True
if type(inputs) is list:
# support list as it is a common return type for dataloader in torch
inputs = tuple(inputs)
elif type(inputs) is not tuple:
is_inputs_tuple = False
inputs = _format_tensor_into_tuples(inputs)
if input_roles:
assert len(input_roles) == len(inputs), (
"input_roles must have the same size as the return of the dataloader,",
f"length of input_roles is {len(input_roles)} ",
f"whereas the length of dataloader return is {len(inputs)}",
)
assert any(role == InputRole.need_attr for role in input_roles), (
"input_roles must contain at least one element need attribution"
f"({InputRole.need_attr}), received input_roles: {input_roles}"
)
else:
# by default, assume every element in the dataloader needs attribution
input_roles = tuple(InputRole.need_attr for _ in inputs)
attr_inputs = tuple(
inp for role, inp in zip(input_roles, inputs) if role == InputRole.need_attr
)
baselines = _format_baseline(baselines, attr_inputs)
assert len(attr_inputs) == len(baselines), (
"Baselines must have the same size as the return of the dataloader ",
"that need attribution",
f"length of baseline is {len(baselines)} ",
f'whereas the length of dataloader return with role "0" is {len(inputs)}',
)
for i, baseline in enumerate(baselines):
if isinstance(baseline, Tensor):
assert baseline.size(0) == 1, (
"If the baseline is a tensor, "
"its 1st dim of baseline must be 1 so it can be broadacasted to "
"any batch of the dataloader:"
f"baselines[{i}].shape = {baseline.shape}"
)
feature_mask = _format_feature_mask(feature_mask, attr_inputs)
assert len(attr_inputs) == len(feature_mask), (
"Feature mask must have the same size as the return of the dataloader ",
"that need attribution",
f"length of feature_mask is {len(feature_mask)} ",
f'whereas the length of dataloader return with role "0" is {len(inputs)}',
)
for i, each_mask in enumerate(feature_mask):
assert each_mask.size(0) == 1, (
"The 1st dim of feature_mask must be 1 so it can be broadcasted to "
"any batch of the dataloader:"
f"feature_mask[{i}].shape = {each_mask.shape}"
)
# map from each feature index to the indices of masks that contain it
feature_idx_to_mask_idx = defaultdict(list)
for i, mask in enumerate(feature_mask):
unique_feature_indices = torch.unique(mask).tolist()
for feature_idx in unique_feature_indices:
feature_idx_to_mask_idx[feature_idx].append(i)
max_feature_idx = _get_max_feature_index(feature_mask)
n_features = max_feature_idx + 1
if reduce is None:
reduce = _concat_tensors
# all-ones indicator tensor over the interpretable features (nothing perturbed)
feature_indices = torch.ones((1, n_features), device=attr_inputs[0].device)
# unique_attr in shape(*output_dims, n_features)
unique_attr = self.attr_method.attribute(
feature_indices,
perturbations_per_eval=perturbations_per_pass,
additional_forward_args=(
dataloader,
input_roles,
baselines,
feature_mask,
reduce,
to_metric,
show_progress,
feature_idx_to_mask_idx,
),
)
if not return_input_shape:
return unique_attr
else:
attr = _convert_output_shape(
unique_attr,
attr_inputs,
feature_mask,
)
return _format_output(is_inputs_tuple, attr)
|
#!/usr/bin/env python3
import warnings
from typing import Any, List, Tuple, Union
import torch
import torch.nn.functional as F
from captum._utils.common import (
_format_output,
_format_tensor_into_tuples,
_is_tuple,
_register_backward_hook,
)
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
class ModifiedReluGradientAttribution(GradientAttribution):
def __init__(self, model: Module, use_relu_grad_output: bool = False) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
"""
GradientAttribution.__init__(self, model)
self.model = model
self.backward_hooks: List[RemovableHandle] = []
self.use_relu_grad_output = use_relu_grad_output
assert isinstance(self.model, torch.nn.Module), (
"Given model must be an instance of torch.nn.Module to properly hook"
" ReLU layers."
)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Computes attribution by overriding ReLU gradients. Based on the constructor
flag use_relu_grad_output, performs either guided backpropagation (if False)
or deconvolution (if True). This class is the parent class of both these
methods; more information on usage can be found in the docstrings for each
implementing class.
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
# set hooks for overriding ReLU gradients
warnings.warn(
"Setting backward hooks on ReLU activations."
"The hooks will be removed after the attribution is finished"
)
try:
self.model.apply(self._register_hooks)
gradients = self.gradient_func(
self.forward_func, inputs, target, additional_forward_args
)
finally:
self._remove_hooks()
undo_gradient_requirements(inputs, gradient_mask)
return _format_output(is_inputs_tuple, gradients)
def _register_hooks(self, module: Module):
if isinstance(module, torch.nn.ReLU):
hooks = _register_backward_hook(module, self._backward_hook, self)
self.backward_hooks.extend(hooks)
def _backward_hook(
self,
module: Module,
grad_input: Union[Tensor, Tuple[Tensor, ...]],
grad_output: Union[Tensor, Tuple[Tensor, ...]],
):
to_override_grads = grad_output if self.use_relu_grad_output else grad_input
if isinstance(to_override_grads, tuple):
return tuple(
F.relu(to_override_grad) for to_override_grad in to_override_grads
)
else:
return F.relu(to_override_grads)
def _remove_hooks(self):
for hook in self.backward_hooks:
hook.remove()
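# Illustrative sketch (standalone, not part of the library): the hook above
# overrides ReLU gradients during the backward pass. A minimal version using
# PyTorch's register_full_backward_hook, clamping the gradient flowing into
# the ReLU to be non-negative (the Guided Backprop variant), might look like
# the following; Deconvolution would clamp grad_output instead.
def _demo_relu_gradient_override():
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    relu = nn.ReLU()

    def clamp_grad_input(module, grad_input, grad_output):
        # Replace the gradient w.r.t. the ReLU input with its positive part.
        return tuple(F.relu(g) for g in grad_input)

    handle = relu.register_full_backward_hook(clamp_grad_input)
    x = torch.tensor([-1.0, 2.0], requires_grad=True)
    (-relu(x)).sum().backward()
    handle.remove()
    # Without the hook, x.grad would be [0., -1.]; with it, [0., 0.].
    return x.grad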
class GuidedBackprop(ModifiedReluGradientAttribution):
r"""
Computes attribution using guided backpropagation. Guided backpropagation
computes the gradient of the target output with respect to the input,
but gradients of ReLU functions are overridden so that only
non-negative gradients are backpropagated.
More details regarding the guided backpropagation algorithm can be found
in the original paper here:
https://arxiv.org/abs/1412.6806
Warning: Ensure that all ReLU operations in the forward function of the
given model are performed using a module (torch.nn.ReLU).
If torch.nn.functional.relu is used, gradients are not overridden
appropriately.
"""
def __init__(self, model: Module) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
"""
ModifiedReluGradientAttribution.__init__(
self, model, use_relu_grad_output=False
)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The guided backprop gradients with respect to each
input feature. Attributions will always
be the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> gbp = GuidedBackprop(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes Guided Backprop attribution scores for class 3.
>>> attribution = gbp.attribute(input, target=3)
"""
return super().attribute.__wrapped__(
self, inputs, target, additional_forward_args
)
class Deconvolution(ModifiedReluGradientAttribution):
r"""
Computes attribution using deconvolution. Deconvolution
computes the gradient of the target output with respect to the input,
but gradients of ReLU functions are overridden so that the gradient
of the ReLU input is simply computed taking ReLU of the output gradient,
essentially only propagating non-negative gradients (without
dependence on the sign of the ReLU input).
More details regarding the deconvolution algorithm can be found
in these papers:
https://arxiv.org/abs/1311.2901
https://link.springer.com/chapter/10.1007/978-3-319-46466-4_8
Warning: Ensure that all ReLU operations in the forward function of the
given model are performed using a module (torch.nn.ReLU).
If torch.nn.functional.relu is used, gradients are not overridden
appropriately.
"""
def __init__(self, model: Module) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
"""
ModifiedReluGradientAttribution.__init__(self, model, use_relu_grad_output=True)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The deconvolution attributions with respect to each
input feature. Attributions will always
be the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> deconv = Deconvolution(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes Deconvolution attribution scores for class 3.
>>> attribution = deconv.attribute(input, target=3)
"""
return super().attribute.__wrapped__(
self, inputs, target, additional_forward_args
)
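# Usage sketch (the toy model below is a hypothetical stand-in; the attribute()
# calls mirror the class docstrings above). Both methods share the same
# interface and differ only in how ReLU gradients are overridden.
def _demo_guided_backprop_vs_deconvolution():
    import torch
    import torch.nn as nn

    net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 3))
    inp = torch.randn(2, 4, requires_grad=True)
    gbp_attr = GuidedBackprop(net).attribute(inp, target=0)
    deconv_attr = Deconvolution(net).attribute(inp, target=0)
    # Both attributions have the same shape as the input: (2, 4).
    return gbp_attr, deconv_attr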
|
#!/usr/bin/env python3
import typing
from collections import defaultdict
from typing import Any, cast, List, Tuple, Union
import torch.nn as nn
from captum._utils.common import (
_format_output,
_format_tensor_into_tuples,
_is_tuple,
_register_backward_hook,
_run_forward,
)
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import Literal, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution
from captum.attr._utils.common import _sum_rows
from captum.attr._utils.custom_modules import Addition_Module
from captum.attr._utils.lrp_rules import EpsilonRule, PropagationRule
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
class LRP(GradientAttribution):
r"""
Layer-wise relevance propagation is based on a backward propagation
mechanism applied sequentially to all layers of the model. Here, the
model output score represents the initial relevance which is decomposed
into values for each neuron of the underlying layers. The decomposition
is defined by rules that are chosen for each layer, involving its weights
and activations. Details on the model can be found in the original paper
[https://doi.org/10.1371/journal.pone.0130140]. The implementation is
inspired by the tutorial of the same group
[https://doi.org/10.1016/j.dsp.2017.10.011] and the publication by
Ancona et al. [https://openreview.net/forum?id=Sy21R9JAW].
"""
def __init__(self, model: Module) -> None:
r"""
Args:
model (Module): The forward function of the model or any modification of
it. Custom rules for a given layer need to be defined as attribute
`module.rule` and need to be of type PropagationRule. If no rule is
specified for a layer, a pre-defined default rule for the module type
is used.
"""
GradientAttribution.__init__(self, model)
self.model = model
self._check_rules()
@property
def multiplies_by_inputs(self) -> bool:
return True
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
verbose: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
verbose: bool = False,
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
verbose: bool = False,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which relevance is
propagated. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
verbose (bool, optional): Indicates whether information on application
of rules is printed during propagation.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**
or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The propagated relevance values with respect to each
input feature. The values are normalized by the output score
value (sum(relevance)=1). To obtain values comparable to other
methods or implementations these values need to be multiplied
by the output score. Attributions will always
be the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned. The sum of attributions
is one and does not correspond to the prediction score as in other
implementations.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
Delta is calculated per example, meaning that the number of
elements in the returned delta tensor is equal to the number
of examples in the inputs.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities. It has one
>>> # Conv2D and a ReLU layer.
>>> net = ImageClassifier()
>>> lrp = LRP(net)
>>> input = torch.randn(3, 3, 32, 32)
>>> # Attribution size matches input size: 3x3x32x32
>>> attribution = lrp.attribute(input, target=5)
"""
self.verbose = verbose
self._original_state_dict = self.model.state_dict()
self.layers: List[Module] = []
self._get_layers(self.model)
self._check_and_attach_rules()
self.backward_handles: List[RemovableHandle] = []
self.forward_handles: List[RemovableHandle] = []
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
try:
# 1. Forward pass: Change weights of layers according to selected rules.
output = self._compute_output_and_change_weights(
inputs, target, additional_forward_args
)
# 2. Forward pass + backward pass: Register hooks to configure relevance
# propagation and execute back-propagation.
self._register_forward_hooks()
normalized_relevances = self.gradient_func(
self._forward_fn_wrapper, inputs, target, additional_forward_args
)
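# Rescale each example's normalized relevance by its output score; the
# reshape below appends singleton dimensions so the per-example output
# broadcasts across the remaining relevance dimensions.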
relevances = tuple(
normalized_relevance
* output.reshape((-1,) + (1,) * (normalized_relevance.dim() - 1))
for normalized_relevance in normalized_relevances
)
finally:
self._restore_model()
undo_gradient_requirements(inputs, gradient_mask)
if return_convergence_delta:
return (
_format_output(is_inputs_tuple, relevances),
self.compute_convergence_delta(relevances, output),
)
else:
return _format_output(is_inputs_tuple, relevances) # type: ignore
def has_convergence_delta(self) -> bool:
return True
def compute_convergence_delta(
self, attributions: Union[Tensor, Tuple[Tensor, ...]], output: Tensor
) -> Tensor:
"""
Here, we use the completeness property of LRP: The relevance is conserved
during propagation through the model's layers. Therefore, the difference
between the sum of attribution (relevance) values and model output is taken as
the convergence delta. It should be zero for functional attribution. However,
when rules with an epsilon value are used for stability reasons, relevance is
absorbed during propagation and the convergence delta is non-zero.
Args:
attributions (Tensor or tuple[Tensor, ...]): Attribution scores that
are precomputed by an attribution algorithm.
Attributions can be provided in form of a single tensor
or a tuple of those. It is assumed that attribution
tensor's dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
output (Tensor): The output value with respect to which
the attribution values are computed. This value corresponds to
the target score of a classification model. The given tensor
should only have a single element.
Returns:
*Tensor*:
- **delta** Difference of relevance in output layer and input layer.
"""
if isinstance(attributions, tuple):
summed_attr = cast(
Tensor, sum(_sum_rows(attr) for attr in attributions)
)
else:
summed_attr = _sum_rows(attributions)
return output.flatten() - summed_attr.flatten()
def _get_layers(self, model: Module) -> None:
for layer in model.children():
if len(list(layer.children())) == 0:
self.layers.append(layer)
else:
self._get_layers(layer)
def _check_and_attach_rules(self) -> None:
for layer in self.layers:
if hasattr(layer, "rule"):
layer.activations = {} # type: ignore
layer.rule.relevance_input = defaultdict(list) # type: ignore
layer.rule.relevance_output = {} # type: ignore
elif type(layer) in SUPPORTED_LAYERS_WITH_RULES.keys():
layer.activations = {} # type: ignore
layer.rule = SUPPORTED_LAYERS_WITH_RULES[type(layer)]() # type: ignore
layer.rule.relevance_input = defaultdict(list) # type: ignore
layer.rule.relevance_output = {} # type: ignore
elif type(layer) in SUPPORTED_NON_LINEAR_LAYERS:
layer.rule = None # type: ignore
else:
raise TypeError(
(
f"Module of type {type(layer)} has no rule defined and no"
"default rule exists for this module type. Please, set a rule"
"explicitly for this module and assure that it is appropriate"
"for this type of layer."
)
)
def _check_rules(self) -> None:
for module in self.model.modules():
if hasattr(module, "rule"):
if (
not isinstance(module.rule, PropagationRule)
and module.rule is not None
):
raise TypeError(
(
f"Please select propagation rules inherited from class "
f"PropagationRule for module: {module}"
)
)
def _register_forward_hooks(self) -> None:
for layer in self.layers:
if type(layer) in SUPPORTED_NON_LINEAR_LAYERS:
backward_handles = _register_backward_hook(
layer, PropagationRule.backward_hook_activation, self
)
self.backward_handles.extend(backward_handles)
else:
forward_handle = layer.register_forward_hook(
layer.rule.forward_hook # type: ignore
)
self.forward_handles.append(forward_handle)
if self.verbose:
print(f"Applied {layer.rule} on layer {layer}")
def _register_weight_hooks(self) -> None:
for layer in self.layers:
if layer.rule is not None:
forward_handle = layer.register_forward_hook(
layer.rule.forward_hook_weights # type: ignore
)
self.forward_handles.append(forward_handle)
def _register_pre_hooks(self) -> None:
for layer in self.layers:
if layer.rule is not None:
forward_handle = layer.register_forward_pre_hook(
layer.rule.forward_pre_hook_activations # type: ignore
)
self.forward_handles.append(forward_handle)
def _compute_output_and_change_weights(
self,
inputs: Tuple[Tensor, ...],
target: TargetType,
additional_forward_args: Any,
) -> Tensor:
try:
self._register_weight_hooks()
output = _run_forward(self.model, inputs, target, additional_forward_args)
finally:
self._remove_forward_hooks()
# Register pre_hooks that pass the initial activations from before weight
# adjustments as inputs to the layers with adjusted weights. This procedure
# is important for graph generation in the 2nd forward pass.
self._register_pre_hooks()
return output
def _remove_forward_hooks(self) -> None:
for forward_handle in self.forward_handles:
forward_handle.remove()
def _remove_backward_hooks(self) -> None:
for backward_handle in self.backward_handles:
backward_handle.remove()
for layer in self.layers:
if hasattr(layer.rule, "_handle_input_hooks"):
for handle in layer.rule._handle_input_hooks: # type: ignore
handle.remove()
if hasattr(layer.rule, "_handle_output_hook"):
layer.rule._handle_output_hook.remove() # type: ignore
def _remove_rules(self) -> None:
for layer in self.layers:
if hasattr(layer, "rule"):
del layer.rule
def _clear_properties(self) -> None:
for layer in self.layers:
if hasattr(layer, "activation"):
del layer.activation
def _restore_state(self) -> None:
self.model.load_state_dict(self._original_state_dict) # type: ignore
def _restore_model(self) -> None:
self._restore_state()
self._remove_backward_hooks()
self._remove_forward_hooks()
self._remove_rules()
self._clear_properties()
def _forward_fn_wrapper(self, *inputs: Tensor) -> Tensor:
"""
Wraps a forward function with addition of zero as a workaround to
https://github.com/pytorch/pytorch/issues/35802 discussed in
https://github.com/pytorch/captum/issues/143#issuecomment-611750044
#TODO: Remove when bugs are fixed
"""
adjusted_inputs = tuple(
input + 0 if input is not None else input for input in inputs
)
return self.model(*adjusted_inputs)
SUPPORTED_LAYERS_WITH_RULES = {
nn.MaxPool1d: EpsilonRule,
nn.MaxPool2d: EpsilonRule,
nn.MaxPool3d: EpsilonRule,
nn.Conv2d: EpsilonRule,
nn.AvgPool2d: EpsilonRule,
nn.AdaptiveAvgPool2d: EpsilonRule,
nn.Linear: EpsilonRule,
nn.BatchNorm2d: EpsilonRule,
Addition_Module: EpsilonRule,
}
SUPPORTED_NON_LINEAR_LAYERS = [nn.ReLU, nn.Dropout, nn.Tanh]
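# Usage sketch (the toy model below is a hypothetical stand-in; the call
# mirrors the LRP docstring above). Linear layers receive the default
# EpsilonRule and ReLU is handled as a supported non-linear layer; the
# returned delta reports how much relevance was absorbed by epsilon
# stabilization per example.
def _demo_lrp_with_convergence_delta():
    import torch
    import torch.nn as nn

    net = nn.Sequential(nn.Linear(16, 8), nn.ReLU(), nn.Linear(8, 3))
    inp = torch.randn(4, 16)
    lrp = LRP(net)
    attribution, delta = lrp.attribute(
        inp, target=1, return_convergence_delta=True
    )
    # attribution has shape (4, 16); delta has one entry per example.
    return attribution, delta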
|
#!/usr/bin/env python3
import warnings
from typing import Any, List, Union
import torch
from captum._utils.common import _format_output, _format_tensor_into_tuples, _is_tuple
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.guided_backprop_deconvnet import GuidedBackprop
from captum.attr._core.layer.grad_cam import LayerGradCam
from captum.attr._utils.attribution import GradientAttribution, LayerAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class GuidedGradCam(GradientAttribution):
r"""
Computes element-wise product of guided backpropagation attributions
with upsampled (non-negative) GradCAM attributions.
GradCAM attributions are computed with respect to the layer
provided in the constructor, and attributions
are upsampled to match the input size. GradCAM is designed for
convolutional neural networks, and is usually applied to the last
convolutional layer.
Note that if multiple input tensors are provided, attributions for
each input tensor are computed by upsampling the GradCAM
attributions to match that input's dimensions. If interpolation is
not possible for the input tensor dimensions and interpolation mode,
then an empty tensor is returned in the attributions for the
corresponding position of that input tensor. This can occur if the
input tensor does not have the same number of dimensions as the chosen
layer's output, or is not 3D, 4D, or 5D.
Note that attributions are only meaningful for input tensors
which are spatially aligned with the chosen layer, e.g. an input
image tensor for a convolutional layer.
More details regarding GuidedGradCAM can be found in the original
GradCAM paper here:
https://arxiv.org/abs/1610.02391
Warning: Ensure that all ReLU operations in the forward function of the
given model are performed using a module (torch.nn.ReLU).
If torch.nn.functional.relu is used, gradients are not overridden
appropriately.
"""
def __init__(
self, model: Module, layer: Module, device_ids: Union[None, List[int]] = None
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (torch.nn.Module): Layer for which GradCAM attributions are computed.
Currently, only layers with a single tensor output are
supported.
device_ids (list[int]): Device ID list, necessary only if model
is a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If model is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
GradientAttribution.__init__(self, model)
self.grad_cam = LayerGradCam(model, layer, device_ids)
self.guided_backprop = GuidedBackprop(model)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
interpolate_mode: str = "nearest",
attribute_to_layer_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which attributions
are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to model in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
interpolate_mode (str, optional): Method for interpolation, which
must be a valid input interpolation mode for
torch.nn.functional. These methods are
"nearest", "area", "linear" (3D-only), "bilinear"
(4D-only), "bicubic" (4D-only), "trilinear" (5D-only)
based on the number of dimensions of the chosen layer
output (which must also match the number of
dimensions for the input tensor). Note that
the original GradCAM paper uses "bilinear"
interpolation, but we default to "nearest" for
applicability to any of 3D, 4D or 5D tensors.
Default: "nearest"
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output in `LayerGradCam`.
If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer inputs, otherwise it will be computed with respect
to layer outputs.
Note that currently it is assumed that either the input
or the output of internal layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* of **attributions**:
- **attributions** (*Tensor*):
Element-wise product of (upsampled) GradCAM
and Guided Backprop attributions.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Attributions will be the same size as the provided inputs,
with each value providing the attribution of the
corresponding input index.
If the GradCAM attributions cannot be upsampled to the shape
of a given input tensor, an empty tensor is returned in the
corresponding index position.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv4, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx50x8x8.
>>> # It is the last convolution layer, which is the recommended
>>> # use case for GuidedGradCAM.
>>> net = ImageClassifier()
>>> guided_gc = GuidedGradCam(net, net.conv4)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes guided GradCAM attributions for class 3.
>>> # attribution size matches input size, Nx3x32x32
>>> attribution = guided_gc.attribute(input, 3)
"""
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
grad_cam_attr = self.grad_cam.attribute.__wrapped__(
self.grad_cam, # self
inputs=inputs,
target=target,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
relu_attributions=True,
)
if isinstance(grad_cam_attr, tuple):
assert len(grad_cam_attr) == 1, (
"GuidedGradCAM attributions for layer with multiple inputs / "
"outputs is not supported."
)
grad_cam_attr = grad_cam_attr[0]
guided_backprop_attr = self.guided_backprop.attribute.__wrapped__(
self.guided_backprop, # self
inputs=inputs,
target=target,
additional_forward_args=additional_forward_args,
)
output_attr: List[Tensor] = []
for i in range(len(inputs)):
try:
output_attr.append(
guided_backprop_attr[i]
* LayerAttribution.interpolate(
grad_cam_attr,
inputs[i].shape[2:],
interpolate_mode=interpolate_mode,
)
)
except Exception:
warnings.warn(
"Couldn't appropriately interpolate GradCAM attributions for some "
"input tensors, returning empty tensor for corresponding "
"attributions."
)
output_attr.append(torch.empty(0))
return _format_output(is_inputs_tuple, tuple(output_attr))
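# Mechanism sketch (standalone, illustrative only): as in attribute() above,
# GuidedGradCAM upsamples the coarse, non-negative GradCAM map to the input's
# spatial size and multiplies it element-wise with the Guided Backprop
# gradients. Tensor shapes here are hypothetical.
def _demo_upsample_and_combine():
    import torch
    import torch.nn.functional as F

    guided_backprop_grads = torch.randn(2, 3, 32, 32)  # same shape as the input
    grad_cam_map = torch.rand(2, 1, 8, 8)  # coarse, non-negative layer map
    upsampled = F.interpolate(grad_cam_map, size=(32, 32), mode="nearest")
    combined = guided_backprop_grads * upsampled  # broadcasts over channels
    return combined  # shape (2, 3, 32, 32)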
|
#!/usr/bin/env python3
import itertools
import math
import warnings
from typing import Any, Callable, Iterable, Sequence, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_feature_mask,
_format_output,
_format_tensor_into_tuples,
_get_max_feature_index,
_is_tuple,
_run_forward,
)
from captum._utils.progress import progress
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import PerturbationAttribution
from captum.attr._utils.common import (
_find_output_mode_and_verify,
_format_input_baseline,
_tensorize_baseline,
)
from captum.log import log_usage
from torch import Tensor
def _all_perm_generator(num_features: int, num_samples: int) -> Iterable[Sequence[int]]:
for perm in itertools.permutations(range(num_features)):
yield perm
def _perm_generator(num_features: int, num_samples: int) -> Iterable[Sequence[int]]:
for _ in range(num_samples):
yield torch.randperm(num_features).tolist()
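# Illustrative sketch (not part of the library API): the two generators above
# drive ShapleyValues (exhaustive) and ShapleyValueSampling (random sampling).
# For 3 features, _all_perm_generator yields all 3! = 6 orderings, while
# _perm_generator yields num_samples random orderings.
def _demo_permutation_generators():
    exhaustive = list(_all_perm_generator(num_features=3, num_samples=0))
    sampled = list(_perm_generator(num_features=3, num_samples=2))
    # len(exhaustive) == 6; len(sampled) == 2, each a permutation of [0, 1, 2]
    return exhaustive, sampled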
class ShapleyValueSampling(PerturbationAttribution):
"""
A perturbation based approach to compute attribution, based on the concept
of Shapley Values from cooperative game theory. This method involves taking
a random permutation of the input features and adding them one-by-one to the
given baseline. The output difference after adding each feature corresponds
to its attribution, and these differences are averaged when repeating this
process n_samples times, each time choosing a new random permutation of
the input features.
By default, each scalar value within
the input tensors is taken as a feature and added independently. Passing
a feature mask allows grouping features to be added together. This can
be used in cases such as images, where an entire segment or region
can be grouped together, measuring the importance of the segment
(feature group). Each input scalar in the group will be given the same
attribution value equal to the change in output as a result of adding back
the entire feature group.
More details regarding Shapley Value sampling can be found in these papers:
https://www.sciencedirect.com/science/article/pii/S0305054808000804
https://pdfs.semanticscholar.org/7715/bb1070691455d1fcfc6346ff458dbca77b2c.pdf
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it. The forward function can either
return a scalar per example, or a single scalar for the
full batch. If a single scalar is returned for the batch,
`perturbations_per_eval` must be 1, and the returned
attributions will have first dimension 1, corresponding to
feature importance across all examples in the batch.
"""
PerturbationAttribution.__init__(self, forward_func)
self.permutation_generator = _perm_generator
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
n_samples: int = 25,
perturbations_per_eval: int = 1,
show_progress: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
NOTE: The feature_mask argument differs from other perturbation based
methods, since feature indices can overlap across tensors. See the
description of the feature_mask argument below for more details.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which Shapley value
sampling attributions are computed. If forward_func takes
a single tensor as input, a single input tensor should
be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the reference value which replaces each
feature when ablated.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which difference is computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which should be added together. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Values across
all tensors should be integers in the range 0 to
num_features - 1, and indices corresponding to the same
feature should have the same value.
Note that features are grouped across tensors
(unlike feature ablation and occlusion), so
if the same index is used in different tensors, those
features are still grouped and added simultaneously.
If the forward function returns a single scalar per batch,
we enforce that the first dimension of each mask must be 1,
since attributions are returned batch-wise rather than per
example, so the attributions must correspond to the
same features (indices) in each input example.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature.
Default: None
n_samples (int, optional): The number of feature permutations
tested.
Default: `25` if `n_samples` is not provided.
perturbations_per_eval (int, optional): Allows multiple ablations
to be processed simultaneously in one call to forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If the forward function returns
a scalar value per example, attributions will be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If the forward function returns a scalar per batch, then
attribution tensor(s) will have first dimension 1 and
the remaining dimensions will match the input.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # Defining ShapleyValueSampling interpreter
>>> svs = ShapleyValueSampling(net)
>>> # Computes attribution, taking random orderings
>>> # of the 16 features and computing the output change when adding
>>> # each feature. We average over 200 trials (random permutations).
>>> attr = svs.attribute(input, target=1, n_samples=200)
>>> # Alternatively, we may want to add features in groups, e.g.
>>> # grouping each 2x2 square of the inputs and adding them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are added
>>> # together, and the attribution for each input in the same
>>> # group (0, 1, 2, and 3) per example are the same.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> attr = svs.attribute(input, target=1, feature_mask=feature_mask)
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs, baselines = _format_input_baseline(inputs, baselines)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
feature_mask = _format_feature_mask(feature_mask, inputs)
assert (
isinstance(perturbations_per_eval, int) and perturbations_per_eval >= 1
), "Ablations per evaluation must be at least 1."
with torch.no_grad():
baselines = _tensorize_baseline(inputs, baselines)
num_examples = inputs[0].shape[0]
total_features = _get_max_feature_index(feature_mask) + 1
if show_progress:
attr_progress = progress(
desc=f"{self.get_name()} attribution",
total=self._get_n_evaluations(
total_features, n_samples, perturbations_per_eval
)
+ 1, # add 1 for the initial eval
)
attr_progress.update(0)
initial_eval = _run_forward(
self.forward_func, baselines, target, additional_forward_args
)
if show_progress:
attr_progress.update()
agg_output_mode = _find_output_mode_and_verify(
initial_eval, num_examples, perturbations_per_eval, feature_mask
)
# Initialize attribution totals and counts
total_attrib = [
torch.zeros_like(
input[0:1] if agg_output_mode else input, dtype=torch.float
)
for input in inputs
]
iter_count = 0
# Iterate for number of samples, generate a permutation of the features
# and evaluate the incremental increase for each feature.
for feature_permutation in self.permutation_generator(
total_features, n_samples
):
iter_count += 1
prev_results = initial_eval
for (
current_inputs,
current_add_args,
current_target,
current_masks,
) in self._perturbation_generator(
inputs,
additional_forward_args,
target,
baselines,
feature_mask,
feature_permutation,
perturbations_per_eval,
):
if sum(torch.sum(mask).item() for mask in current_masks) == 0:
warnings.warn(
"Feature mask is missing some integers between 0 and "
"num_features, for optimal performance, make sure each"
" consecutive integer corresponds to a feature."
)
# modified_eval dimensions: 1D tensor with length
# equal to #num_examples * #features in batch
modified_eval = _run_forward(
self.forward_func,
current_inputs,
current_target,
current_add_args,
)
if show_progress:
attr_progress.update()
if agg_output_mode:
eval_diff = modified_eval - prev_results
prev_results = modified_eval
else:
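# Within one batched call, the k-th perturbation's marginal contribution
# is eval(k) - eval(k-1); prepending the previous result and shifting by
# num_examples computes all of these incremental differences at once.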
all_eval = torch.cat((prev_results, modified_eval), dim=0)
eval_diff = all_eval[num_examples:] - all_eval[:-num_examples]
prev_results = all_eval[-num_examples:]
for j in range(len(total_attrib)):
current_eval_diff = eval_diff
if not agg_output_mode:
# current_eval_diff dimensions:
# (#features in batch, #num_examples, 1,.. 1)
# (contains 1 more dimension than inputs). This adds extra
# dimensions of 1 to make the tensor broadcastable with the
# inputs tensor.
current_eval_diff = current_eval_diff.reshape(
(-1, num_examples) + (len(inputs[j].shape) - 1) * (1,)
)
total_attrib[j] += (
current_eval_diff * current_masks[j].float()
).sum(dim=0)
if show_progress:
attr_progress.close()
# Divide total attributions by number of random permutations and return
# formatted attributions.
attrib = tuple(
tensor_attrib_total / iter_count for tensor_attrib_total in total_attrib
)
formatted_attr = _format_output(is_inputs_tuple, attrib)
return formatted_attr
def _perturbation_generator(
self,
inputs: Tuple[Tensor, ...],
additional_args: Any,
target: TargetType,
baselines: Tuple[Tensor, ...],
input_masks: TensorOrTupleOfTensorsGeneric,
feature_permutation: Sequence[int],
perturbations_per_eval: int,
) -> Iterable[Tuple[Tuple[Tensor, ...], Any, TargetType, Tuple[Tensor, ...]]]:
"""
This method is a generator which yields each perturbation to be evaluated
including inputs, additional_forward_args, targets, and mask.
"""
# current_tensors starts at baselines and includes each additional feature as
# added based on the permutation order.
current_tensors = baselines
current_tensors_list = []
current_mask_list = []
# Compute repeated additional args and targets
additional_args_repeated = (
_expand_additional_forward_args(additional_args, perturbations_per_eval)
if additional_args is not None
else None
)
target_repeated = _expand_target(target, perturbations_per_eval)
for i in range(len(feature_permutation)):
current_tensors = tuple(
current * (~(mask == feature_permutation[i])).to(current.dtype)
+ input * (mask == feature_permutation[i]).to(input.dtype)
for input, current, mask in zip(inputs, current_tensors, input_masks)
)
current_tensors_list.append(current_tensors)
current_mask_list.append(
tuple(mask == feature_permutation[i] for mask in input_masks)
)
if len(current_tensors_list) == perturbations_per_eval:
combined_inputs = tuple(
torch.cat(aligned_tensors, dim=0)
for aligned_tensors in zip(*current_tensors_list)
)
combined_masks = tuple(
torch.stack(aligned_masks, dim=0)
for aligned_masks in zip(*current_mask_list)
)
yield (
combined_inputs,
additional_args_repeated,
target_repeated,
combined_masks,
)
current_tensors_list = []
current_mask_list = []
# Create batch with remaining evaluations, may not be a complete batch
# (= perturbations_per_eval)
if len(current_tensors_list) != 0:
additional_args_repeated = (
_expand_additional_forward_args(
additional_args, len(current_tensors_list)
)
if additional_args is not None
else None
)
target_repeated = _expand_target(target, len(current_tensors_list))
combined_inputs = tuple(
torch.cat(aligned_tensors, dim=0)
for aligned_tensors in zip(*current_tensors_list)
)
combined_masks = tuple(
torch.stack(aligned_masks, dim=0)
for aligned_masks in zip(*current_mask_list)
)
yield (
combined_inputs,
additional_args_repeated,
target_repeated,
combined_masks,
)
def _get_n_evaluations(self, total_features, n_samples, perturbations_per_eval):
"""return the total number of forward evaluations needed"""
return math.ceil(total_features / perturbations_per_eval) * n_samples
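# Worked example of the evaluation count above (numbers are illustrative):
# with 16 features, n_samples=25 and perturbations_per_eval=4, each sampled
# permutation needs ceil(16 / 4) = 4 forward calls, so attribute() performs
# 4 * 25 = 100 perturbed evaluations plus 1 initial baseline evaluation.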
class ShapleyValues(ShapleyValueSampling):
"""
A perturbation based approach to compute attribution, based on the concept
of Shapley Values from cooperative game theory. This method involves taking
each permutation of the input features and adding them one-by-one to the
given baseline. The output difference after adding each feature corresponds
to its attribution, and these differences are averaged over all
possible permutations of the input features.
By default, each scalar value within
the input tensors is taken as a feature and added independently. Passing
a feature mask allows grouping features to be added together. This can
be used in cases such as images, where an entire segment or region
can be grouped together, measuring the importance of the segment
(feature group). Each input scalar in the group will be given the same
attribution value equal to the change in output as a result of adding back
the entire feature group.
More details regarding Shapley Values can be found in these papers:
https://apps.dtic.mil/dtic/tr/fulltext/u2/604084.pdf
https://www.sciencedirect.com/science/article/pii/S0305054808000804
https://pdfs.semanticscholar.org/7715/bb1070691455d1fcfc6346ff458dbca77b2c.pdf
NOTE: The method implemented here is very computationally intensive, and
should only be used with a very small number of features (e.g. < 7).
This implementation simply extends ShapleyValueSampling and
evaluates all permutations, leading to a total of n * n! evaluations for n
features. Shapley values can alternatively be computed with only 2^n
evaluations, and we plan to add this approach in the future.
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it. The forward function can either
return a scalar per example, or a single scalar for the
full batch. If a single scalar is returned for the batch,
`perturbations_per_eval` must be 1, and the returned
attributions will have first dimension 1, corresponding to
feature importance across all examples in the batch.
"""
ShapleyValueSampling.__init__(self, forward_func)
self.permutation_generator = _all_perm_generator
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
perturbations_per_eval: int = 1,
show_progress: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
NOTE: The feature_mask argument differs from other perturbation based
methods, since feature indices can overlap across tensors. See the
description of the feature_mask argument below for more details.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which Shapley value
sampling attributions are computed. If forward_func takes
a single tensor as input, a single input tensor should
be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the reference value which replaces each
feature when ablated.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which difference is computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which should be added together. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Values across
all tensors should be integers in the range 0 to
num_features - 1, and indices corresponding to the same
feature should have the same value.
Note that features are grouped across tensors
(unlike feature ablation and occlusion), so
if the same index is used in different tensors, those
features are still grouped and added simultaneously.
If the forward function returns a single scalar per batch,
we enforce that the first dimension of each mask must be 1,
since attributions are returned batch-wise rather than per
example, so the attributions must correspond to the
same features (indices) in each input example.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature.
Default: None
perturbations_per_eval (int, optional): Allows multiple ablations
to be processed simultaneously in one call to forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If the forward function returns
a scalar value per example, attributions will be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If the forward function returns a scalar per batch, then
attribution tensor(s) will have first dimension 1 and
the remaining dimensions will match the input.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # We may want to add features in groups, e.g.
>>> # grouping each 2x2 square of the inputs and adding them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are added
>>> # together, and the attribution for each input in the same
>>> # group (0, 1, 2, and 3) per example are the same.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> # With only 4 features, it is feasible to compute exact
>>> # Shapley Values. These can be computed as follows:
>>> sv = ShapleyValues(net)
>>> attr = sv.attribute(input, target=1, feature_mask=feature_mask)
"""
if feature_mask is None:
total_features = sum(
torch.numel(inp[0]) for inp in _format_tensor_into_tuples(inputs)
)
else:
total_features = (
int(max(torch.max(single_mask).item() for single_mask in feature_mask))
+ 1
)
if total_features >= 10:
warnings.warn(
"You are attempting to compute Shapley Values with at least 10 "
"features, which will likely be very computationally expensive."
"Consider using Shapley Value Sampling instead."
)
return super().attribute.__wrapped__(
self,
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
feature_mask=feature_mask,
perturbations_per_eval=perturbations_per_eval,
show_progress=show_progress,
)
def _get_n_evaluations(self, total_features, n_samples, perturbations_per_eval):
"""return the total number of forward evaluations needed"""
return math.ceil(total_features / perturbations_per_eval) * math.factorial(
total_features
)
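# Illustrative sketch (not part of the original class): the exact Shapley Value
# computation above needs ceil(F / perturbations_per_eval) * F! forward passes,
# which is why the warning recommends Shapley Value Sampling once F reaches ~10.
if __name__ == "__main__":
    import math

    for n_feat in (4, 8, 10):
        # perturbations_per_eval assumed to be 1 here
        print(n_feat, math.ceil(n_feat / 1) * math.factorial(n_feat))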
|
#!/usr/bin/env python3
import typing
import warnings
from typing import Any, Callable, cast, List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_baseline,
_format_output,
_format_tensor_into_tuples,
_is_tuple,
_register_backward_hook,
_run_forward,
_select_targets,
ExpansionTypes,
)
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._utils.attribution import GradientAttribution
from captum.attr._utils.common import (
_call_custom_attribution_func,
_compute_conv_delta_and_format_attrs,
_format_callable_baseline,
_tensorize_baseline,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
class DeepLift(GradientAttribution):
r"""
Implements DeepLIFT algorithm based on the following paper:
Learning Important Features Through Propagating Activation Differences,
Avanti Shrikumar, et. al.
https://arxiv.org/abs/1704.02685
and the gradient formulation proposed in:
Towards better understanding of gradient-based attribution methods for
deep neural networks, Marco Ancona, et.al.
https://openreview.net/pdf?id=Sy21R9JAW
This implementation supports only Rescale rule. RevealCancel rule will
be supported in later releases.
In addition to that, in order to keep the implementation cleaner, DeepLIFT
for internal neurons and layers extends current implementation and is
implemented separately in LayerDeepLift and NeuronDeepLift.
Although DeepLIFT's (Rescale rule) attribution quality is comparable with
Integrated Gradients, it runs significantly faster than Integrated
Gradients and is preferred for large datasets.
Currently we only support a limited number of non-linear activations
but the plan is to expand the list in the future.
Note: Currently, we cannot access the building blocks of PyTorch's
built-in LSTMs, RNNs and GRUs, such as Tanh and Sigmoid.
Nonetheless, it is possible to build custom LSTMs, RNNs and GRUs
with performance similar to built-in ones using TorchScript.
More details on how to build custom RNNs can be found here:
https://pytorch.org/blog/optimizing-cuda-rnn-with-torchscript/
"""
def __init__(
self,
model: Module,
multiply_by_inputs: bool = True,
eps: float = 1e-10,
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of DeepLift, if `multiply_by_inputs`
is set to True, final sensitivity scores
are being multiplied by (inputs - baselines).
This flag applies only if `custom_attribution_func` is
set to None.
eps (float, optional): A value at which to consider output/input change
significant when computing the gradients for non-linear layers.
This is useful to adjust, depending on your model's bit depth,
to avoid numerical issues during the gradient computation.
Default: 1e-10
"""
GradientAttribution.__init__(self, model)
self.model = model
self.eps = eps
self.forward_handles: List[RemovableHandle] = []
self.backward_handles: List[RemovableHandle] = []
self._multiply_by_inputs = multiply_by_inputs
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> TensorOrTupleOfTensorsGeneric:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
custom_attribution_func (Callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided, we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same
length. `custom_attribution_func` returns a tuple of
attribution tensors that have the same length as the
`inputs`.
Default: None
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution score computed based on DeepLift rescale rule with respect
to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
This is computed using the property that
the total sum of model(inputs) - model(baselines)
must equal the total sum of the attributions computed
based on DeepLift's rescale rule.
Delta is calculated per example, meaning that the number of
elements in returned delta tensor is equal to the number of
examples in input.
Note that the logic described for deltas is guaranteed when the
default logic for attribution computations is used, meaning that the
`custom_attribution_func=None`, otherwise it is not guaranteed and
depends on the specifics of the `custom_attribution_func`.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> dl = DeepLift(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes deeplift attribution scores for class 3.
>>> attribution = dl.attribute(input, target=3)
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
baselines = _format_baseline(baselines, inputs)
gradient_mask = apply_gradient_requirements(inputs)
_validate_input(inputs, baselines)
# set hooks for baselines
warnings.warn(
"""Setting forward, backward hooks and attributes on non-linear
activations. The hooks and attributes will be removed
after the attribution is finished"""
)
baselines = _tensorize_baseline(inputs, baselines)
main_model_hooks = []
try:
main_model_hooks = self._hook_main_model()
self.model.apply(self._register_hooks)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
expanded_target = _expand_target(
target, 2, expansion_type=ExpansionTypes.repeat
)
wrapped_forward_func = self._construct_forward_func(
self.model,
(inputs, baselines),
expanded_target,
additional_forward_args,
)
gradients = self.gradient_func(wrapped_forward_func, inputs)
if custom_attribution_func is None:
if self.multiplies_by_inputs:
attributions = tuple(
(input - baseline) * gradient
for input, baseline, gradient in zip(
inputs, baselines, gradients
)
)
else:
attributions = gradients
else:
attributions = _call_custom_attribution_func(
custom_attribution_func, gradients, inputs, baselines
)
finally:
# Even if any error is raised, remove all hooks before raising
self._remove_hooks(main_model_hooks)
undo_gradient_requirements(inputs, gradient_mask)
return _compute_conv_delta_and_format_attrs(
self,
return_convergence_delta,
attributions,
baselines,
inputs,
additional_forward_args,
target,
is_inputs_tuple,
)
def _construct_forward_func(
self,
forward_func: Callable,
inputs: Tuple,
target: TargetType = None,
additional_forward_args: Any = None,
) -> Callable:
def forward_fn():
model_out = _run_forward(
forward_func, inputs, None, additional_forward_args
)
return _select_targets(
torch.cat((model_out[:, 0], model_out[:, 1])), target
)
if hasattr(forward_func, "device_ids"):
forward_fn.device_ids = forward_func.device_ids # type: ignore
return forward_fn
def _is_non_linear(self, module: Module) -> bool:
return type(module) in SUPPORTED_NON_LINEAR.keys()
def _forward_pre_hook_ref(
self, module: Module, inputs: Union[Tensor, Tuple[Tensor, ...]]
) -> None:
inputs = _format_tensor_into_tuples(inputs)
module.input_ref = tuple( # type: ignore
input.clone().detach() for input in inputs
)
def _forward_pre_hook(
self, module: Module, inputs: Union[Tensor, Tuple[Tensor, ...]]
) -> None:
"""
For the modules that perform in-place operations such as ReLUs, we cannot
use inputs from forward hooks. This is because in that case inputs
and outputs are the same. We need to access the inputs in pre-hooks and
set the necessary hooks on the inputs there.
"""
inputs = _format_tensor_into_tuples(inputs)
module.input = inputs[0].clone().detach()
def _forward_hook(
self,
module: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
outputs: Union[Tensor, Tuple[Tensor, ...]],
) -> None:
r"""
We need a forward hook to access and detach the inputs and
outputs of a neuron.
"""
outputs = _format_tensor_into_tuples(outputs)
module.output = outputs[0].clone().detach()
def _backward_hook(
self,
module: Module,
grad_input: Tensor,
grad_output: Tensor,
) -> Tensor:
r"""
`grad_input` is the gradient of the neuron with respect to its input.
`grad_output` is the gradient of the neuron with respect to its output.
We can override `grad_input` according to the chain rule as:
`grad_output` * delta_out / delta_in.
"""
# before accessing the attributes from the module we want
# to ensure that the properties exist, if not, then it is
# likely that the module is being reused.
attr_criteria = self.satisfies_attribute_criteria(module)
if not attr_criteria:
raise RuntimeError(
"A Module {} was detected that does not contain some of "
"the input/output attributes that are required for DeepLift "
"computations. This can occur, for example, if "
"your module is being used more than once in the network."
"Please, ensure that module is being used only once in the "
"network.".format(module)
)
multipliers = SUPPORTED_NON_LINEAR[type(module)](
module,
module.input,
module.output,
grad_input,
grad_output,
eps=self.eps,
)
# remove all the properties that we set for the inputs and output
del module.input
del module.output
return multipliers
def satisfies_attribute_criteria(self, module: Module) -> bool:
return hasattr(module, "input") and hasattr(module, "output")
def _can_register_hook(self, module: Module) -> bool:
# TODO find a better way of checking if a module is a container or not
module_fullname = str(type(module))
has_already_hooks = len(module._backward_hooks) > 0 # type: ignore
return not (
"nn.modules.container" in module_fullname
or has_already_hooks
or not self._is_non_linear(module)
)
def _register_hooks(
self, module: Module, attribute_to_layer_input: bool = True
) -> None:
if not self._can_register_hook(module) or (
not attribute_to_layer_input and module is self.layer # type: ignore
):
return
# adds forward hook to leaf nodes that are non-linear
forward_handle = module.register_forward_hook(self._forward_hook)
pre_forward_handle = module.register_forward_pre_hook(self._forward_pre_hook)
backward_handles = _register_backward_hook(module, self._backward_hook, self)
self.forward_handles.append(forward_handle)
self.forward_handles.append(pre_forward_handle)
self.backward_handles.extend(backward_handles)
def _remove_hooks(self, extra_hooks_to_remove: List[RemovableHandle]) -> None:
for handle in extra_hooks_to_remove:
handle.remove()
for forward_handle in self.forward_handles:
forward_handle.remove()
for backward_handle in self.backward_handles:
backward_handle.remove()
def _hook_main_model(self) -> List[RemovableHandle]:
def pre_hook(module: Module, baseline_inputs_add_args: Tuple) -> Tuple:
inputs = baseline_inputs_add_args[0]
baselines = baseline_inputs_add_args[1]
additional_args = None
if len(baseline_inputs_add_args) > 2:
additional_args = baseline_inputs_add_args[2:]
baseline_input_tsr = tuple(
torch.cat([input, baseline])
for input, baseline in zip(inputs, baselines)
)
if additional_args is not None:
expanded_additional_args = cast(
Tuple,
_expand_additional_forward_args(
additional_args, 2, ExpansionTypes.repeat
),
)
return (*baseline_input_tsr, *expanded_additional_args)
return baseline_input_tsr
def forward_hook(module: Module, inputs: Tuple, outputs: Tensor):
return torch.stack(torch.chunk(outputs, 2), dim=1)
if isinstance(
self.model, (nn.DataParallel, nn.parallel.DistributedDataParallel)
):
return [
self.model.module.register_forward_pre_hook(pre_hook), # type: ignore
self.model.module.register_forward_hook(forward_hook),
] # type: ignore
else:
return [
self.model.register_forward_pre_hook(pre_hook), # type: ignore
self.model.register_forward_hook(forward_hook),
] # type: ignore
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
class DeepLiftShap(DeepLift):
r"""
Extends DeepLift algorithm and approximates SHAP values using Deeplift.
For each input sample it computes DeepLift attribution with respect to
each baseline and averages resulting attributions.
More details about the algorithm can be found here:
https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf
Note that the explanation model:
1. Assumes that input features are independent of one another
2. Is linear, meaning that the explanations are modeled through
the additive composition of feature effects.
Although it assumes a linear model for each explanation, the overall
model across multiple explanations can be complex and non-linear.
"""
def __init__(self, model: Module, multiply_by_inputs: bool = True) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of DeepLiftShap, if `multiply_by_inputs`
is set to True, final sensitivity scores
are being multiplied by (inputs - baselines).
This flag applies only if `custom_attribution_func` is
set to None.
"""
DeepLift.__init__(self, model, multiply_by_inputs=multiply_by_inputs)
# There's a mismatch between the signatures of DeepLift.attribute and
# DeepLiftShap.attribute, so we ignore typing here
@typing.overload # type: ignore
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> TensorOrTupleOfTensorsGeneric:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (Tensor, tuple[Tensor, ...], or Callable):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references. Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
the first dimension equal to the number of examples
in the baselines' distribution. The remaining dimensions
must match with input tensor's dimension starting from
the second dimension.
- a tuple of tensors, if inputs is a tuple of tensors,
with the first dimension of any tensor inside the tuple
equal to the number of examples in the baseline's
distribution. The remaining dimensions must match
the dimensions of the corresponding input tensor
starting from the second dimension.
- callable function, optionally takes `inputs` as an
argument and either returns a single tensor
or a tuple of those.
It is recommended that the number of samples in the baselines'
tensors is larger than one.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
custom_attribution_func (Callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided, we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same
length. `custom_attribution_func` returns a tuple of
attribution tensors that have the same length as the
`inputs`.
Default: None
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution score computed based on DeepLift rescale rule with
respect to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
This is computed using the property that the
total sum of model(inputs) - model(baselines)
must be very close to the total sum of attributions
computed based on approximated SHAP values using
Deeplift's rescale rule.
Delta is calculated for each example input and baseline pair,
meaning that the number of elements in returned delta tensor
is equal to the
`number of examples in input` * `number of examples
in baseline`. The deltas are ordered first by input
example and then by baseline.
Note that the logic described for deltas is guaranteed
when the default logic for attribution computations is used,
meaning that the `custom_attribution_func=None`, otherwise
it is not guaranteed and depends on the specifics of the
`custom_attribution_func`.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> dl = DeepLiftShap(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes shap values using deeplift for class 3.
>>> attribution = dl.attribute(input, target=3)
"""
baselines = _format_callable_baseline(baselines, inputs)
assert isinstance(baselines[0], torch.Tensor) and baselines[0].shape[0] > 1, (
"Baselines distribution has to be provided in form of a torch.Tensor"
" with more than one example but found: {}."
" If baselines are provided in shape of scalars or with a single"
" baseline example, `DeepLift`"
" approach can be used instead.".format(baselines[0])
)
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
# batch sizes
inp_bsz = inputs[0].shape[0]
base_bsz = baselines[0].shape[0]
(
exp_inp,
exp_base,
exp_tgt,
exp_addit_args,
) = self._expand_inputs_baselines_targets(
baselines, inputs, target, additional_forward_args
)
attributions = super().attribute.__wrapped__( # type: ignore
self,
exp_inp,
exp_base,
target=exp_tgt,
additional_forward_args=exp_addit_args,
return_convergence_delta=cast(
Literal[True, False], return_convergence_delta
),
custom_attribution_func=custom_attribution_func,
)
if return_convergence_delta:
attributions, delta = cast(Tuple[Tuple[Tensor, ...], Tensor], attributions)
attributions = tuple(
self._compute_mean_across_baselines(
inp_bsz, base_bsz, cast(Tensor, attribution)
)
for attribution in attributions
)
if return_convergence_delta:
return _format_output(is_inputs_tuple, attributions), delta
else:
return _format_output(is_inputs_tuple, attributions)
def _expand_inputs_baselines_targets(
self,
baselines: Tuple[Tensor, ...],
inputs: Tuple[Tensor, ...],
target: TargetType,
additional_forward_args: Any,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], TargetType, Any]:
inp_bsz = inputs[0].shape[0]
base_bsz = baselines[0].shape[0]
expanded_inputs = tuple(
[
input.repeat_interleave(base_bsz, dim=0).requires_grad_()
for input in inputs
]
)
expanded_baselines = tuple(
[
baseline.repeat(
(inp_bsz,) + tuple([1] * (len(baseline.shape) - 1))
).requires_grad_()
for baseline in baselines
]
)
expanded_target = _expand_target(
target, base_bsz, expansion_type=ExpansionTypes.repeat_interleave
)
input_additional_args = (
_expand_additional_forward_args(
additional_forward_args,
base_bsz,
expansion_type=ExpansionTypes.repeat_interleave,
)
if additional_forward_args is not None
else None
)
return (
expanded_inputs,
expanded_baselines,
expanded_target,
input_additional_args,
)
def _compute_mean_across_baselines(
self, inp_bsz: int, base_bsz: int, attribution: Tensor
) -> Tensor:
# Average for multiple references
attr_shape: Tuple = (inp_bsz, base_bsz)
if len(attribution.shape) > 1:
attr_shape += attribution.shape[1:]
return torch.mean(attribution.view(attr_shape), dim=1, keepdim=False)
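# Illustrative sketch (not part of the original module): DeepLiftShap computes
# one attribution per (input example, baseline example) pair and then averages
# over the baseline dimension, mirroring `_compute_mean_across_baselines` above.
# The tensor below is a hypothetical per-pair attribution, not real model output.
if __name__ == "__main__":
    inp_bsz, base_bsz, n_feat = 2, 3, 4
    per_pair = torch.arange(
        inp_bsz * base_bsz * n_feat, dtype=torch.float
    ).view(-1, n_feat)
    per_example = per_pair.view(inp_bsz, base_bsz, n_feat).mean(dim=1)
    print(per_example.shape)  # torch.Size([2, 4]) -> one attribution per input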
def nonlinear(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
) -> Tensor:
r"""
grad_input: (dLoss / dprev_layer_out, dLoss / wij, dLoss / bij)
grad_output: (dLoss / dlayer_out)
https://github.com/pytorch/pytorch/issues/12331
"""
delta_in, delta_out = _compute_diffs(inputs, outputs)
new_grad_inp = torch.where(
abs(delta_in) < eps, grad_input, grad_output * delta_out / delta_in
)
return new_grad_inp
def softmax(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
):
delta_in, delta_out = _compute_diffs(inputs, outputs)
grad_input_unnorm = torch.where(
abs(delta_in) < eps, grad_input, grad_output * delta_out / delta_in
)
# normalizing
n = grad_input.numel()
# updating only the first half
new_grad_inp = grad_input_unnorm - grad_input_unnorm.sum() * 1 / n
return new_grad_inp
def maxpool1d(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
):
return maxpool(
module,
F.max_pool1d,
F.max_unpool1d,
inputs,
outputs,
grad_input,
grad_output,
eps=eps,
)
def maxpool2d(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
):
return maxpool(
module,
F.max_pool2d,
F.max_unpool2d,
inputs,
outputs,
grad_input,
grad_output,
eps=eps,
)
def maxpool3d(
module: Module, inputs, outputs, grad_input, grad_output, eps: float = 1e-10
):
return maxpool(
module,
F.max_pool3d,
F.max_unpool3d,
inputs,
outputs,
grad_input,
grad_output,
eps=eps,
)
def maxpool(
module: Module,
pool_func: Callable,
unpool_func: Callable,
inputs,
outputs,
grad_input,
grad_output,
eps: float = 1e-10,
):
with torch.no_grad():
input, input_ref = inputs.chunk(2)
output, output_ref = outputs.chunk(2)
delta_in = input - input_ref
delta_in = torch.cat(2 * [delta_in])
# Extracts the cross maximum between the outputs of maxpool for the
# actual inputs and their corresponding references. In case the delta outputs
# for the references are larger, the method relies on the references and
# corresponding gradients to compute the multipliers and contributions.
delta_out_xmax = torch.max(output, output_ref)
delta_out = torch.cat([delta_out_xmax - output_ref, output - delta_out_xmax])
_, indices = pool_func(
module.input,
module.kernel_size,
module.stride,
module.padding,
module.dilation,
module.ceil_mode,
True,
)
grad_output_updated = grad_output
unpool_grad_out_delta, unpool_grad_out_ref_delta = torch.chunk(
unpool_func(
grad_output_updated * delta_out,
indices,
module.kernel_size,
module.stride,
module.padding,
list(cast(torch.Size, module.input.shape)),
),
2,
)
unpool_grad_out_delta = unpool_grad_out_delta + unpool_grad_out_ref_delta
unpool_grad_out_delta = torch.cat(2 * [unpool_grad_out_delta])
if grad_input.shape != inputs.shape:
raise AssertionError(
"A problem occurred during maxpool modul's backward pass. "
"The gradients with respect to inputs include only a "
"subset of inputs. More details about this issue can "
"be found here: "
"https://pytorch.org/docs/stable/"
"nn.html#torch.nn.Module.register_backward_hook "
"This can happen for example if you attribute to the outputs of a "
"MaxPool. As a workaround, please, attribute to the inputs of "
"the following layer."
)
new_grad_inp = torch.where(
abs(delta_in) < eps, grad_input[0], unpool_grad_out_delta / delta_in
)
return new_grad_inp
def _compute_diffs(inputs: Tensor, outputs: Tensor) -> Tuple[Tensor, Tensor]:
input, input_ref = inputs.chunk(2)
# If the model is a single non-linear module and we apply the Rescale rule on
# it, we might not be able to perform chunking because the output of the module
# is usually replaced by the model output.
output, output_ref = outputs.chunk(2)
delta_in = input - input_ref
delta_out = output - output_ref
return torch.cat(2 * [delta_in]), torch.cat(2 * [delta_out])
SUPPORTED_NON_LINEAR = {
nn.ReLU: nonlinear,
nn.ELU: nonlinear,
nn.LeakyReLU: nonlinear,
nn.Sigmoid: nonlinear,
nn.Tanh: nonlinear,
nn.Softplus: nonlinear,
nn.MaxPool1d: maxpool1d,
nn.MaxPool2d: maxpool2d,
nn.MaxPool3d: maxpool3d,
nn.Softmax: softmax,
}
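# Illustrative sketch (not part of the original module): a minimal numeric
# check of the Rescale-rule multiplier that the `nonlinear` hook computes,
# assuming a single ReLU with one actual input and one reference (baseline).
if __name__ == "__main__":
    x, x_ref = torch.tensor([2.0]), torch.tensor([-1.0])
    y, y_ref = F.relu(x), F.relu(x_ref)
    delta_in, delta_out = x - x_ref, y - y_ref  # 3.0 and 2.0
    multiplier = delta_out / delta_in  # 2/3 replaces the local gradient
    print(multiplier)  # tensor([0.6667])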
|
#!/usr/bin/env python3
from typing import Any, Callable
import torch
from captum._utils.common import _format_output, _format_tensor_into_tuples, _is_tuple
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution
from captum.log import log_usage
class Saliency(GradientAttribution):
r"""
A baseline approach for computing input attribution. It returns
the gradients with respect to inputs. If `abs` is set to True, which is
the default, the absolute value of the gradients is returned.
More details about the approach can be found in the following paper:
https://arxiv.org/abs/1312.6034
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
"""
GradientAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
abs: bool = True,
additional_forward_args: Any = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which saliency
is computed. If forward_func takes a single tensor
as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
abs (bool, optional): Returns absolute value of gradients if set
to True, otherwise returns the (signed) gradients if
False.
Default: True
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The gradients with respect to each input feature.
Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> # Generating random input with size 2 x 3 x 32 x 32
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Defining Saliency interpreter
>>> saliency = Saliency(net)
>>> # Computes saliency maps for class 3.
>>> attribution = saliency.attribute(input, target=3)
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
# No need to format additional_forward_args here.
# They are being formatted in the `_run_forward` function in `common.py`
gradients = self.gradient_func(
self.forward_func, inputs, target, additional_forward_args
)
if abs:
attributions = tuple(torch.abs(gradient) for gradient in gradients)
else:
attributions = gradients
undo_gradient_requirements(inputs, gradient_mask)
return _format_output(is_inputs_tuple, attributions)
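# Illustrative sketch (not part of the original module): for a purely linear
# model, the saliency map is just the absolute row of the weight matrix
# selected by `target`, since d(out[target]) / d(x) = W[target].
if __name__ == "__main__":
    lin = torch.nn.Linear(3, 2)
    x = torch.randn(1, 3)
    attr = Saliency(lin).attribute(x, target=0)
    print(torch.allclose(attr, lin.weight[0].abs().unsqueeze(0)))  # True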
|
#!/usr/bin/env python3
from typing import Any, Callable, Generator, Tuple, Union
import torch
from captum._utils.models.linear_model import SkLearnLinearRegression
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.lime import construct_feature_mask, Lime
from captum.attr._utils.common import _format_input_baseline
from captum.log import log_usage
from torch import Tensor
from torch.distributions.categorical import Categorical
class KernelShap(Lime):
r"""
Kernel SHAP is a method that uses the LIME framework to compute
Shapley Values. Setting the loss function, weighting kernel and
regularization terms appropriately in the LIME framework makes it
possible, in theory, to obtain Shapley Values more efficiently than
by computing them directly.
More information regarding this method and proof of equivalence
can be found in the original paper here:
https://arxiv.org/abs/1705.07874
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
"""
Lime.__init__(
self,
forward_func,
interpretable_model=SkLearnLinearRegression(),
similarity_func=self.kernel_shap_similarity_kernel,
perturb_func=self.kernel_shap_perturb_generator,
)
self.inf_weight = 1000000.0
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
n_samples: int = 25,
perturbations_per_eval: int = 1,
return_input_shape: bool = True,
show_progress: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
This method attributes the output of the model with given target index
(in case it is provided, otherwise it assumes that output is a
scalar) to the inputs of the model using the approach described above,
training an interpretable model based on KernelSHAP and returning a
representation of the interpretable model.
It is recommended to only provide a single example as input (tensors
with first dimension or batch size = 1). This is because LIME / KernelShap
is generally used for sample-based interpretability, training a separate
interpretable model to explain a model's prediction on each individual example.
A batch of inputs can also be provided as inputs, similar to
other perturbation-based attribution methods. In this case, if forward_fn
returns a scalar per example, attributions will be computed for each
example independently, with a separate interpretable model trained for each
example. Note that the provided similarity and perturbation functions will be
given each example separately (first dimension = 1) in this case.
If forward_fn returns a scalar per batch (e.g. loss), attributions will
still be computed using a single interpretable model for the full batch.
In this case, similarity and perturbation functions will be provided the
same original input containing the full batch.
The number of interpretable features is determined from the provided
feature mask, or if none is provided, from the default feature mask,
which considers each scalar input as a separate feature. It is
generally recommended to provide a feature mask which groups features
into a small number of interpretable features / components (e.g.
superpixels in images).
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which KernelShap
is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the reference value which replaces each
feature when the corresponding interpretable feature
is set to 0.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which surrogate model is trained
(for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other
types, the given argument is used for all forward
evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which correspond to the same
interpretable feature. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Values across
all tensors should be integers in the range 0 to
num_interp_features - 1, and indices corresponding to the
same feature should have the same value.
Note that features are grouped across tensors
(unlike feature ablation and occlusion), so
if the same index is used in different tensors, those
features are still grouped and added simultaneously.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature.
Default: None
n_samples (int, optional): The number of samples of the original
model used to train the surrogate interpretable model.
Default: `25` if `n_samples` is not provided.
perturbations_per_eval (int, optional): Allows multiple samples
to be processed simultaneously in one call to forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
return_input_shape (bool, optional): Determines whether the returned
tensor(s) only contain the coefficients for each
interpretable feature from the trained surrogate model, or
whether the returned attributions match the input shape.
When return_input_shape is True, the return type of attribute
matches the input shape, with each element containing the
coefficient of the corresponding interpretable feature.
All elements with the same value in the feature mask
will contain the same coefficient in the returned
attributions. If return_input_shape is False, a 1D
tensor is returned, containing only the coefficients
of the trained interpretable model, with length
num_interp_features.
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fall back to
a simple output of progress.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If return_input_shape = True, attributions will be
the same size as the provided inputs, with each value
providing the coefficient of the corresponding
interpretable feature.
If return_input_shape is False, a 1D
tensor is returned, containing only the coefficients
of the trained interpretable model, with length
num_interp_features.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 1 x 4 x 4
>>> input = torch.randn(1, 4, 4)
>>> # Defining KernelShap interpreter
>>> ks = KernelShap(net)
>>> # Computes attribution, with each of the 4 x 4 = 16
>>> # features as a separate interpretable feature
>>> attr = ks.attribute(input, target=1, n_samples=200)
>>> # Alternatively, we can group each 2x2 square of the inputs
>>> # as one 'interpretable' feature and perturb them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are set to their
>>> # baseline value, when the corresponding binary interpretable
>>> # feature is set to 0.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> # Computes KernelSHAP attributions with feature mask.
>>> attr = ks.attribute(input, target=1, feature_mask=feature_mask)
"""
formatted_inputs, baselines = _format_input_baseline(inputs, baselines)
feature_mask, num_interp_features = construct_feature_mask(
feature_mask, formatted_inputs
)
num_features_list = torch.arange(num_interp_features, dtype=torch.float)
denom = num_features_list * (num_interp_features - num_features_list)
probs = (num_interp_features - 1) / denom
probs[0] = 0.0
return self._attribute_kwargs(
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
feature_mask=feature_mask,
n_samples=n_samples,
perturbations_per_eval=perturbations_per_eval,
return_input_shape=return_input_shape,
num_select_distribution=Categorical(probs),
show_progress=show_progress,
)
def kernel_shap_similarity_kernel(
self, _, __, interpretable_sample: Tensor, **kwargs
) -> Tensor:
assert (
"num_interp_features" in kwargs
), "Must provide num_interp_features to use default similarity kernel"
num_selected_features = int(interpretable_sample.sum(dim=1).item())
num_features = kwargs["num_interp_features"]
if num_selected_features == 0 or num_selected_features == num_features:
# weight should be theoretically infinite when
# num_selected_features = 0 or num_features
# enforcing that trained linear model must satisfy
# end-point criteria. In practice, it is sufficient to
# make this weight substantially larger so setting this
# weight to 1000000 (all other weights are 1).
similarities = self.inf_weight
else:
similarities = 1.0
return torch.tensor([similarities])
def kernel_shap_perturb_generator(
self, original_inp: Union[Tensor, Tuple[Tensor, ...]], **kwargs
) -> Generator[Tensor, None, None]:
r"""
Perturbations are sampled by the following process:
- Choose k (number of selected features), based on the distribution
p(k) = (M - 1) / (k * (M - k))
where M is the total number of features in the interpretable space
- Randomly select a binary vector with k ones; each such vector is equally
likely. This is done by generating a random vector of normal
values and thresholding based on the top k elements.
Since there are M choose k vectors with k ones, this weighted sampling
is equivalent to applying the Shapley kernel for the sample weight,
defined as:
k(M, k) = (M - 1) / (k * (M - k) * (M choose k))
"""
assert (
"num_select_distribution" in kwargs and "num_interp_features" in kwargs
), (
"num_select_distribution and num_interp_features are necessary"
" to use kernel_shap_perturb_func"
)
if isinstance(original_inp, Tensor):
device = original_inp.device
else:
device = original_inp[0].device
num_features = kwargs["num_interp_features"]
yield torch.ones(1, num_features, device=device, dtype=torch.long)
yield torch.zeros(1, num_features, device=device, dtype=torch.long)
while True:
num_selected_features = kwargs["num_select_distribution"].sample()
rand_vals = torch.randn(1, num_features)
threshold = torch.kthvalue(
rand_vals, num_features - num_selected_features
).values.item()
yield (rand_vals > threshold).to(device=device).long()
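# Illustrative sketch (not part of the original module): the coalition-size
# distribution used above, p(k) proportional to (M - 1) / (k * (M - k)), shown
# for M = 4 interpretable features. k = 0 is zeroed out because the all-zeros
# and all-ones vectors are yielded deterministically and weighted with the
# (near-)infinite similarity weight instead of being sampled.
if __name__ == "__main__":
    M = 4
    k = torch.arange(M, dtype=torch.float)
    probs = (M - 1) / (k * (M - k))
    probs[0] = 0.0
    print(Categorical(probs).probs)  # normalized sampling probabilities over k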
|
#!/usr/bin/env python3
from typing import Any, Callable
from captum._utils.common import _format_output, _format_tensor_into_tuples, _is_tuple
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution
from captum.log import log_usage
class InputXGradient(GradientAttribution):
r"""
A baseline approach for computing the attribution. It multiplies input with
the gradient with respect to input.
https://arxiv.org/abs/1605.01713
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
"""
GradientAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
forward_func in order following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The input x gradient with
respect to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> # Generating random input with size 2 x 3 x 32 x 32
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Defining InputXGradient interpreter
>>> input_x_gradient = InputXGradient(net)
>>> # Computes inputXgradient for class 4.
>>> attribution = input_x_gradient.attribute(input, target=4)
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
gradients = self.gradient_func(
self.forward_func, inputs, target, additional_forward_args
)
attributions = tuple(
input * gradient for input, gradient in zip(inputs, gradients)
)
undo_gradient_requirements(inputs, gradient_mask)
return _format_output(is_inputs_tuple, attributions)
@property
def multiplies_by_inputs(self):
return True
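# --- Hedged usage sketch (added for illustration; not part of the original
# module). It shows InputXGradient end to end on an assumed toy model; the
# architecture, input shape, and target index below are illustrative
# assumptions, not values taken from the library.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    toy_net = nn.Sequential(nn.Linear(4, 3), nn.ReLU(), nn.Linear(3, 2))
    toy_input = torch.randn(5, 4)
    input_x_gradient = InputXGradient(toy_net)
    # Attribution equals input * d(output[:, target]) / d(input) and has the
    # same shape as the input (5 x 4).
    attrs = input_x_gradient.attribute(toy_input, target=1)
    print(attrs.shape)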
|
#!/usr/bin/env python3
import typing
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_output,
_is_tuple,
)
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._utils.approximation_methods import approximation_parameters
from captum.attr._utils.attribution import GradientAttribution
from captum.attr._utils.batching import _batch_attribution
from captum.attr._utils.common import (
_format_input_baseline,
_reshape_and_sum,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
class IntegratedGradients(GradientAttribution):
r"""
Integrated Gradients is an axiomatic model interpretability algorithm that
assigns an importance score to each input feature by approximating the
integral of gradients of the model's output with respect to the inputs
along the path (straight line) from given baselines / references to inputs.
Baselines can be provided as input arguments to attribute method.
To approximate the integral we can choose to use either a variant of
Riemann sum or Gauss-Legendre quadrature rule.
More details regarding the integrated gradients method can be found in the
original paper:
https://arxiv.org/abs/1703.01365
"""
def __init__(
self,
forward_func: Callable,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in,
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of integrated gradients, if `multiply_by_inputs`
is set to True, final sensitivity scores are being multiplied by
(inputs - baselines).
"""
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
# The following overloaded method signatures correspond to the case where
# return_convergence_delta is False, then only attributions are returned,
# and when return_convergence_delta is True, the return type is
# a tuple with both attributions and deltas.
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
return_convergence_delta: Literal[False] = False,
) -> TensorOrTupleOfTensorsGeneric:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
*,
return_convergence_delta: Literal[True],
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
return_convergence_delta: bool = False,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
This method attributes the output of the model with given target index
(in case it is provided, otherwise it assumes that output is a
scalar) to the inputs of the model using the approach described above.
In addition to that it also returns, if `return_convergence_delta` is
set to True, integral approximation delta based on the completeness
property of integrated gradients.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which integrated
gradients are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the starting point from which the integral
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (str, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `gausslegendre` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size at most internal_batch_size,
which are computed (forward / backward passes)
sequentially. internal_batch_size must be at least equal to
#examples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations are
processed in one batch.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Integrated gradients with respect to each input feature.
attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
The difference between the total approximated and true
integrated gradients. This is computed using the property
that the total sum of forward_func(inputs) -
forward_func(baselines) must equal the total sum of the
integrated gradient.
Delta is calculated per example, meaning that the number of
elements in returned delta tensor is equal to the number of
examples in inputs.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> ig = IntegratedGradients(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes integrated gradients for class 3.
>>> attribution = ig.attribute(input, target=3)
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs, baselines = _format_input_baseline(inputs, baselines)
_validate_input(inputs, baselines, n_steps, method)
if internal_batch_size is not None:
num_examples = inputs[0].shape[0]
attributions = _batch_attribution(
self,
num_examples,
internal_batch_size,
n_steps,
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
method=method,
)
else:
attributions = self._attribute(
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
n_steps=n_steps,
method=method,
)
if return_convergence_delta:
start_point, end_point = baselines, inputs
# computes approximation error based on the completeness axiom
delta = self.compute_convergence_delta(
attributions,
start_point,
end_point,
additional_forward_args=additional_forward_args,
target=target,
)
return _format_output(is_inputs_tuple, attributions), delta
return _format_output(is_inputs_tuple, attributions)
def _attribute(
self,
inputs: Tuple[Tensor, ...],
baselines: Tuple[Union[Tensor, int, float], ...],
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
step_sizes_and_alphas: Union[None, Tuple[List[float], List[float]]] = None,
) -> Tuple[Tensor, ...]:
if step_sizes_and_alphas is None:
# retrieve step size and scaling factor for specified
# approximation method
step_sizes_func, alphas_func = approximation_parameters(method)
step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)
else:
step_sizes, alphas = step_sizes_and_alphas
# scale features and compute gradients. (batch size is abbreviated as bsz)
# scaled_features' dim -> (bsz * #steps x inputs[0].shape[1:], ...)
scaled_features_tpl = tuple(
torch.cat(
[baseline + alpha * (input - baseline) for alpha in alphas], dim=0
).requires_grad_()
for input, baseline in zip(inputs, baselines)
)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# apply number of steps to additional forward args
# currently, number of steps is applied only to additional forward arguments
# that are nd-tensors. It is assumed that the first dimension is
# the number of batches.
# dim -> (bsz * #steps x additional_forward_args[0].shape[1:], ...)
input_additional_args = (
_expand_additional_forward_args(additional_forward_args, n_steps)
if additional_forward_args is not None
else None
)
expanded_target = _expand_target(target, n_steps)
# grads: dim -> (bsz * #steps x inputs[0].shape[1:], ...)
grads = self.gradient_func(
forward_fn=self.forward_func,
inputs=scaled_features_tpl,
target_ind=expanded_target,
additional_forward_args=input_additional_args,
)
# flattening grads so that we can multiply them with the step sizes
# calling contiguous to avoid view errors on non-contiguous gradient tensors
scaled_grads = [
grad.contiguous().view(n_steps, -1)
* torch.tensor(step_sizes).view(n_steps, 1).to(grad.device)
for grad in grads
]
# aggregates across all steps for each tensor in the input tuple
# total_grads has the same dimensionality as inputs
total_grads = tuple(
_reshape_and_sum(
scaled_grad, n_steps, grad.shape[0] // n_steps, grad.shape[1:]
)
for (scaled_grad, grad) in zip(scaled_grads, grads)
)
# computes attribution for each tensor in input tuple
# attributions has the same dimensionality as inputs
if not self.multiplies_by_inputs:
attributions = total_grads
else:
attributions = tuple(
total_grad * (input - baseline)
for total_grad, input, baseline in zip(total_grads, inputs, baselines)
)
return attributions
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
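# --- Hedged usage sketch (added for illustration; not part of the original
# module). It runs IntegratedGradients on an assumed toy model and checks the
# completeness property via the returned convergence delta. The model, input
# shape, baseline, and target index are illustrative assumptions.
if __name__ == "__main__":
    import torch.nn as nn

    toy_net = nn.Sequential(nn.Linear(4, 3), nn.ReLU(), nn.Linear(3, 2))
    toy_input = torch.randn(5, 4)
    ig = IntegratedGradients(toy_net)
    attrs, delta = ig.attribute(
        toy_input,
        baselines=0.0,
        target=0,
        n_steps=100,
        return_convergence_delta=True,
    )
    # Completeness: per example, the attributions should sum approximately to
    # forward(input)[target] - forward(baseline)[target]; delta reports the gap.
    print(attrs.shape, delta.abs().max().item())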
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
from captum._utils.common import (
_format_additional_forward_args,
_format_output,
_format_tensor_into_tuples,
_is_tuple,
)
from captum._utils.gradient import (
_forward_layer_eval_with_neuron_grads,
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.log import log_usage
from torch.nn import Module
class NeuronGradient(NeuronAttribution, GradientAttribution):
r"""
Computes the gradient of the output of a particular neuron with
respect to the inputs of the network.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which neuron
gradients are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neurons, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Gradients of particular neuron with respect to each input
feature. Attributions will always be the same size as the
provided inputs, with each value providing the attribution
of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> neuron_ig = NeuronGradient(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
>>> # which indexes a particular neuron in the layer output.
>>> # For this example, we choose the index (4,1,2).
>>> # Computes neuron gradient for neuron with
>>> # index (4,1,2).
>>> attribution = neuron_ig.attribute(input, (4,1,2))
"""
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
gradient_mask = apply_gradient_requirements(inputs)
_, input_grads = _forward_layer_eval_with_neuron_grads(
self.forward_func,
inputs,
self.layer,
additional_forward_args,
gradient_neuron_selector=neuron_selector,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
undo_gradient_requirements(inputs, gradient_mask)
return _format_output(is_inputs_tuple, input_grads)
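# --- Hedged usage sketch (added for illustration; not part of the original
# module). It computes the gradient of a single hidden neuron with respect to
# the network inputs on an assumed toy model; the architecture and neuron
# index are illustrative assumptions.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    class _ToyNet(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.fc1 = nn.Linear(4, 3)
            self.fc2 = nn.Linear(3, 2)

        def forward(self, x):
            return self.fc2(torch.relu(self.fc1(x)))

    toy_net = _ToyNet()
    toy_input = torch.randn(5, 4)
    neuron_grad = NeuronGradient(toy_net, toy_net.fc1)
    # Gradient of hidden neuron 1 (column 1 of fc1's output) w.r.t. each input
    # value; the result has the same shape as the input (5 x 4).
    attrs = neuron_grad.attribute(toy_input, neuron_selector=1)
    print(attrs.shape)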
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
from captum._utils.gradient import construct_neuron_grad_fn
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.guided_backprop_deconvnet import Deconvolution, GuidedBackprop
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.log import log_usage
from torch.nn import Module
class NeuronDeconvolution(NeuronAttribution, GradientAttribution):
r"""
Computes attribution of the given neuron using deconvolution.
Deconvolution computes the gradient of the target output with
respect to the input, but gradients of ReLU functions are overridden so
that the gradient of the ReLU input is simply computed taking ReLU of
the output gradient, essentially only propagating non-negative gradients
(without dependence on the sign of the ReLU input).
More details regarding the deconvolution algorithm can be found
in these papers:
https://arxiv.org/abs/1311.2901
https://link.springer.com/chapter/10.1007/978-3-319-46466-4_8
Warning: Ensure that all ReLU operations in the forward function of the
given model are performed using a module (nn.ReLU).
If nn.functional.relu is used, gradients are not overridden appropriately.
"""
def __init__(
self, model: Module, layer: Module, device_ids: Union[None, List[int]] = None
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if model
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If model is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
NeuronAttribution.__init__(self, model, layer, device_ids)
GradientAttribution.__init__(self, model)
self.deconv = Deconvolution(model)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Deconvolution attribution of
particular neuron with respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> neuron_deconv = NeuronDeconvolution(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
>>> # which indexes a particular neuron in the layer output.
>>> # For this example, we choose the index (4,1,2).
>>> # Computes neuron deconvolution for neuron with
>>> # index (4,1,2).
>>> attribution = neuron_deconv.attribute(input, (4,1,2))
"""
self.deconv.gradient_func = construct_neuron_grad_fn(
self.layer, neuron_selector, self.device_ids, attribute_to_neuron_input
)
# NOTE: using __wrapped__ to not log
return self.deconv.attribute.__wrapped__(
self.deconv, inputs, None, additional_forward_args
)
class NeuronGuidedBackprop(NeuronAttribution, GradientAttribution):
r"""
Computes attribution of the given neuron using guided backpropagation.
Guided backpropagation computes the gradient of the target neuron
with respect to the input, but gradients of ReLU functions are overridden
so that only non-negative gradients are backpropagated.
More details regarding the guided backpropagation algorithm can be found
in the original paper here:
https://arxiv.org/abs/1412.6806
Warning: Ensure that all ReLU operations in the forward function of the
given model are performed using a module (nn.ReLU).
If nn.functional.relu is used, gradients are not overridden appropriately.
"""
def __init__(
self, model: Module, layer: Module, device_ids: Union[None, List[int]] = None
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (Module): Layer for which neuron attributions are computed.
Attributions for a particular neuron in the output of
this layer are computed using the argument neuron_selector
in the attribute method.
Currently, only layers with a single tensor output are
supported.
device_ids (list[int]): Device ID list, necessary only if model
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If model is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
NeuronAttribution.__init__(self, model, layer, device_ids)
GradientAttribution.__init__(self, model)
self.guided_backprop = GuidedBackprop(model)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neurons, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Guided backprop attribution of
particular neuron with respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> neuron_gb = NeuronGuidedBackprop(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
>>> # which indexes a particular neuron in the layer output.
>>> # For this example, we choose the index (4,1,2).
>>> # Computes neuron guided backpropagation for neuron with
>>> # index (4,1,2).
>>> attribution = neuron_gb.attribute(input, (4,1,2))
"""
self.guided_backprop.gradient_func = construct_neuron_grad_fn(
self.layer, neuron_selector, self.device_ids, attribute_to_neuron_input
)
# NOTE: using __wrapped__ to not log
return self.guided_backprop.attribute.__wrapped__(
self.guided_backprop, inputs, None, additional_forward_args
)
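# --- Hedged usage sketch (added for illustration; not part of the original
# module). It runs both NeuronDeconvolution and NeuronGuidedBackprop on an
# assumed toy model that uses nn.ReLU modules (as the warnings above require);
# the architecture and neuron index are illustrative assumptions.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    toy_net = nn.Sequential(nn.Linear(4, 3), nn.ReLU(), nn.Linear(3, 2))
    toy_input = torch.randn(5, 4)
    # Attribute hidden neuron 0 of the first linear layer back to the inputs;
    # both methods return attributions with the input's shape (5 x 4).
    neuron_deconv = NeuronDeconvolution(toy_net, toy_net[0])
    neuron_gb = NeuronGuidedBackprop(toy_net, toy_net[0])
    print(neuron_deconv.attribute(toy_input, neuron_selector=0).shape)
    print(neuron_gb.attribute(toy_input, neuron_selector=0).shape)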
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.common import _verify_select_neuron
from captum._utils.gradient import _forward_layer_eval
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._utils.attribution import NeuronAttribution, PerturbationAttribution
from captum.log import log_usage
from torch.nn import Module
class NeuronFeatureAblation(NeuronAttribution, PerturbationAttribution):
r"""
A perturbation based approach to computing neuron attribution,
involving replacing each input feature with a given baseline /
reference, and computing the difference in the neuron's input / output.
By default, each scalar value within
each input tensor is taken as a feature and replaced independently. Passing
a feature mask allows grouping features to be ablated together. This can
be used in cases such as images, where an entire segment or region
can be ablated, measuring the importance of the segment (feature group).
Each input scalar in the group will be given the same attribution value
equal to the change in target as a result of ablating the entire feature
group.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Attributions for a particular neuron in the input or output
of this layer are computed using the argument neuron_selector
in the attribute method.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
PerturbationAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
baselines: BaselineType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
attribute_to_neuron_input: bool = False,
perturbations_per_eval: int = 1,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which neuron
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the reference value which replaces each
feature when it is ablated.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or
broadcastable to match the dimensions of inputs
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which should be ablated together. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Each tensor
should contain integers in the range 0 to num_features
- 1, and indices corresponding to the same feature should
have the same value.
Note that features within each input tensor are ablated
independently (not across tensors).
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature, which
is ablated independently.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neurons, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
perturbations_per_eval (int, optional): Allows ablation of multiple
features to be processed simultaneously in one call to
forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
Default: 1
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attributions of particular neuron with respect to each input
feature. Attributions will always be the same size as the
provided inputs, with each value providing the attribution
of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x3x3.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # Defining NeuronFeatureAblation interpreter
>>> ablator = NeuronFeatureAblation(net, net.conv1)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x3x3, we need a tuple in the form (0..11,0..2,0..2)
>>> # which indexes a particular neuron in the layer output.
>>> # For this example, we choose the index (4,1,2).
>>> # Computes ablation attribution for the neuron at index (4,1,2),
>>> # ablating each of the 16 scalar inputs independently.
>>> attr = ablator.attribute(input, neuron_selector=(4,1,2))
>>> # Alternatively, we may want to ablate features in groups, e.g.
>>> # grouping each 2x2 square of the inputs and ablating them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are ablated
>>> # simultaneously, and the attribution for each input in the same
>>> # group (0, 1, 2, and 3) per example are the same.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> attr = ablator.attribute(input, neuron_selector=(4,1,2),
>>> feature_mask=feature_mask)
"""
def neuron_forward_func(*args: Any):
with torch.no_grad():
layer_eval = _forward_layer_eval(
self.forward_func,
args,
self.layer,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
return _verify_select_neuron(layer_eval, neuron_selector)
ablator = FeatureAblation(neuron_forward_func)
# NOTE: using __wrapped__ to not log
return ablator.attribute.__wrapped__(
ablator, # self
inputs,
baselines=baselines,
additional_forward_args=additional_forward_args,
feature_mask=feature_mask,
perturbations_per_eval=perturbations_per_eval,
)
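# --- Hedged usage sketch (added for illustration; not part of the original
# module). It ablates inputs first scalar-by-scalar and then in 2x2 groups via
# a feature mask, measuring the change in one hidden neuron of an assumed toy
# convolutional model; the architecture, shapes, and neuron index are
# illustrative assumptions.
if __name__ == "__main__":
    import torch.nn as nn

    class _ToyConvNet(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(1, 2, kernel_size=3, padding=1)
            self.fc = nn.Linear(2 * 4 * 4, 3)

        def forward(self, x):
            return self.fc(torch.relu(self.conv1(x)).flatten(1))

    toy_net = _ToyConvNet()
    toy_input = torch.randn(2, 1, 4, 4)
    ablator = NeuronFeatureAblation(toy_net, toy_net.conv1)
    # conv1's output is N x 2 x 4 x 4, so the selector omits the batch dim.
    attrs = ablator.attribute(toy_input, neuron_selector=(1, 2, 2))
    # Group the 4x4 spatial grid into four 2x2 feature groups (0, 1, 2, 3).
    mask = torch.tensor(
        [[[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]]]
    )
    grouped = ablator.attribute(
        toy_input, neuron_selector=(1, 2, 2), feature_mask=mask
    )
    print(attrs.shape, grouped.shape)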
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
from captum._utils.gradient import construct_neuron_grad_fn
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.gradient_shap import GradientShap
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.log import log_usage
from torch.nn import Module
class NeuronGradientShap(NeuronAttribution, GradientAttribution):
r"""
Implements gradient SHAP for a neuron in a hidden layer based on the
implementation from SHAP's primary author. For reference, please view:
https://github.com/slundberg/shap\
#deep-learning-example-with-gradientexplainer-tensorflowkeraspytorch-models
A Unified Approach to Interpreting Model Predictions
https://papers.nips.cc/paper\
7062-a-unified-approach-to-interpreting-model-predictions
GradientShap approximates SHAP values by computing the expectations of
gradients by randomly sampling from the distribution of baselines/references.
It adds white noise to each input sample `n_samples` times, selects a
random baseline from baselines' distribution and a random point along the
path between the baseline and the input, and computes the gradient of the
neuron with index `neuron_selector` with respect to those selected random
points. The final SHAP values represent the expected values of
`gradients * (inputs - baselines)`.
GradientShap makes an assumption that the input features are independent
and that the explanation model is linear, meaning that the explanations
are modeled through the additive composition of feature effects.
Under those assumptions, SHAP value can be approximated as the expectation
of gradients that are computed for randomly generated `n_samples` input
samples after adding gaussian noise `n_samples` times to each input for
different baselines/references.
In some sense it can be viewed as an approximation of integrated gradients
by computing the expectations of gradients for different baselines.
Current implementation uses Smoothgrad from :class:`.NoiseTunnel` in order to
randomly draw samples from the distribution of baselines, add noise to input
samples and compute the expectation (smoothgrad).
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which neuron attributions are computed.
The output size of the attribute method matches the
dimensions of the inputs or outputs of the neuron with
index `neuron_selector` in this layer, depending on whether
we attribute to the inputs or outputs of the neuron.
Currently, it is assumed that the inputs or the outputs
of the neurons in this layer, depending on which one is
used for attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of Neuron Gradient SHAP,
if `multiply_by_inputs` is set to True, the
sensitivity scores for scaled inputs are
being multiplied by (inputs - baselines).
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
n_samples: int = 5,
stdevs: float = 0.0,
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which SHAP attribution
values are computed. If `forward_func` takes a single
tensor as input, a single input tensor should be provided.
If `forward_func` takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
baselines (Tensor, tuple[Tensor, ...], or Callable):
Baselines define the starting point from which the expectation
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
the first dimension equal to the number of examples
in the baselines' distribution. The remaining dimensions
must match with input tensor's dimension starting from
the second dimension.
- a tuple of tensors, if inputs is a tuple of tensors,
with the first dimension of any tensor inside the tuple
equal to the number of examples in the baseline's
distribution. The remaining dimensions must match
the dimensions of the corresponding input tensor
starting from the second dimension.
- callable function, optionally takes `inputs` as an
argument and either returns a single tensor
or a tuple of those.
It is recommended that the number of samples in the baselines'
tensors is larger than one.
n_samples (int, optional): The number of randomly generated examples
per sample in the input batch. Random examples are
generated by adding gaussian random noise to each sample.
Default: `5` if `n_samples` is not provided.
stdevs (float or tuple of float, optional): The standard deviation
of gaussian noise with zero mean that is added to each
input in the batch. If `stdevs` is a single float value
then that same value is used for all inputs. If it is
a tuple, then it must have the same length as the inputs
tuple. In this case, each stdev value in the stdevs tuple
corresponds to the input with the same index in the inputs
tuple.
Default: 0.0
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It can contain a tuple of ND tensors or
any arbitrary python type of any shape.
In case of the ND tensor the first dimension of the
tensor must correspond to the batch size. It will be
repeated for each of the `n_samples` randomly generated
input samples.
Note that the gradients are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution score computed based on GradientSHAP with respect
to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> neuron_grad_shap = NeuronGradientShap(net, net.linear2)
>>> input = torch.randn(3, 3, 32, 32, requires_grad=True)
>>> # choosing baselines randomly
>>> baselines = torch.randn(20, 3, 32, 32)
>>> # Computes gradient SHAP of first neuron in linear2 layer
>>> # with respect to the inputs of the network.
>>> # Attribution size matches input size: 3x3x32x32
>>> attribution = neuron_grad_shap.attribute(input, neuron_selector=0,
>>> baselines=baselines)
"""
gs = GradientShap(self.forward_func, self.multiplies_by_inputs)
gs.gradient_func = construct_neuron_grad_fn(
self.layer,
neuron_selector,
self.device_ids,
attribute_to_neuron_input=attribute_to_neuron_input,
)
# NOTE: using __wrapped__ to not log
return gs.attribute.__wrapped__( # type: ignore
gs, # self
inputs,
baselines,
n_samples=n_samples,
stdevs=stdevs,
additional_forward_args=additional_forward_args,
)
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
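# --- Hedged usage sketch (added for illustration; not part of the original
# module). It computes SHAP-style attributions for one hidden neuron using a
# small random baseline distribution on an assumed toy model; the architecture,
# baseline count, noise level, and neuron index are illustrative assumptions.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    class _ToyNet(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear1 = nn.Linear(4, 3)
            self.linear2 = nn.Linear(3, 2)

        def forward(self, x):
            return self.linear2(torch.relu(self.linear1(x)))

    toy_net = _ToyNet()
    toy_input = torch.randn(5, 4)
    # Baseline distribution: 20 random reference points with the input's
    # feature dimensions.
    baselines = torch.randn(20, 4)
    neuron_grad_shap = NeuronGradientShap(toy_net, toy_net.linear2)
    attrs = neuron_grad_shap.attribute(
        toy_input,
        neuron_selector=0,
        baselines=baselines,
        n_samples=10,
        stdevs=0.1,
    )
    # Attributions match the input shape (5 x 4).
    print(attrs.shape)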
|
#!/usr/bin/env python3
from typing import Any, Callable, cast, Tuple, Union
from captum._utils.gradient import construct_neuron_grad_fn
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.deep_lift import DeepLift, DeepLiftShap
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class NeuronDeepLift(NeuronAttribution, GradientAttribution):
r"""
Implements DeepLIFT algorithm for the neuron based on the following paper:
Learning Important Features Through Propagating Activation Differences,
Avanti Shrikumar, et. al.
https://arxiv.org/abs/1704.02685
and the gradient formulation proposed in:
Towards better understanding of gradient-based attribution methods for
deep neural networks, Marco Ancona, et.al.
https://openreview.net/pdf?id=Sy21R9JAW
This implementation supports only Rescale rule. RevealCancel rule will
be supported in later releases.
Although DeepLIFT's (Rescale Rule) attribution quality is comparable with
Integrated Gradients, it runs significantly faster than Integrated
Gradients and is preferred for large datasets.
Currently we only support a limited number of non-linear activations
but the plan is to expand the list in the future.
Note: Currently we cannot access the building blocks
of PyTorch's built-in LSTMs, RNNs and GRUs, such as Tanh and Sigmoid.
Nonetheless, it is possible to build custom LSTMs, RNNs and GRUs
with performance similar to built-in ones using TorchScript.
More details on how to build custom RNNs can be found here:
https://pytorch.org/blog/optimizing-cuda-rnn-with-torchscript/
"""
def __init__(
self, model: Module, layer: Module, multiply_by_inputs: bool = True
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (torch.nn.Module): Layer for which neuron attributions are computed.
Attributions for a particular neuron for the input or output
of this layer are computed using the argument neuron_selector
in the attribute method.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In the case of Neuron DeepLift, if `multiply_by_inputs`
is set to True, final sensitivity scores
are being multiplied by (inputs - baselines).
This flag applies only if `custom_attribution_func` is
set to None.
"""
NeuronAttribution.__init__(self, model, layer)
GradientAttribution.__init__(self, model)
self._multiply_by_inputs = multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
baselines: BaselineType = None,
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which layer
attributions are computed. If model takes a
single tensor as input, a single input tensor should be
provided. If model takes multiple tensors as input,
a tuple of the input tensors should be provided. It is
assumed that for all given input tensors, dimension 0
corresponds to the number of examples (aka batch size),
and if multiple input tensors are provided, the examples
must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (a slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In cases when `baselines` is not provided, we internally
use a zero scalar corresponding to each input tensor.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided
to model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
custom_attribution_func (Callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided, we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same
length. `custom_attribution_func` returns a tuple of
attribution tensors that have the same length as the
`inputs`.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Computes attributions using Deeplift's rescale rule for
particular neuron with respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> # creates an instance of NeuronDeepLift to interpret neurons
>>> # in the conv4 layer.
>>> dl = NeuronDeepLift(net, net.conv4)
>>> input = torch.randn(1, 3, 32, 32, requires_grad=True)
>>> # Computes deeplift attribution scores for conv4 layer and neuron
>>> # index (4,1,2).
>>> attribution = dl.attribute(input, (4,1,2))
"""
dl = DeepLift(cast(Module, self.forward_func), self.multiplies_by_inputs)
dl.gradient_func = construct_neuron_grad_fn(
self.layer,
neuron_selector,
attribute_to_neuron_input=attribute_to_neuron_input,
)
# NOTE: using __wrapped__ to not log
return dl.attribute.__wrapped__( # type: ignore
dl, # self
inputs,
baselines,
additional_forward_args=additional_forward_args,
custom_attribution_func=custom_attribution_func,
)
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
class NeuronDeepLiftShap(NeuronAttribution, GradientAttribution):
r"""
Extends NeuronAttribution and uses LayerDeepLiftShap algorithms and
approximates SHAP values for given input `layer` and `neuron_selector`.
For each input sample - baseline pair it computes DeepLift attributions
with respect to inputs or outputs of given `layer` and `neuron_selector`
averages resulting attributions across baselines. Whether to compute the
attributions with respect to the inputs or outputs of the layer is defined
by the input flag `attribute_to_layer_input`.
More details about the algorithm can be found here:
https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf
Note that the explanation model:
1. Assumes that input features are independent of one another
2. Is linear, meaning that the explanations are modeled through
the additive composition of feature effects.
Although it assumes a linear model for each explanation, the overall
model across multiple explanations can be complex and non-linear.
"""
def __init__(
self, model: Module, layer: Module, multiply_by_inputs: bool = True
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (torch.nn.Module): Layer for which neuron attributions are computed.
Attributions for a particular neuron for the input or output
of this layer are computed using the argument neuron_selector
in the attribute method.
Currently, only layers with a single tensor input and output
are supported.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In the case of Neuron DeepLift Shap, if `multiply_by_inputs`
is set to True, final sensitivity scores
are being multiplied by (inputs - baselines).
This flag applies only if `custom_attribution_func` is
set to None.
"""
NeuronAttribution.__init__(self, model, layer)
GradientAttribution.__init__(self, model)
self._multiply_by_inputs = multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which layer
attributions are computed. If model takes a
single tensor as input, a single input tensor should be
provided. If model takes multiple tensors as input,
a tuple of the input tensors should be provided. It is
assumed that for all given input tensors, dimension 0
corresponds to the number of examples (aka batch size),
and if multiple input tensors are provided, the examples
must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (a slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
baselines (Tensor, tuple[Tensor, ...], or Callable):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references. Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
the first dimension equal to the number of examples
in the baselines' distribution. The remaining dimensions
must match with input tensor's dimension starting from
the second dimension.
- a tuple of tensors, if inputs is a tuple of tensors,
with the first dimension of any tensor inside the tuple
equal to the number of examples in the baseline's
distribution. The remaining dimensions must match
the dimensions of the corresponding input tensor
starting from the second dimension.
- callable function, optionally takes `inputs` as an
argument and either returns a single tensor
or a tuple of those.
It is recommended that the number of samples in the baselines'
tensors is larger than one.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided
to model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
custom_attribution_func (Callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided, we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same
length. `custom_attribution_func` returns a tuple of
attribution tensors that have the same length as the
`inputs`.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Computes attributions using Deeplift's rescale rule for
particular neuron with respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> # creates an instance of NeuronDeepLiftShap to interpret neurons
>>> # in the conv4 layer.
>>> dl = NeuronDeepLiftShap(net, net.conv4)
>>> input = torch.randn(1, 3, 32, 32, requires_grad=True)
>>> # choosing baselines randomly
>>> baselines = torch.randn(4, 3, 32, 32)
>>> # Computes deeplift attribution scores for conv4 layer and neuron
>>> # index (4,1,2), averaged across the baselines.
>>> attribution = dl.attribute(input, (4,1,2), baselines)
"""
dl = DeepLiftShap(cast(Module, self.forward_func), self.multiplies_by_inputs)
dl.gradient_func = construct_neuron_grad_fn(
self.layer,
neuron_selector,
attribute_to_neuron_input=attribute_to_neuron_input,
)
# NOTE: using __wrapped__ to not log
return dl.attribute.__wrapped__( # type: ignore
dl, # self
inputs,
baselines,
additional_forward_args=additional_forward_args,
custom_attribution_func=custom_attribution_func,
)
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
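# Illustrative usage sketch (not part of the library source), assuming a small
# hypothetical classifier built from nn.Sequential. It shows both variants:
# NeuronDeepLift with the default zero baseline and NeuronDeepLiftShap with a
# distribution of baselines.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
    inputs = torch.randn(3, 8)

    # Attribute the 8 input features to neuron 0 of the first Linear layer,
    # using the implicit zero baseline.
    neuron_dl = NeuronDeepLift(model, model[0])
    attr = neuron_dl.attribute(inputs, neuron_selector=0)
    print(attr.shape)  # torch.Size([3, 8])

    # NeuronDeepLiftShap requires a baseline distribution; attributions are
    # averaged over all input-baseline pairs.
    baselines = torch.randn(10, 8)
    neuron_dls = NeuronDeepLiftShap(model, model[0])
    attr_shap = neuron_dls.attribute(inputs, neuron_selector=0, baselines=baselines)
    print(attr_shap.shape)  # torch.Size([3, 8])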
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
from captum._utils.gradient import construct_neuron_grad_fn
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class NeuronIntegratedGradients(NeuronAttribution, GradientAttribution):
r"""
Approximates the integral of gradients for a particular neuron
along the path from a baseline input to the given input.
If no baseline is provided, the default baseline is the zero tensor.
More details regarding the integrated gradient method can be found in the
original paper here:
https://arxiv.org/abs/1703.01365
Note that this method is equivalent to applying integrated gradients
where the output is the output of the identified neuron.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In the case of Neuron Integrated Gradients,
if `multiply_by_inputs` is set to True, final
sensitivity scores are being multiplied
by (inputs - baselines).
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
baselines: Union[None, Tensor, Tuple[Tensor, ...]] = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which neuron integrated
gradients are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (a slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the starting point from which integral
is computed.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In cases when `baselines` is not provided, we internally
use a zero scalar corresponding to each input tensor.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (str, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `gausslegendre` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size at most internal_batch_size,
which are computed (forward / backward passes)
sequentially. internal_batch_size must be at least equal to
#examples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations are
processed in one batch.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Integrated gradients for particular neuron with
respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> neuron_ig = NeuronIntegratedGradients(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
>>> # which indexes a particular neuron in the layer output.
>>> # For this example, we choose the index (4,1,2).
>>> # Computes neuron integrated gradients for neuron with
>>> # index (4,1,2).
>>> attribution = neuron_ig.attribute(input, (4,1,2))
"""
ig = IntegratedGradients(self.forward_func, self.multiplies_by_inputs)
ig.gradient_func = construct_neuron_grad_fn(
self.layer, neuron_selector, self.device_ids, attribute_to_neuron_input
)
# NOTE: using __wrapped__ to not log
# Return only attributions and not delta
return ig.attribute.__wrapped__( # type: ignore
ig, # self
inputs,
baselines,
additional_forward_args=additional_forward_args,
n_steps=n_steps,
method=method,
internal_batch_size=internal_batch_size,
)
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
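# Illustrative usage sketch (not part of the library source), assuming a small
# hypothetical model. It also shows a slice-based neuron selector, which
# attributes to the sum of the selected neurons, and internal_batch_size,
# which limits how many scaled inputs are evaluated per pass.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(8, 6), nn.ReLU(), nn.Linear(6, 2))
    inputs = torch.randn(4, 8)

    neuron_ig = NeuronIntegratedGradients(model, model[0])
    # Single neuron (column 2) of the first Linear layer's 2D output.
    attr_single = neuron_ig.attribute(inputs, neuron_selector=2, n_steps=25)
    # Sum of neurons 0..2, selected with a one-element tuple of slices.
    attr_slice = neuron_ig.attribute(
        inputs, neuron_selector=(slice(0, 3),), internal_batch_size=16
    )
    print(attr_single.shape, attr_slice.shape)  # both torch.Size([4, 8])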
|
#!/usr/bin/env python3
import warnings
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_output,
_is_tuple,
_verify_select_neuron,
)
from captum._utils.gradient import compute_layer_gradients_and_eval
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.approximation_methods import approximation_parameters
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.attr._utils.batching import _batch_attribution
from captum.attr._utils.common import (
_format_input_baseline,
_reshape_and_sum,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class NeuronConductance(NeuronAttribution, GradientAttribution):
r"""
Computes conductance with respect to particular hidden neuron. The
returned output is in the shape of the input, showing the attribution
/ conductance of each input feature to the selected hidden layer neuron.
The details of the approach can be found here:
https://arxiv.org/abs/1805.12233
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which neuron attributions are computed.
Attributions for a particular neuron in the input or output
of this layer are computed using the argument neuron_selector
in the attribute method.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In the case of Neuron Conductance,
if `multiply_by_inputs` is set to True, final
sensitivity scores are being multiplied
by (inputs - baselines).
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[int, ...], Callable],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "riemann_trapezoid",
internal_batch_size: Union[None, int] = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which neuron
conductance is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
This can be used as long as the layer input / output
is a single tensor.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a selected
neuron - output shape should be 1D with length equal to
batch_size (one scalar per input example)
NOTE: Callables applicable for neuron conductance are
less general than those of other methods and should
NOT aggregate values of the layer, only return a specific
output. This option should only be used in cases where the
layer input / output is a tuple of tensors, where the other
options would not suffice. This limitation is necessary since
neuron conductance, unlike other neuron methods, also utilizes
the gradient of the output with respect to the intermediate neuron,
which cannot be computed for aggregations of multiple
intermediate neurons.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the starting point from which integral
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In cases when `baselines` is not provided, we internally
use a zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (str, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `riemann_trapezoid` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size at most internal_batch_size,
which are computed (forward / backward passes)
sequentially. internal_batch_size must be at least equal to
#examples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations are
processed in one batch.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Conductance for
particular neuron with respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> neuron_cond = NeuronConductance(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
>>> # which indexes a particular neuron in the layer output.
>>> # Computes neuron conductance for neuron with
>>> # index (4,1,2).
>>> attribution = neuron_cond.attribute(input, (4,1,2))
"""
if callable(neuron_selector):
warnings.warn(
"The neuron_selector provided is a callable. Please ensure that this"
" function only selects neurons from the given layer; aggregating"
" or performing other operations on the tensor may lead to inaccurate"
" results."
)
is_inputs_tuple = _is_tuple(inputs)
inputs, baselines = _format_input_baseline(inputs, baselines)
_validate_input(inputs, baselines, n_steps, method)
num_examples = inputs[0].shape[0]
if internal_batch_size is not None:
attrs = _batch_attribution(
self,
num_examples,
internal_batch_size,
n_steps,
inputs=inputs,
baselines=baselines,
neuron_selector=neuron_selector,
target=target,
additional_forward_args=additional_forward_args,
method=method,
attribute_to_neuron_input=attribute_to_neuron_input,
)
else:
attrs = self._attribute(
inputs=inputs,
neuron_selector=neuron_selector,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
n_steps=n_steps,
method=method,
attribute_to_neuron_input=attribute_to_neuron_input,
)
return _format_output(is_inputs_tuple, attrs)
def _attribute(
self,
inputs: Tuple[Tensor, ...],
neuron_selector: Union[int, Tuple[int, ...], Callable],
baselines: Tuple[Union[Tensor, int, float], ...],
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "riemann_trapezoid",
attribute_to_neuron_input: bool = False,
step_sizes_and_alphas: Union[None, Tuple[List[float], List[float]]] = None,
) -> Tuple[Tensor, ...]:
num_examples = inputs[0].shape[0]
total_batch = num_examples * n_steps
if step_sizes_and_alphas is None:
# retrieve step size and scaling factor for specified approximation method
step_sizes_func, alphas_func = approximation_parameters(method)
step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)
else:
step_sizes, alphas = step_sizes_and_alphas
# Compute scaled inputs from baseline to final input.
scaled_features_tpl = tuple(
torch.cat(
[baseline + alpha * (input - baseline) for alpha in alphas], dim=0
).requires_grad_()
for input, baseline in zip(inputs, baselines)
)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# apply number of steps to additional forward args
# currently, number of steps is applied only to additional forward arguments
# that are nd-tensors. It is assumed that the first dimension is
# the number of batches.
# dim -> (#examples * #steps x additional_forward_args[0].shape[1:], ...)
input_additional_args = (
_expand_additional_forward_args(additional_forward_args, n_steps)
if additional_forward_args is not None
else None
)
expanded_target = _expand_target(target, n_steps)
# Conductance Gradients - Returns gradient of output with respect to
# hidden layer and hidden layer evaluated at each input.
layer_gradients, layer_eval, input_grads = compute_layer_gradients_and_eval(
forward_fn=self.forward_func,
layer=self.layer,
inputs=scaled_features_tpl,
target_ind=expanded_target,
additional_forward_args=input_additional_args,
gradient_neuron_selector=neuron_selector,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
mid_grads = _verify_select_neuron(layer_gradients, neuron_selector)
scaled_input_gradients = tuple(
input_grad
* mid_grads.reshape((total_batch,) + (1,) * (len(input_grad.shape) - 1))
for input_grad in input_grads
)
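# Chain rule: mid_grads holds dF/dy (gradient of the target output with
# respect to the selected neuron y) and input_grads holds dy/dx (gradient of
# the neuron with respect to each input), both evaluated at the scaled inputs
# along the path. Integrating their product over the path and, below,
# multiplying by (inputs - baselines) yields the neuron's conductance
# decomposed over input features.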
# Multiplies by the appropriate step size.
scaled_grads = tuple(
scaled_input_gradient.contiguous().view(n_steps, -1)
* torch.tensor(step_sizes).view(n_steps, 1).to(scaled_input_gradient.device)
for scaled_input_gradient in scaled_input_gradients
)
# Aggregates across all steps for each tensor in the input tuple
total_grads = tuple(
_reshape_and_sum(scaled_grad, n_steps, num_examples, input_grad.shape[1:])
for (scaled_grad, input_grad) in zip(scaled_grads, input_grads)
)
if self.multiplies_by_inputs:
# computes attribution for each tensor in input tuple
# attributions has the same dimensionality as inputs
attributions = tuple(
total_grad * (input - baseline)
for total_grad, input, baseline in zip(total_grads, inputs, baselines)
)
else:
attributions = total_grads
return attributions
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
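# Illustrative usage sketch (not part of the library source), assuming a small
# hypothetical classifier. Unlike most other neuron methods, conductance also
# needs a target output index for multi-class models, since it uses the
# gradient of the output with respect to the selected neuron.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 3))
    inputs = torch.randn(5, 8)

    neuron_cond = NeuronConductance(model, model[1])
    # Conductance of hidden neuron 2 (after the ReLU) toward output class 0,
    # decomposed over the 8 input features of each example.
    attr = neuron_cond.attribute(inputs, neuron_selector=2, target=0, n_steps=25)
    print(attr.shape)  # torch.Size([5, 8])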
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_output,
)
from captum._utils.gradient import compute_layer_gradients_and_eval
from captum._utils.typing import BaselineType, TargetType
from captum.attr._utils.approximation_methods import approximation_parameters
from captum.attr._utils.attribution import GradientAttribution, LayerAttribution
from captum.attr._utils.batching import _batch_attribution
from captum.attr._utils.common import (
_format_input_baseline,
_reshape_and_sum,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class InternalInfluence(LayerAttribution, GradientAttribution):
r"""
Computes internal influence by approximating the integral of gradients
for a particular layer along the path from a baseline input to the
given input.
If no baseline is provided, the default baseline is the zero tensor.
More details on this approach can be found here:
https://arxiv.org/abs/1802.03788
Note that this method is similar to applying integrated gradients with
the layer treated as the input, integrating the gradient of the output
with respect to the layer activations.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
attribute_to_layer_input: bool = False,
) -> Union[Tensor, Tuple[Tensor, ...]]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which internal
influence is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define a starting point from which integral
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In cases when `baselines` is not provided, we internally
use a zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (str, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `gausslegendre` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size at most internal_batch_size,
which are computed (forward / backward passes)
sequentially. internal_batch_size must be at least equal to
#examples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations
are processed in one batch.
Default: None
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer inputs, otherwise it will be computed with respect
to layer outputs.
Note that currently it is assumed that either the input
or the output of internal layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Internal influence of each neuron in given
layer output. Attributions will always be the same size
as the output or input of the given layer depending on
whether `attribute_to_layer_input` is set to `False` or
`True` respectively.
Attributions are returned in a tuple if
the layer inputs / outputs contain multiple tensors,
otherwise a single tensor is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> layer_int_inf = InternalInfluence(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes layer internal influence.
>>> # attribution size matches layer output, Nx12x32x32
>>> attribution = layer_int_inf.attribute(input)
"""
inputs, baselines = _format_input_baseline(inputs, baselines)
_validate_input(inputs, baselines, n_steps, method)
if internal_batch_size is not None:
num_examples = inputs[0].shape[0]
attrs = _batch_attribution(
self,
num_examples,
internal_batch_size,
n_steps,
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
method=method,
attribute_to_layer_input=attribute_to_layer_input,
)
else:
attrs = self._attribute(
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
n_steps=n_steps,
method=method,
attribute_to_layer_input=attribute_to_layer_input,
)
return attrs
def _attribute(
self,
inputs: Tuple[Tensor, ...],
baselines: Tuple[Union[Tensor, int, float], ...],
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
attribute_to_layer_input: bool = False,
step_sizes_and_alphas: Union[None, Tuple[List[float], List[float]]] = None,
) -> Union[Tensor, Tuple[Tensor, ...]]:
if step_sizes_and_alphas is None:
# retrieve step size and scaling factor for specified approximation method
step_sizes_func, alphas_func = approximation_parameters(method)
step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)
else:
step_sizes, alphas = step_sizes_and_alphas
# Compute scaled inputs from baseline to final input.
scaled_features_tpl = tuple(
torch.cat(
[baseline + alpha * (input - baseline) for alpha in alphas], dim=0
).requires_grad_()
for input, baseline in zip(inputs, baselines)
)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# apply number of steps to additional forward args
# currently, number of steps is applied only to additional forward arguments
# that are nd-tensors. It is assumed that the first dimension is
# the number of batches.
# dim -> (bsz * #steps x additional_forward_args[0].shape[1:], ...)
input_additional_args = (
_expand_additional_forward_args(additional_forward_args, n_steps)
if additional_forward_args is not None
else None
)
expanded_target = _expand_target(target, n_steps)
# Returns gradient of output with respect to hidden layer.
layer_gradients, _ = compute_layer_gradients_and_eval(
forward_fn=self.forward_func,
layer=self.layer,
inputs=scaled_features_tpl,
target_ind=expanded_target,
additional_forward_args=input_additional_args,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
# flattening grads so that we can multiply them by the step size
# calling contiguous to avoid `memory hole` problems
scaled_grads = tuple(
layer_grad.contiguous().view(n_steps, -1)
* torch.tensor(step_sizes).view(n_steps, 1).to(layer_grad.device)
for layer_grad in layer_gradients
)
# aggregates across all steps for each tensor in the input tuple
attrs = tuple(
_reshape_and_sum(
scaled_grad, n_steps, inputs[0].shape[0], layer_grad.shape[1:]
)
for scaled_grad, layer_grad in zip(scaled_grads, layer_gradients)
)
return _format_output(len(attrs) > 1, attrs)
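# Illustrative usage sketch (not part of the library source), assuming a small
# hypothetical classifier. Note that the attribution has the shape of the
# chosen layer's output, not the shape of the inputs.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 3))
    inputs = torch.randn(5, 8)

    layer_int_inf = InternalInfluence(model, model[1])
    # Internal influence of each hidden neuron on output class 1, integrated
    # along the straight-line path from the zero baseline to the inputs.
    attr = layer_int_inf.attribute(inputs, target=1, n_steps=25)
    print(attr.shape)  # torch.Size([5, 4])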
|
#!/usr/bin/env python3
import typing
from typing import Any, cast, List, Tuple, Union
from captum._utils.common import (
_format_tensor_into_tuples,
_reduce_list,
_sort_key_list,
)
from captum._utils.gradient import (
apply_gradient_requirements,
compute_gradients,
undo_gradient_requirements,
)
from captum._utils.typing import (
Literal,
ModuleOrModuleList,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._core.lrp import LRP
from captum.attr._utils.attribution import LayerAttribution
from torch import Tensor
from torch.nn import Module
class LayerLRP(LRP, LayerAttribution):
r"""
Layer-wise relevance propagation is based on a backward propagation
mechanism applied sequentially to all layers of the model. Here, the
model output score represents the initial relevance which is decomposed
into values for each neuron of the underlying layers. The decomposition
is defined by rules that are chosen for each layer, involving its weights
and activations. Details on the model can be found in the original paper
[https://doi.org/10.1371/journal.pone.0130140]. The implementation is
inspired by the tutorial of the same group
[https://doi.org/10.1016/j.dsp.2017.10.011] and the publication by
Ancona et al. [https://openreview.net/forum?id=Sy21R9JAW].
"""
def __init__(self, model: Module, layer: ModuleOrModuleList) -> None:
"""
Args:
model (Module): The PyTorch model for which relevance is propagated.
Custom rules for a given layer need to
be defined as the attribute
`module.rule` and need to be of type PropagationRule.
layer (torch.nn.Module or list(torch.nn.Module)): Layer or layers
for which attributions are computed.
The size and dimensionality of the attributions
corresponds to the size and dimensionality of the layer's
input or output depending on whether we attribute to the
inputs or outputs of the layer. If value is None, the
relevance for all layers is returned in attribution.
"""
LayerAttribution.__init__(self, model, layer)
LRP.__init__(self, model)
if hasattr(self.model, "device_ids"):
self.device_ids = cast(List[int], self.model.device_ids)
@typing.overload # type: ignore
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
attribute_to_layer_input: bool = False,
verbose: bool = False,
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
attribute_to_layer_input: bool = False,
verbose: bool = False,
) -> Tuple[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Union[Tensor, List[Tensor]],
]:
...
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
attribute_to_layer_input: bool = False,
verbose: bool = False,
) -> Union[
Tensor,
Tuple[Tensor, ...],
List[Union[Tensor, Tuple[Tensor, ...]]],
Tuple[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Union[Tensor, List[Tensor]],
],
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which relevance is
propagated.
If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer input, otherwise it will be computed with respect
to layer output.
verbose (bool, optional): Indicates whether information on application
of rules is printed during propagation.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions** or 2-element tuple of
**attributions**, **delta** or list of **attributions** and **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The propagated relevance values with respect to each
input feature. Attributions will always
be the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned. The sum of attributions
                        is one, rather than corresponding to the prediction score as in
                        other implementations. If attributions for all layers are returned
(layer=None) a list of tensors or tuples of tensors is returned
with entries for each layer.
- **delta** (*Tensor* or list of *Tensor*
returned if return_convergence_delta=True):
Delta is calculated per example, meaning that the number of
elements in returned delta tensor is equal to the number of
examples in input.
If attributions for all layers are returned (layer=None) a list
of tensors is returned with entries for
each layer.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities. It has one
>>> # Conv2D and a ReLU layer.
>>> net = ImageClassifier()
>>> layer_lrp = LayerLRP(net, net.conv1)
>>> input = torch.randn(3, 3, 32, 32)
>>> # Attribution size matches input size: 3x3x32x32
>>> attribution = layer_lrp.attribute(input, target=5)
"""
self.verbose = verbose
self._original_state_dict = self.model.state_dict()
self.layers = []
self._get_layers(self.model)
self._check_and_attach_rules()
self.attribute_to_layer_input = attribute_to_layer_input
self.backward_handles = []
self.forward_handles = []
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
try:
# 1. Forward pass
output = self._compute_output_and_change_weights(
inputs, target, additional_forward_args
)
self._register_forward_hooks()
# 2. Forward pass + backward pass
_ = compute_gradients(
self._forward_fn_wrapper, inputs, target, additional_forward_args
)
relevances = self._get_output_relevance(output)
finally:
self._restore_model()
undo_gradient_requirements(inputs, gradient_mask)
if return_convergence_delta:
delta: Union[Tensor, List[Tensor]]
if isinstance(self.layer, list):
delta = []
for relevance_layer in relevances:
delta.append(
self.compute_convergence_delta(relevance_layer, output)
)
else:
delta = self.compute_convergence_delta(
cast(Tuple[Tensor, ...], relevances), output
)
return relevances, delta # type: ignore
else:
return relevances # type: ignore
def _get_single_output_relevance(self, layer, output):
if self.attribute_to_layer_input:
normalized_relevances = layer.rule.relevance_input
else:
normalized_relevances = layer.rule.relevance_output
key_list = _sort_key_list(list(normalized_relevances.keys()), self.device_ids)
normalized_relevances = _reduce_list(
[normalized_relevances[device_id] for device_id in key_list]
)
if isinstance(normalized_relevances, tuple):
return tuple(
normalized_relevance
* output.reshape((-1,) + (1,) * (normalized_relevance.dim() - 1))
for normalized_relevance in normalized_relevances
)
else:
return normalized_relevances * output.reshape(
(-1,) + (1,) * (normalized_relevances.dim() - 1)
)
def _get_output_relevance(self, output):
if isinstance(self.layer, list):
relevances = []
for layer in self.layer:
relevances.append(self._get_single_output_relevance(layer, output))
return relevances
else:
return self._get_single_output_relevance(self.layer, output)
@staticmethod
def _convert_list_to_tuple(
relevances: Union[List[Any], Tuple[Any, ...]]
) -> Tuple[Any, ...]:
if isinstance(relevances, list):
return tuple(relevances)
else:
return relevances
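# Illustrative usage sketch: shows how a custom propagation rule can be
# attached to a layer before running LayerLRP, as described in the __init__
# docstring above. TinyClassifier is a hypothetical stand-in for a real model,
# and the EpsilonRule import path (captum.attr._utils.lrp_rules) is an
# assumption about the surrounding package layout.
if __name__ == "__main__":
    import torch
    import torch.nn as nn
    from captum.attr._utils.lrp_rules import EpsilonRule

    class TinyClassifier(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(3, 8, 3, padding=1)
            self.relu = nn.ReLU()
            self.pool = nn.MaxPool2d(2)
            self.fc = nn.Linear(8 * 16 * 16, 10)

        def forward(self, x: Tensor) -> Tensor:
            x = self.pool(self.relu(self.conv1(x)))
            return self.fc(x.flatten(1))

    net = TinyClassifier()
    # Optionally override the rule that would otherwise be assigned to conv1.
    net.conv1.rule = EpsilonRule()
    layer_lrp = LayerLRP(net, net.conv1)
    inp = torch.randn(4, 3, 32, 32)
    # Relevance has the shape of conv1's output; delta holds one value per example.
    relevance, delta = layer_lrp.attribute(
        inp, target=3, return_convergence_delta=True
    )
    print(relevance.shape, delta.shape)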
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.common import _format_output
from captum._utils.gradient import _forward_layer_eval
from captum._utils.typing import ModuleOrModuleList
from captum.attr._utils.attribution import LayerAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class LayerActivation(LayerAttribution):
r"""
Computes activation of selected layer for given input.
"""
def __init__(
self,
forward_func: Callable,
layer: ModuleOrModuleList,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module or list of torch.nn.Module): Layer or layers
for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer. If multiple layers are provided, attributions
are returned as a list, each element corresponding to the
activations of the corresponding layer.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids)
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which layer
activation is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer input, otherwise it will be computed with respect
to layer output.
Note that currently it is assumed that either the input
or the output of internal layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* or list of **attributions**:
- **attributions** (*Tensor*, *tuple[Tensor, ...]*, or *list*):
Activation of each neuron in given layer output.
Attributions will always be the same size as the
output of the given layer.
Attributions are returned in a tuple if
the layer inputs / outputs contain multiple tensors,
otherwise a single tensor is returned.
If multiple layers are provided, attributions
are returned as a list, each element corresponding to the
activations of the corresponding layer.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
            >>> # It contains an attribute conv1, which is an instance of nn.Conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> layer_act = LayerActivation(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes layer activation.
>>> # attribution is layer output, with size Nx12x32x32
            >>> attribution = layer_act.attribute(input)
"""
with torch.no_grad():
layer_eval = _forward_layer_eval(
self.forward_func,
inputs,
self.layer,
additional_forward_args,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
if isinstance(self.layer, Module):
return _format_output(len(layer_eval) > 1, layer_eval)
else:
return [
_format_output(len(single_layer_eval) > 1, single_layer_eval)
for single_layer_eval in layer_eval
]
@property
def multiplies_by_inputs(self):
return True
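# Illustrative usage sketch: LayerActivation also accepts a list of layers, in
# which case attribute returns a list with one entry per layer, as documented
# above. TinyNet is a hypothetical stand-in for a real model.
if __name__ == "__main__":
    import torch.nn as nn

    class TinyNet(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(3, 12, 3, padding=1)
            self.conv2 = nn.Conv2d(12, 24, 3, padding=1)
            self.head = nn.Linear(24 * 32 * 32, 10)

        def forward(self, x: Tensor) -> Tensor:
            x = torch.relu(self.conv1(x))
            x = torch.relu(self.conv2(x))
            return self.head(x.flatten(1))

    net = TinyNet()
    layer_act = LayerActivation(net, [net.conv1, net.conv2])
    inp = torch.randn(2, 3, 32, 32)
    # Returns a list: activations of conv1 (2x12x32x32) and conv2 (2x24x32x32).
    activations = layer_act.attribute(inp)
    print([act.shape for act in activations])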
|
#!/usr/bin/env python3
import typing
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_output,
)
from captum._utils.gradient import compute_layer_gradients_and_eval
from captum._utils.typing import BaselineType, Literal, TargetType
from captum.attr._utils.approximation_methods import approximation_parameters
from captum.attr._utils.attribution import GradientAttribution, LayerAttribution
from captum.attr._utils.batching import _batch_attribution
from captum.attr._utils.common import (
_format_input_baseline,
_reshape_and_sum,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class LayerConductance(LayerAttribution, GradientAttribution):
r"""
Computes conductance with respect to the given layer. The
returned output is in the shape of the layer's output, showing the total
conductance of each hidden layer neuron.
The details of the approach can be found here:
https://arxiv.org/abs/1805.12233
https://arxiv.org/abs/1807.09946
Note that this provides the total conductance of each neuron in the
layer's output. To obtain the breakdown of a neuron's conductance by input
features, utilize NeuronConductance instead, and provide the target
neuron index.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
def has_convergence_delta(self) -> bool:
return True
@typing.overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
*,
return_convergence_delta: Literal[True],
attribute_to_layer_input: bool = False,
) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]:
...
@typing.overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
return_convergence_delta: Literal[False] = False,
attribute_to_layer_input: bool = False,
) -> Union[Tensor, Tuple[Tensor, ...]]:
...
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: Union[
None, int, float, Tensor, Tuple[Union[int, float, Tensor], ...]
] = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
return_convergence_delta: bool = False,
attribute_to_layer_input: bool = False,
) -> Union[
Tensor, Tuple[Tensor, ...], Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which layer
conductance is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the starting point from which integral
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be repeated
for each of `n_steps` along the integrated path.
For all other types, the given argument is used for
all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (str, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `gausslegendre` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size at most internal_batch_size,
which are computed (forward / backward passes)
sequentially. internal_batch_size must be at least equal to
2 * #examples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations are
processed in one batch.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer inputs, otherwise it will be computed with respect
to layer outputs.
Note that currently it is assumed that either the input
or the output of internal layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Conductance of each neuron in given layer input or
output. Attributions will always be the same size as
the input or output of the given layer, depending on
whether we attribute to the inputs or outputs
of the layer which is decided by the input flag
`attribute_to_layer_input`.
Attributions are returned in a tuple if
the layer inputs / outputs contain multiple tensors,
otherwise a single tensor is returned.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
The difference between the total
approximated and true conductance.
This is computed using the property that the total sum of
forward_func(inputs) - forward_func(baselines) must equal
the total sum of the attributions.
Delta is calculated per example, meaning that the number of
elements in returned delta tensor is equal to the number of
examples in inputs.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
            >>> # It contains an attribute conv1, which is an instance of nn.Conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> layer_cond = LayerConductance(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes layer conductance for class 3.
>>> # attribution size matches layer output, Nx12x32x32
>>> attribution = layer_cond.attribute(input, target=3)
"""
inputs, baselines = _format_input_baseline(inputs, baselines)
_validate_input(inputs, baselines, n_steps, method)
num_examples = inputs[0].shape[0]
        if internal_batch_size is not None:
attrs = _batch_attribution(
self,
num_examples,
internal_batch_size,
n_steps + 1,
include_endpoint=True,
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
method=method,
attribute_to_layer_input=attribute_to_layer_input,
)
else:
attrs = self._attribute(
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
n_steps=n_steps,
method=method,
attribute_to_layer_input=attribute_to_layer_input,
)
is_layer_tuple = isinstance(attrs, tuple)
attributions = attrs if is_layer_tuple else (attrs,)
if return_convergence_delta:
start_point, end_point = baselines, inputs
delta = self.compute_convergence_delta(
attributions,
start_point,
end_point,
target=target,
additional_forward_args=additional_forward_args,
)
return _format_output(is_layer_tuple, attributions), delta
return _format_output(is_layer_tuple, attributions)
def _attribute(
self,
inputs: Tuple[Tensor, ...],
baselines: Tuple[Union[Tensor, int, float], ...],
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
attribute_to_layer_input: bool = False,
step_sizes_and_alphas: Union[None, Tuple[List[float], List[float]]] = None,
) -> Union[Tensor, Tuple[Tensor, ...]]:
num_examples = inputs[0].shape[0]
if step_sizes_and_alphas is None:
# Retrieve scaling factors for specified approximation method
step_sizes_func, alphas_func = approximation_parameters(method)
alphas = alphas_func(n_steps + 1)
else:
_, alphas = step_sizes_and_alphas
# Compute scaled inputs from baseline to final input.
scaled_features_tpl = tuple(
torch.cat(
[baseline + alpha * (input - baseline) for alpha in alphas], dim=0
).requires_grad_()
for input, baseline in zip(inputs, baselines)
)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# apply number of steps to additional forward args
# currently, number of steps is applied only to additional forward arguments
# that are nd-tensors. It is assumed that the first dimension is
# the number of batches.
# dim -> (#examples * #steps x additional_forward_args[0].shape[1:], ...)
input_additional_args = (
_expand_additional_forward_args(additional_forward_args, n_steps + 1)
if additional_forward_args is not None
else None
)
expanded_target = _expand_target(target, n_steps + 1)
# Conductance Gradients - Returns gradient of output with respect to
# hidden layer and hidden layer evaluated at each input.
(layer_gradients, layer_evals,) = compute_layer_gradients_and_eval(
forward_fn=self.forward_func,
layer=self.layer,
inputs=scaled_features_tpl,
additional_forward_args=input_additional_args,
target_ind=expanded_target,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
# Compute differences between consecutive evaluations of layer_eval.
# This approximates the total input gradient of each step multiplied
# by the step size.
grad_diffs = tuple(
layer_eval[num_examples:] - layer_eval[:-num_examples]
for layer_eval in layer_evals
)
# Element-wise multiply gradient of output with respect to hidden layer
# and summed gradients with respect to input (chain rule) and sum
# across stepped inputs.
attributions = tuple(
_reshape_and_sum(
grad_diff * layer_gradient[:-num_examples],
n_steps,
num_examples,
layer_eval.shape[1:],
)
for layer_gradient, layer_eval, grad_diff in zip(
layer_gradients, layer_evals, grad_diffs
)
)
return _format_output(len(attributions) > 1, attributions)
@property
def multiplies_by_inputs(self):
return True
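# Illustrative usage sketch: shows the return_convergence_delta and
# internal_batch_size options documented above. TinyNet is a hypothetical
# stand-in for a real model.
if __name__ == "__main__":
    import torch.nn as nn

    class TinyNet(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(3, 12, 3, padding=1)
            self.head = nn.Linear(12 * 32 * 32, 10)

        def forward(self, x: Tensor) -> Tensor:
            return self.head(torch.relu(self.conv1(x)).flatten(1))

    net = TinyNet()
    layer_cond = LayerConductance(net, net.conv1)
    inp = torch.randn(2, 3, 32, 32)
    # Conductance of conv1's output neurons for class 3, evaluated in chunks of
    # at most 8 scaled examples per forward/backward pass. Delta holds one value
    # per example and measures how far the attributions are from summing to
    # forward(inputs) - forward(baselines).
    attr, delta = layer_cond.attribute(
        inp,
        target=3,
        n_steps=20,
        internal_batch_size=8,
        return_convergence_delta=True,
    )
    print(attr.shape, delta.shape)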
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
import torch
import torch.nn.functional as F
from captum._utils.common import (
_format_additional_forward_args,
_format_output,
_format_tensor_into_tuples,
)
from captum._utils.gradient import compute_layer_gradients_and_eval
from captum._utils.typing import TargetType
from captum.attr._utils.attribution import GradientAttribution, LayerAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class LayerGradCam(LayerAttribution, GradientAttribution):
r"""
Computes GradCAM attribution for chosen layer. GradCAM is designed for
convolutional neural networks, and is usually applied to the last
convolutional layer.
    GradCAM computes the gradients of the target output with respect to
    the given layer, averages these gradients over all dimensions after the
    channel dimension to obtain one value per channel, and multiplies the
    average gradient for each channel by the layer activations. The results
    are summed over all channels.
Note that in the original GradCAM algorithm described in the paper,
ReLU is applied to the output, returning only non-negative attributions.
For providing more flexibility to the user, we choose to not perform the
ReLU internally by default and return the sign information. To match the
original GradCAM algorithm, it is necessary to pass the parameter
relu_attributions=True to apply ReLU on the final
attributions or alternatively only visualize the positive attributions.
Note: this procedure sums over the second dimension (# of channels),
so the output of GradCAM attributions will have a second
dimension of 1, but all other dimensions will match that of the layer
output.
GradCAM attributions are generally upsampled and can be viewed as a
mask to the input, since a convolutional layer output generally
matches the input image spatially. This upsampling can be performed
using LayerAttribution.interpolate, as shown in the example below.
More details regarding the GradCAM method can be found in the
original paper here:
https://arxiv.org/abs/1610.02391
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
                        Output size of attribute matches this layer's output
                        dimensions, except for the channel dimension (dim 1),
                        which will be 1, since GradCAM sums over channels.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
relu_attributions: bool = False,
attr_dim_summation: bool = True,
) -> Union[Tensor, Tuple[Tensor, ...]]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which attributions
are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attributions with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to the
layer input, otherwise it will be computed with respect
to layer output.
Note that currently it is assumed that either the input
or the outputs of internal layers, depending on whether we
attribute to the input or output, are single tensors.
Support for multiple tensors will be added later.
Default: False
relu_attributions (bool, optional): Indicates whether to
apply a ReLU operation on the final attribution,
returning only non-negative attributions. Setting this
flag to True matches the original GradCAM algorithm,
otherwise, by default, both positive and negative
attributions are returned.
Default: False
attr_dim_summation (bool, optional): Indicates whether to
sum attributions along dimension 1 (usually channel).
The default (True) means to sum along dimension 1.
Default: True
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attributions based on GradCAM method.
Attributions will be the same size as the
                        output of the given layer, except for the channel
                        dimension (dim 1), which will be 1 due to summing over channels.
Attributions are returned in a tuple if
the layer inputs / outputs contain multiple tensors,
otherwise a single tensor is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
            >>> # It contains a layer conv4, which is an instance of nn.Conv2d,
>>> # and the output of this layer has dimensions Nx50x8x8.
>>> # It is the last convolution layer, which is the recommended
>>> # use case for GradCAM.
>>> net = ImageClassifier()
>>> layer_gc = LayerGradCam(net, net.conv4)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes layer GradCAM for class 3.
>>> # attribution size matches layer output except for dimension
>>> # 1, so dimensions of attr would be Nx1x8x8.
>>> attr = layer_gc.attribute(input, 3)
>>> # GradCAM attributions are often upsampled and viewed as a
>>> # mask to the input, since the convolutional layer output
>>> # spatially matches the original input image.
>>> # This can be done with LayerAttribution's interpolate method.
>>> upsampled_attr = LayerAttribution.interpolate(attr, (32, 32))
"""
inputs = _format_tensor_into_tuples(inputs)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# Returns gradient of output with respect to
# hidden layer and hidden layer evaluated at each input.
layer_gradients, layer_evals = compute_layer_gradients_and_eval(
self.forward_func,
self.layer,
inputs,
target,
additional_forward_args,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
summed_grads = tuple(
torch.mean(
layer_grad,
dim=tuple(x for x in range(2, len(layer_grad.shape))),
keepdim=True,
)
if len(layer_grad.shape) > 2
else layer_grad
for layer_grad in layer_gradients
)
if attr_dim_summation:
scaled_acts = tuple(
torch.sum(summed_grad * layer_eval, dim=1, keepdim=True)
for summed_grad, layer_eval in zip(summed_grads, layer_evals)
)
else:
scaled_acts = tuple(
summed_grad * layer_eval
for summed_grad, layer_eval in zip(summed_grads, layer_evals)
)
if relu_attributions:
scaled_acts = tuple(F.relu(scaled_act) for scaled_act in scaled_acts)
return _format_output(len(scaled_acts) > 1, scaled_acts)
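# Illustrative usage sketch: reproduces the original GradCAM recipe described
# above by passing relu_attributions=True and upsampling the result to the
# input resolution with LayerAttribution.interpolate. TinyCNN is a hypothetical
# stand-in for a real convolutional classifier.
if __name__ == "__main__":
    import torch.nn as nn

    class TinyCNN(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.features = nn.Sequential(
                nn.Conv2d(3, 16, 3, padding=1),
                nn.ReLU(),
                nn.MaxPool2d(4),  # 32x32 -> 8x8
                nn.Conv2d(16, 32, 3, padding=1),
                nn.ReLU(),
            )
            self.head = nn.Linear(32 * 8 * 8, 10)

        def forward(self, x: Tensor) -> Tensor:
            return self.head(self.features(x).flatten(1))

    net = TinyCNN()
    last_conv = net.features[3]  # the last convolutional layer
    layer_gc = LayerGradCam(net, last_conv)
    inp = torch.randn(2, 3, 32, 32)
    # Non-negative attributions summed over channels: shape 2x1x8x8.
    attr = layer_gc.attribute(inp, target=3, relu_attributions=True)
    # Upsample to the input's spatial size so it can be used as an input mask.
    mask = LayerAttribution.interpolate(attr, (32, 32))
    print(attr.shape, mask.shape)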
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
from captum._utils.common import (
_format_additional_forward_args,
_format_output,
_format_tensor_into_tuples,
)
from captum._utils.gradient import compute_layer_gradients_and_eval
from captum._utils.typing import ModuleOrModuleList, TargetType
from captum.attr._utils.attribution import GradientAttribution, LayerAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class LayerGradientXActivation(LayerAttribution, GradientAttribution):
r"""
Computes element-wise product of gradient and activation for selected
layer on given inputs.
"""
def __init__(
self,
forward_func: Callable,
layer: ModuleOrModuleList,
device_ids: Union[None, List[int]] = None,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module or list of torch.nn.Module): Layer or layers
for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer. If multiple layers are provided, attributions
are returned as a list, each element corresponding to the
attributions of the corresponding layer.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in,
then this type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
                        More details can be found here:
https://arxiv.org/abs/1711.06104
In case of layer gradient x activation, if `multiply_by_inputs`
is set to True, final sensitivity scores are being multiplied by
layer activations for inputs.
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which attributions
are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer input, otherwise it will be computed with respect
to layer output.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* or list of **attributions**:
- **attributions** (*Tensor*, *tuple[Tensor, ...]*, or *list*):
Product of gradient and activation for each
neuron in given layer output.
Attributions will always be the same size as the
output of the given layer.
Attributions are returned in a tuple if
the layer inputs / outputs contain multiple tensors,
otherwise a single tensor is returned.
If multiple layers are provided, attributions
are returned as a list, each element corresponding to the
activations of the corresponding layer.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
            >>> # It contains an attribute conv1, which is an instance of nn.Conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> layer_ga = LayerGradientXActivation(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes layer activation x gradient for class 3.
>>> # attribution size matches layer output, Nx12x32x32
>>> attribution = layer_ga.attribute(input, 3)
"""
inputs = _format_tensor_into_tuples(inputs)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# Returns gradient of output with respect to
# hidden layer and hidden layer evaluated at each input.
layer_gradients, layer_evals = compute_layer_gradients_and_eval(
self.forward_func,
self.layer,
inputs,
target,
additional_forward_args,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
if isinstance(self.layer, Module):
return _format_output(
len(layer_evals) > 1,
self.multiply_gradient_acts(layer_gradients, layer_evals),
)
else:
return [
_format_output(
len(layer_evals[i]) > 1,
self.multiply_gradient_acts(layer_gradients[i], layer_evals[i]),
)
for i in range(len(self.layer))
]
def multiply_gradient_acts(
self, gradients: Tuple[Tensor, ...], evals: Tuple[Tensor, ...]
) -> Tuple[Tensor, ...]:
return tuple(
single_gradient * single_eval
if self.multiplies_by_inputs
else single_gradient
for single_gradient, single_eval in zip(gradients, evals)
)
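# Illustrative usage sketch: contrasts the default "global" attribution with
# the "local" variant obtained via multiply_by_inputs=False, as explained in
# the constructor docstring above. TinyNet is a hypothetical stand-in for a
# real model.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    class TinyNet(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(3, 12, 3, padding=1)
            self.head = nn.Linear(12 * 32 * 32, 10)

        def forward(self, x: Tensor) -> Tensor:
            return self.head(torch.relu(self.conv1(x)).flatten(1))

    net = TinyNet()
    inp = torch.randn(2, 3, 32, 32)
    # Global attribution: layer gradient multiplied by the layer activation.
    global_attr = LayerGradientXActivation(net, net.conv1).attribute(inp, target=3)
    # Local attribution: the raw layer gradient, without the activation factor.
    local_attr = LayerGradientXActivation(
        net, net.conv1, multiply_by_inputs=False
    ).attribute(inp, target=3)
    print(global_attr.shape, local_attr.shape)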
|
#!/usr/bin/env python3
import typing
from typing import Any, Callable, cast, List, Tuple, Union
import numpy as np
import torch
from captum._utils.gradient import _forward_layer_eval, compute_layer_gradients_and_eval
from captum._utils.typing import Literal, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.gradient_shap import _scale_input
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._utils.attribution import GradientAttribution, LayerAttribution
from captum.attr._utils.common import (
_compute_conv_delta_and_format_attrs,
_format_callable_baseline,
_format_input_baseline,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class LayerGradientShap(LayerAttribution, GradientAttribution):
r"""
Implements gradient SHAP for layer based on the implementation from SHAP's
    primary author. For reference, please view:
    https://github.com/slundberg/shap#deep-learning-example-with-gradientexplainer-tensorflowkeraspytorch-models
    A Unified Approach to Interpreting Model Predictions
    https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions
GradientShap approximates SHAP values by computing the expectations of
gradients by randomly sampling from the distribution of baselines/references.
It adds white noise to each input sample `n_samples` times, selects a
random baseline from baselines' distribution and a random point along the
path between the baseline and the input, and computes the gradient of
outputs with respect to selected random points in chosen `layer`.
The final SHAP values represent the expected values of
`gradients * (layer_attr_inputs - layer_attr_baselines)`.
GradientShap makes an assumption that the input features are independent
and that the explanation model is linear, meaning that the explanations
are modeled through the additive composition of feature effects.
Under those assumptions, SHAP value can be approximated as the expectation
of gradients that are computed for randomly generated `n_samples` input
samples after adding gaussian noise `n_samples` times to each input for
different baselines/references.
In some sense it can be viewed as an approximation of integrated gradients
by computing the expectations of gradients for different baselines.
Current implementation uses Smoothgrad from :class:`.NoiseTunnel` in order to
randomly draw samples from the distribution of baselines, add noise to input
samples and compute the expectation (smoothgrad).
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in,
then this type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
                        More details can be found here:
https://arxiv.org/abs/1711.06104
In case of layer gradient shap, if `multiply_by_inputs`
is set to True, the sensitivity scores for scaled inputs
are being multiplied by
layer activations for inputs - layer activations for baselines.
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[TensorOrTupleOfTensorsGeneric, Callable],
n_samples: int = 5,
stdevs: Union[float, Tuple[float, ...]] = 0.0,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
attribute_to_layer_input: bool = False,
) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[TensorOrTupleOfTensorsGeneric, Callable],
n_samples: int = 5,
stdevs: Union[float, Tuple[float, ...]] = 0.0,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
attribute_to_layer_input: bool = False,
) -> Union[Tensor, Tuple[Tensor, ...]]:
...
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[TensorOrTupleOfTensorsGeneric, Callable],
n_samples: int = 5,
stdevs: Union[float, Tuple[float, ...]] = 0.0,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
attribute_to_layer_input: bool = False,
) -> Union[
Tensor, Tuple[Tensor, ...], Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input which are used to compute
SHAP attribution values for a given `layer`. If `forward_func`
takes a single tensor as input, a single input tensor should
be provided.
If `forward_func` takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (Tensor, tuple[Tensor, ...], or Callable):
Baselines define the starting point from which expectation
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
the first dimension equal to the number of examples
in the baselines' distribution. The remaining dimensions
must match with input tensor's dimension starting from
the second dimension.
- a tuple of tensors, if inputs is a tuple of tensors,
with the first dimension of any tensor inside the tuple
equal to the number of examples in the baseline's
distribution. The remaining dimensions must match
the dimensions of the corresponding input tensor
starting from the second dimension.
- callable function, optionally takes `inputs` as an
argument and either returns a single tensor
or a tuple of those.
It is recommended that the number of samples in the baselines'
tensors is larger than one.
n_samples (int, optional): The number of randomly generated examples
per sample in the input batch. Random examples are
generated by adding gaussian random noise to each sample.
Default: `5` if `n_samples` is not provided.
stdevs (float or tuple of float, optional): The standard deviation
of gaussian noise with zero mean that is added to each
input in the batch. If `stdevs` is a single float value
then that same value is used for all inputs. If it is
a tuple, then it must have the same length as the inputs
tuple. In this case, each stdev value in the stdevs tuple
corresponds to the input with the same index in the inputs
tuple.
Default: 0.0
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It can contain a tuple of ND tensors or
any arbitrary python type of any shape.
                        In case of the ND tensor the first dimension of the
                        tensor must correspond to the batch size. It will be
                        repeated for each of the `n_samples` randomly generated
                        input samples.
Note that the attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer input, otherwise it will be computed with respect
to layer output.
Note that currently it is assumed that either the input
or the output of internal layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution score computed based on GradientSHAP with
respect to layer's input or output. Attributions will always
be the same size as the provided layer's inputs or outputs,
depending on whether we attribute to the inputs or outputs
of the layer.
Attributions are returned in a tuple if
the layer inputs / outputs contain multiple tensors,
otherwise a single tensor is returned.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
This is computed using the property that the total
sum of forward_func(inputs) - forward_func(baselines)
must be very close to the total sum of the attributions
based on layer gradient SHAP.
Delta is calculated for each example in the input after adding
`n_samples` times gaussian noise to each of them. Therefore,
the dimensionality of the deltas tensor is equal to the
`number of examples in the input` * `n_samples`
The deltas are ordered by each input example and `n_samples`
noisy samples generated for it.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> layer_grad_shap = LayerGradientShap(net, net.linear1)
>>> input = torch.randn(3, 3, 32, 32, requires_grad=True)
>>> # choosing baselines randomly
>>> baselines = torch.randn(20, 3, 32, 32)
            >>> # Computes gradient SHAP of output layer when target is equal
            >>> # to 5 with respect to the layer linear1.
            >>> # Attribution size matches the size of the linear1 layer
>>> attribution = layer_grad_shap.attribute(input, baselines,
target=5)
"""
# since `baselines` is a distribution, we can generate it using a function
# rather than passing it as an input argument
baselines = _format_callable_baseline(baselines, inputs)
assert isinstance(baselines[0], torch.Tensor), (
"Baselines distribution has to be provided in a form "
"of a torch.Tensor {}.".format(baselines[0])
)
input_min_baseline_x_grad = LayerInputBaselineXGradient(
self.forward_func,
self.layer,
device_ids=self.device_ids,
multiply_by_inputs=self.multiplies_by_inputs,
)
nt = NoiseTunnel(input_min_baseline_x_grad)
attributions = nt.attribute.__wrapped__(
nt, # self
inputs,
nt_type="smoothgrad",
nt_samples=n_samples,
stdevs=stdevs,
draw_baseline_from_distrib=True,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
return_convergence_delta=return_convergence_delta,
attribute_to_layer_input=attribute_to_layer_input,
)
return attributions
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
class LayerInputBaselineXGradient(LayerAttribution, GradientAttribution):
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in,
then this type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
                        More details can be found here:
https://arxiv.org/abs/1711.06104
In case of layer input minus baseline x gradient,
if `multiply_by_inputs` is set to True, the sensitivity scores
for scaled inputs are being multiplied by
layer activations for inputs - layer activations for baselines.
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@typing.overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: Union[Tensor, Tuple[Tensor, ...]],
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
attribute_to_layer_input: bool = False,
) -> Union[Tensor, Tuple[Tensor, ...]]:
...
@typing.overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: Union[Tensor, Tuple[Tensor, ...]],
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
attribute_to_layer_input: bool = False,
) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]:
...
@log_usage()
def attribute( # type: ignore
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: Union[Tensor, Tuple[Tensor, ...]],
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
attribute_to_layer_input: bool = False,
) -> Union[
Tensor, Tuple[Tensor, ...], Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]
]:
inputs, baselines = _format_input_baseline(inputs, baselines)
rand_coefficient = torch.tensor(
np.random.uniform(0.0, 1.0, inputs[0].shape[0]),
device=inputs[0].device,
dtype=inputs[0].dtype,
)
input_baseline_scaled = tuple(
_scale_input(input, baseline, rand_coefficient)
for input, baseline in zip(inputs, baselines)
)
grads, _ = compute_layer_gradients_and_eval(
self.forward_func,
self.layer,
input_baseline_scaled,
target,
additional_forward_args,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
attr_baselines = _forward_layer_eval(
self.forward_func,
baselines,
self.layer,
additional_forward_args=additional_forward_args,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
attr_inputs = _forward_layer_eval(
self.forward_func,
inputs,
self.layer,
additional_forward_args=additional_forward_args,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
if self.multiplies_by_inputs:
input_baseline_diffs = tuple(
input - baseline for input, baseline in zip(attr_inputs, attr_baselines)
)
attributions = tuple(
input_baseline_diff * grad
for input_baseline_diff, grad in zip(input_baseline_diffs, grads)
)
else:
attributions = grads
return _compute_conv_delta_and_format_attrs(
self,
return_convergence_delta,
attributions,
baselines,
inputs,
additional_forward_args,
target,
cast(Union[Literal[True], Literal[False]], len(attributions) > 1),
)
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
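# The attribute method above interpolates each example between its baseline and
# its input with a per-example random coefficient before taking layer gradients.
# `_scale_input` is imported from a helper module not shown here, so the snippet
# below is only a minimal, self-contained sketch of that scaling step under that
# assumption; the tensor shapes are illustrative.
import torch

def scale_input_sketch(inp: torch.Tensor, baseline: torch.Tensor, coeff: torch.Tensor) -> torch.Tensor:
    # coeff holds one coefficient per example; reshape it so it broadcasts
    # over the remaining (feature) dimensions
    view_shape = (inp.shape[0],) + (1,) * (inp.dim() - 1)
    coeff = coeff.view(view_shape)
    # move each example a random fraction of the way from its baseline to its input
    return coeff * inp + (1.0 - coeff) * baseline

inputs = torch.randn(4, 3)
baselines = torch.zeros(4, 3)
coeffs = torch.rand(4)
print(scale_input_sketch(inputs, baselines, coeffs).shape)  # torch.Size([4, 3])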
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.common import (
_extract_device,
_format_additional_forward_args,
_format_output,
_format_tensor_into_tuples,
_run_forward,
)
from captum._utils.gradient import _forward_layer_eval
from captum._utils.typing import BaselineType, TargetType
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._utils.attribution import LayerAttribution, PerturbationAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.nn.parallel.scatter_gather import scatter
class LayerFeatureAblation(LayerAttribution, PerturbationAttribution):
r"""
A perturbation based approach to computing layer attribution, involving
replacing values in the input / output of a layer with a given baseline /
reference, and computing the difference in output. By default, each
neuron (scalar input / output value) within the layer is replaced
independently.
Passing a layer mask allows neurons to be grouped and
ablated together.
Each neuron in a group receives the same attribution value,
equal to the change in the target output that results from
ablating the entire neuron group.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself
(or otherwise has a device_ids attribute with the device
ID list), then it is not necessary to provide this
argument.
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids)
PerturbationAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer_baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
layer_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
attribute_to_layer_input: bool = False,
perturbations_per_eval: int = 1,
) -> Union[Tensor, Tuple[Tensor, ...]]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which layer
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
layer_baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Layer baselines define reference values which replace each
layer input / output value when ablated.
Layer baselines should be a single tensor with dimensions
matching the input / output of the target layer (or
broadcastable to match it), based
on whether we are attributing to the input or output
of the target layer.
In the cases when `baselines` is not provided, we internally
use zero as the baseline for each neuron.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
layer_mask (Tensor or tuple[Tensor, ...], optional):
layer_mask defines a mask for the layer, grouping
elements of the layer input / output which should be
ablated together.
layer_mask should be a single tensor with dimensions
matching the input / output of the target layer (or
broadcastable to match it), based
on whether we are attributing to the input or output
of the target layer. layer_mask
should contain integers in the range 0 to num_groups
- 1, and all elements with the same value are
considered to be in the same group.
If None, then a layer mask is constructed which assigns
each neuron within the layer as a separate group, which
is ablated independently.
Default: None
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attributions with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer's inputs, otherwise it will be computed with respect
to layer's outputs.
Note that currently it is assumed that either the input
or the output of the layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
perturbations_per_eval (int, optional): Allows ablation of multiple
neuron (groups) to be processed simultaneously in one
call to forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
Default: 1
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution of each neuron in given layer input or
output. Attributions will always be the same size as
the input or output of the given layer, depending on
whether we attribute to the inputs or outputs
of the layer which is decided by the input flag
`attribute_to_layer_input`
Attributions are returned in a tuple if
the layer inputs / outputs contain multiple tensors,
otherwise a single tensor is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x3x3.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # Defining LayerFeatureAblation interpreter
>>> ablator = LayerFeatureAblation(net, net.conv1)
>>> # Computes ablation attribution, ablating each of the 108
>>> # neurons independently.
>>> attr = ablator.attribute(input, target=1)
>>> # Alternatively, we may want to ablate neurons in groups, e.g.
>>> # grouping all the layer outputs in the same row.
>>> # This can be done by creating a layer mask as follows, which
>>> # defines the groups of layer inputs / outputs, e.g.:
>>> # +---+---+---+
>>> # | 0 | 0 | 0 |
>>> # +---+---+---+
>>> # | 1 | 1 | 1 |
>>> # +---+---+---+
>>> # | 2 | 2 | 2 |
>>> # +---+---+---+
>>> # With this mask, all the 36 neurons in a row / channel are ablated
>>> # simultaneously, and the attribution for each neuron in the same
>>> # group (0 - 2) per example is the same.
>>> # The attributions can be calculated as follows:
>>> # layer mask has dimensions 1 x 3 x 3
>>> layer_mask = torch.tensor([[[0,0,0],[1,1,1],
>>> [2,2,2]]])
>>> attr = ablator.attribute(input, target=1,
>>> layer_mask=layer_mask)
"""
def layer_forward_func(*args):
layer_length = args[-1]
layer_input = args[:layer_length]
original_inputs = args[layer_length:-1]
device_ids = self.device_ids
if device_ids is None:
device_ids = getattr(self.forward_func, "device_ids", None)
all_layer_inputs = {}
if device_ids is not None:
scattered_layer_input = scatter(layer_input, target_gpus=device_ids)
for device_tensors in scattered_layer_input:
all_layer_inputs[device_tensors[0].device] = device_tensors
else:
all_layer_inputs[layer_input[0].device] = layer_input
def forward_hook(module, inp, out=None):
device = _extract_device(module, inp, out)
is_layer_tuple = (
isinstance(out, tuple)
if out is not None
else isinstance(inp, tuple)
)
if device not in all_layer_inputs:
raise AssertionError(
"Layer input not placed on appropriate "
"device. If using a DataParallel model, either provide the "
"DataParallel model as forward_func or provide device ids"
" to the constructor."
)
if not is_layer_tuple:
return all_layer_inputs[device][0]
return all_layer_inputs[device]
hook = None
try:
if attribute_to_layer_input:
hook = self.layer.register_forward_pre_hook(forward_hook)
else:
hook = self.layer.register_forward_hook(forward_hook)
eval = _run_forward(self.forward_func, original_inputs, target=target)
finally:
if hook is not None:
hook.remove()
return eval
with torch.no_grad():
inputs = _format_tensor_into_tuples(inputs)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
layer_eval = _forward_layer_eval(
self.forward_func,
inputs,
self.layer,
additional_forward_args,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
layer_eval_len = (len(layer_eval),)
all_inputs = (
(inputs + additional_forward_args + layer_eval_len)
if additional_forward_args is not None
else inputs + layer_eval_len
)
ablator = FeatureAblation(layer_forward_func)
layer_attribs = ablator.attribute.__wrapped__(
ablator, # self
layer_eval,
baselines=layer_baselines,
additional_forward_args=all_inputs,
feature_mask=layer_mask,
perturbations_per_eval=perturbations_per_eval,
)
_attr = _format_output(len(layer_attribs) > 1, layer_attribs)
return _attr
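# LayerFeatureAblation's inner `layer_forward_func` relies on a standard PyTorch
# hook behavior: a forward (pre-)hook that returns a value replaces the layer's
# output (or input), so ablated layer tensors can be fed to the rest of the
# network. The toy model and tensors below are illustrative assumptions used only
# to show that mechanism in isolation.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 3), nn.ReLU(), nn.Linear(3, 2))
x = torch.randn(2, 4)
replacement = torch.zeros(2, 3)  # stands in for an ablated layer output

def override_output(module, inp, out):
    # returning a value from a forward hook replaces the module's output
    return replacement

handle = model[0].register_forward_hook(override_output)
try:
    y = model(x)  # downstream layers now see `replacement` instead of model[0](x)
finally:
    handle.remove()
print(y.shape)  # torch.Size([2, 2])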
|
#!/usr/bin/env python3
import functools
import warnings
from typing import Any, Callable, List, overload, Tuple, Union
import torch
from captum._utils.common import (
_extract_device,
_format_additional_forward_args,
_format_outputs,
)
from captum._utils.gradient import _forward_layer_eval, _run_forward
from captum._utils.typing import BaselineType, Literal, ModuleOrModuleList, TargetType
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._utils.attribution import GradientAttribution, LayerAttribution
from captum.attr._utils.common import (
_format_input_baseline,
_tensorize_baseline,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn.parallel.scatter_gather import scatter
class LayerIntegratedGradients(LayerAttribution, GradientAttribution):
r"""
Layer Integrated Gradients is a variant of Integrated Gradients that assigns
an importance score to layer inputs or outputs, depending on whether we
attribute to the former or to the latter one.
Integrated Gradients is an axiomatic model interpretability algorithm that
attributes / assigns an importance score to each input feature by approximating
the integral of gradients of the model's output with respect to the inputs
along the path (straight line) from given baselines / references to inputs.
Baselines can be provided as input arguments to attribute method.
To approximate the integral we can choose to use either a variant of
Riemann sum or Gauss-Legendre quadrature rule.
More details regarding the integrated gradients method can be found in the
original paper:
https://arxiv.org/abs/1703.01365
"""
def __init__(
self,
forward_func: Callable,
layer: ModuleOrModuleList,
device_ids: Union[None, List[int]] = None,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (ModuleOrModuleList): Layer or list of layers for which attributions
are computed. For each layer the output size of the attribute
matches this layer's input or output dimensions, depending on
whether we attribute to the inputs or outputs of the
layer, corresponding to the attribution of each neuron
in the input or output of this layer.
Please note that layers to attribute on cannot be
dependent on each other. That is, a subset of layers in
`layer` cannot produce the inputs for another layer.
For example, if your model is a simple linked-list
based graph structure (think nn.Sequential), e.g. x -> l1
-> l2 -> l3 -> output. If you pass in any one of those
layers, you cannot pass in another due to the
dependence, e.g. if you pass in l2 you cannot pass in
l1 or l3.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs. global
attribution. If the inputs' multiplier isn't factored in,
the attribution method is called local attribution;
if it is, the method is called global attribution.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of layer integrated gradients, if `multiply_by_inputs`
is set to True, final sensitivity scores are multiplied by
(layer activations for inputs - layer activations for baselines).
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids=device_ids)
GradientAttribution.__init__(self, forward_func)
self.ig = IntegratedGradients(forward_func, multiply_by_inputs)
if isinstance(layer, list) and len(layer) > 1:
warnings.warn(
"Multiple layers provided. Please ensure that each layer is"
"**not** solely dependent on the outputs of"
"another layer. Please refer to the documentation for more"
"detail."
)
@overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType,
target: TargetType,
additional_forward_args: Any,
n_steps: int,
method: str,
internal_batch_size: Union[None, int],
return_convergence_delta: Literal[False],
attribute_to_layer_input: bool,
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
...
@overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType,
target: TargetType,
additional_forward_args: Any,
n_steps: int,
method: str,
internal_batch_size: Union[None, int],
return_convergence_delta: Literal[True],
attribute_to_layer_input: bool,
) -> Tuple[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Tensor,
]:
...
@overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
return_convergence_delta: bool = False,
attribute_to_layer_input: bool = False,
) -> Union[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Tuple[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Tensor,
],
]:
...
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
return_convergence_delta: bool = False,
attribute_to_layer_input: bool = False,
) -> Union[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Tuple[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Tensor,
],
]:
r"""
This method attributes the output of the model with given target index
(in case it is provided, otherwise it assumes that output is a
scalar) to layer inputs or outputs of the model, depending on whether
`attribute_to_layer_input` is set to True or False, using the approach
described above.
In addition to that it also returns, if `return_convergence_delta` is
set to True, integral approximation delta based on the completeness
property of integrated gradients.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which layer integrated
gradients are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the starting point from which integral
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (str, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `gausslegendre` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size at most internal_batch_size,
which are computed (forward / backward passes)
sequentially. internal_batch_size must be at least equal to
#examples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations are
processed in one batch.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer input, otherwise it will be computed with respect
to layer output.
Note that currently it is assumed that either the input
or the output of internal layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Integrated gradients with respect to `layer`'s inputs
or outputs. Attributions will always be the same size and
dimensionality as the input or output of the given layer,
depending on whether we attribute to the inputs or outputs
of the layer which is decided by the input flag
`attribute_to_layer_input`.
For a single layer, attributions are returned in a tuple if
the layer inputs / outputs contain multiple tensors,
otherwise a single tensor is returned.
For multiple layers, attributions will always be
returned as a list. Each element in this list will be
equivalent to the output for a single layer, i.e. if one of
the given layers takes / produces multiple tensors, the
corresponding output element will be a tuple of tensors.
The outputs are ordered in the same order as the layers
given in the constructor.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
The difference between the total approximated and true
integrated gradients. This is computed using the property
that the total sum of forward_func(inputs) -
forward_func(baselines) must equal the total sum of the
integrated gradient.
Delta is calculated per example, meaning that the number of
elements in returned delta tensor is equal to the number of
examples in inputs.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> lig = LayerIntegratedGradients(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes layer integrated gradients for class 3.
>>> # attribution size matches layer output, Nx12x32x32
>>> attribution = lig.attribute(input, target=3)
"""
inps, baselines = _format_input_baseline(inputs, baselines)
_validate_input(inps, baselines, n_steps, method)
baselines = _tensorize_baseline(inps, baselines)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
def flatten_tuple(tup):
return tuple(
sum((list(x) if isinstance(x, (tuple, list)) else [x] for x in tup), [])
)
if self.device_ids is None:
self.device_ids = getattr(self.forward_func, "device_ids", None)
inputs_layer = _forward_layer_eval(
self.forward_func,
inps,
self.layer,
device_ids=self.device_ids,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
)
# if a single layer is provided, wrap its evaluation in a tuple for uniform handling
if not isinstance(self.layer, list):
inputs_layer = (inputs_layer,)
num_outputs = [1 if isinstance(x, Tensor) else len(x) for x in inputs_layer]
num_outputs_cumsum = torch.cumsum(
torch.IntTensor([0] + num_outputs), dim=0 # type: ignore
)
inputs_layer = flatten_tuple(inputs_layer)
baselines_layer = _forward_layer_eval(
self.forward_func,
baselines,
self.layer,
device_ids=self.device_ids,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
)
baselines_layer = flatten_tuple(baselines_layer)
# note: the `inputs` passed to gradient_func below are the scaled layer features along the integration path
def gradient_func(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
if self.device_ids is None or len(self.device_ids) == 0:
scattered_inputs = (inputs,)
else:
# scatter method does not have a precise enough return type in its
# stub, so suppress the type warning.
scattered_inputs = scatter( # type:ignore
inputs, target_gpus=self.device_ids
)
scattered_inputs_dict = {
scattered_input[0].device: scattered_input
for scattered_input in scattered_inputs
}
with torch.autograd.set_grad_enabled(True):
def layer_forward_hook(
module, hook_inputs, hook_outputs=None, layer_idx=0
):
device = _extract_device(module, hook_inputs, hook_outputs)
is_layer_tuple = (
isinstance(hook_outputs, tuple)
# hook_outputs is None if attribute_to_layer_input == True
if hook_outputs is not None
else isinstance(hook_inputs, tuple)
)
if is_layer_tuple:
return scattered_inputs_dict[device][
num_outputs_cumsum[layer_idx] : num_outputs_cumsum[
layer_idx + 1
]
]
return scattered_inputs_dict[device][num_outputs_cumsum[layer_idx]]
hooks = []
try:
layers = self.layer
if not isinstance(layers, list):
layers = [self.layer]
for layer_idx, layer in enumerate(layers):
hook = None
# TODO:
# Allow multiple attribute_to_layer_input flags for
# each layer, i.e. attribute_to_layer_input[layer_idx]
if attribute_to_layer_input:
hook = layer.register_forward_pre_hook(
functools.partial(
layer_forward_hook, layer_idx=layer_idx
)
)
else:
hook = layer.register_forward_hook(
functools.partial(
layer_forward_hook, layer_idx=layer_idx
)
)
hooks.append(hook)
output = _run_forward(
self.forward_func, tuple(), target_ind, additional_forward_args
)
finally:
for hook in hooks:
if hook is not None:
hook.remove()
assert output[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
# torch.unbind(forward_out) is a list of scalar tensor tuples and
# contains batch_size * #steps elements
grads = torch.autograd.grad(torch.unbind(output), inputs)
return grads
self.ig.gradient_func = gradient_func
all_inputs = (
(inps + additional_forward_args)
if additional_forward_args is not None
else inps
)
attributions = self.ig.attribute.__wrapped__( # type: ignore
self.ig, # self
inputs_layer,
baselines=baselines_layer,
target=target,
additional_forward_args=all_inputs,
n_steps=n_steps,
method=method,
internal_batch_size=internal_batch_size,
return_convergence_delta=False,
)
# handle multiple outputs
output: List[Tuple[Tensor, ...]] = [
tuple(
attributions[
int(num_outputs_cumsum[i]) : int(num_outputs_cumsum[i + 1])
]
)
for i in range(len(num_outputs))
]
if return_convergence_delta:
start_point, end_point = baselines, inps
# computes approximation error based on the completeness axiom
delta = self.compute_convergence_delta(
attributions,
start_point,
end_point,
additional_forward_args=additional_forward_args,
target=target,
)
return _format_outputs(isinstance(self.layer, list), output), delta
return _format_outputs(isinstance(self.layer, list), output)
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self.ig.multiplies_by_inputs
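# A hedged end-to-end sketch of LayerIntegratedGradients applied to an embedding
# layer, the common case where plain IntegratedGradients cannot operate directly
# on integer token ids. The toy classifier, vocabulary size and baseline choice
# below are illustrative assumptions, not part of the library.
import torch
import torch.nn as nn
from captum.attr import LayerIntegratedGradients

class ToyTextClassifier(nn.Module):
    def __init__(self, vocab_size=100, emb_dim=8, num_classes=2):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        self.fc = nn.Linear(emb_dim, num_classes)

    def forward(self, token_ids):
        # mean-pool token embeddings, then classify
        return self.fc(self.embedding(token_ids).mean(dim=1))

model = ToyTextClassifier()
token_ids = torch.randint(1, 100, (4, 7))    # batch of 4 sequences of length 7
baseline_ids = torch.zeros_like(token_ids)   # e.g. the padding token id
lig = LayerIntegratedGradients(model, model.embedding)
attr, delta = lig.attribute(
    token_ids,
    baselines=baseline_ids,
    target=1,
    n_steps=25,
    return_convergence_delta=True,
)
print(attr.shape)   # matches the embedding output: torch.Size([4, 7, 8])
print(delta.shape)  # one delta per example: torch.Size([4])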
|
#!/usr/bin/env python3
import typing
from typing import Any, Callable, cast, Sequence, Tuple, Union
import torch
from captum._utils.common import (
_expand_target,
_format_additional_forward_args,
_format_baseline,
_format_tensor_into_tuples,
ExpansionTypes,
)
from captum._utils.gradient import compute_layer_gradients_and_eval
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._core.deep_lift import DeepLift, DeepLiftShap
from captum.attr._utils.attribution import LayerAttribution
from captum.attr._utils.common import (
_call_custom_attribution_func,
_compute_conv_delta_and_format_attrs,
_format_callable_baseline,
_tensorize_baseline,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class LayerDeepLift(LayerAttribution, DeepLift):
r"""
Implements DeepLIFT algorithm for the layer based on the following paper:
Learning Important Features Through Propagating Activation Differences,
Avanti Shrikumar, et. al.
https://arxiv.org/abs/1704.02685
and the gradient formulation proposed in:
Towards better understanding of gradient-based attribution methods for
deep neural networks, Marco Ancona, et.al.
https://openreview.net/pdf?id=Sy21R9JAW
This implementation supports only Rescale rule. RevealCancel rule will
be supported in later releases.
Although DeepLIFT's (Rescale Rule) attribution quality is comparable with
Integrated Gradients, it runs significantly faster than Integrated
Gradients and is preferred for large datasets.
Currently we only support a limited number of non-linear activations
but the plan is to expand the list in the future.
Note: Currently we cannot access the building blocks
of PyTorch's built-in LSTMs, RNNs and GRUs, such as Tanh and Sigmoid.
Nonetheless, it is possible to build custom LSTMs, RNNs and GRUs
with performance similar to built-in ones using TorchScript.
More details on how to build custom RNNs can be found here:
https://pytorch.org/blog/optimizing-cuda-rnn-with-torchscript/
"""
def __init__(
self,
model: Module,
layer: Module,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (torch.nn.Module): Layer for which attributions are computed.
The size and dimensionality of the attributions
corresponds to the size and dimensionality of the layer's
input or output depending on whether we attribute to the
inputs or outputs of the layer.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs. global
attribution. If the inputs' multiplier isn't factored in,
the attribution method is called local attribution;
if it is, the method is called global attribution.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of Layer DeepLift, if `multiply_by_inputs`
is set to True, final sensitivity scores are multiplied by
(layer activations for inputs - layer activations for baselines).
This flag applies only if `custom_attribution_func` is
set to None.
"""
LayerAttribution.__init__(self, model, layer)
DeepLift.__init__(self, model)
self.model = model
self._multiply_by_inputs = multiply_by_inputs
# Ignoring mypy error for inconsistent signature with DeepLift
@typing.overload # type: ignore
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
attribute_to_layer_input: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Union[Tensor, Tuple[Tensor, ...]]:
...
@typing.overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
attribute_to_layer_input: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]:
...
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
attribute_to_layer_input: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Union[
Tensor, Tuple[Tensor, ...], Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which layer
attributions are computed. If model takes a
single tensor as input, a single input tensor should be
provided. If model takes multiple tensors as input,
a tuple of the input tensors should be provided. It is
assumed that for all given input tensors, dimension 0
corresponds to the number of examples (aka batch size),
and if multiple input tensors are provided, the examples
must be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer input, otherwise it will be computed with respect
to layer output.
Note that currently it is assumed that either the input
or the output of internal layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
custom_attribution_func (Callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided, we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same length.
`custom_attribution_func` returns a tuple of attribution
tensors that have the same length as the `inputs`.
Default: None
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution score computed based on DeepLift's rescale rule with
respect to layer's inputs or outputs. Attributions will always be the
same size as the provided layer's inputs or outputs, depending on
whether we attribute to the inputs or outputs of the layer.
If the layer input / output is a single tensor, then
just a tensor is returned; if the layer input / output
has multiple tensors, then a corresponding tuple
of tensors is returned.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
This is computed using the property that the total sum of
model(inputs) - model(baselines) must equal the
total sum of the attributions computed based on DeepLift's
rescale rule.
Delta is calculated per example, meaning that the number of
elements in returned delta tensor is equal to the number of
examples in input.
Note that the logic described for deltas is guaranteed
when the default logic for attribution computations is used,
meaning that the `custom_attribution_func=None`, otherwise
it is not guaranteed and depends on the specifics of the
`custom_attribution_func`.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> # creates an instance of LayerDeepLift to interpret target
>>> # class 1 with respect to conv4 layer.
>>> dl = LayerDeepLift(net, net.conv4)
>>> input = torch.randn(1, 3, 32, 32, requires_grad=True)
>>> # Computes deeplift attribution scores for conv4 layer and class 1.
>>> attribution = dl.attribute(input, target=1)
"""
inputs = _format_tensor_into_tuples(inputs)
baselines = _format_baseline(baselines, inputs)
_validate_input(inputs, baselines)
baselines = _tensorize_baseline(inputs, baselines)
main_model_hooks = []
try:
main_model_hooks = self._hook_main_model()
self.model.apply(
lambda mod: self._register_hooks(
mod, attribute_to_layer_input=attribute_to_layer_input
)
)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
expanded_target = _expand_target(
target, 2, expansion_type=ExpansionTypes.repeat
)
wrapped_forward_func = self._construct_forward_func(
self.model,
(inputs, baselines),
expanded_target,
additional_forward_args,
)
def chunk_output_fn(out: TensorOrTupleOfTensorsGeneric) -> Sequence:
if isinstance(out, Tensor):
return out.chunk(2)
return tuple(out_sub.chunk(2) for out_sub in out)
gradients, attrs = compute_layer_gradients_and_eval(
wrapped_forward_func,
self.layer,
inputs,
attribute_to_layer_input=attribute_to_layer_input,
output_fn=lambda out: chunk_output_fn(out),
)
attr_inputs = tuple(map(lambda attr: attr[0], attrs))
attr_baselines = tuple(map(lambda attr: attr[1], attrs))
gradients = tuple(map(lambda grad: grad[0], gradients))
if custom_attribution_func is None:
if self.multiplies_by_inputs:
attributions = tuple(
(input - baseline) * gradient
for input, baseline, gradient in zip(
attr_inputs, attr_baselines, gradients
)
)
else:
attributions = gradients
else:
attributions = _call_custom_attribution_func(
custom_attribution_func, gradients, attr_inputs, attr_baselines
)
finally:
# remove hooks from all activations
self._remove_hooks(main_model_hooks)
return _compute_conv_delta_and_format_attrs(
self,
return_convergence_delta,
attributions,
baselines,
inputs,
additional_forward_args,
target,
cast(Union[Literal[True], Literal[False]], len(attributions) > 1),
)
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
class LayerDeepLiftShap(LayerDeepLift, DeepLiftShap):
r"""
Extends LayerDeepLift and DeepLiftShap algorithms and approximates SHAP
values for given input `layer`.
For each input sample - baseline pair it computes DeepLift attributions
with respect to the inputs or outputs of the given `layer` and averages
the resulting attributions across baselines. Whether to compute the attributions
with respect to the inputs or outputs of the layer is defined by the
input flag `attribute_to_layer_input`.
More details about the algorithm can be found here:
https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf
Note that the explanation model:
1. Assumes that input features are independent of one another
2. Is linear, meaning that the explanations are modeled through
the additive composition of feature effects.
Although it assumes a linear model for each explanation, the overall
model across multiple explanations can be complex and non-linear.
"""
def __init__(
self,
model: Module,
layer: Module,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (torch.nn.Module): Layer for which attributions are computed.
The size and dimensionality of the attributions
corresponds to the size and dimensionality of the layer's
input or output depending on whether we attribute to the
inputs or outputs of the layer.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs. global
attribution. If the inputs' multiplier isn't factored in,
the attribution method is called local attribution;
if it is, the method is called global attribution.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of LayerDeepLiftShap, if `multiply_by_inputs`
is set to True, final sensitivity scores are multiplied by
(layer activations for inputs - layer activations for baselines).
This flag applies only if `custom_attribution_func` is
set to None.
"""
LayerDeepLift.__init__(self, model, layer)
DeepLiftShap.__init__(self, model, multiply_by_inputs)
# Ignoring mypy error for inconsistent signature with DeepLiftShap
@typing.overload # type: ignore
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: Union[
Tensor, Tuple[Tensor, ...], Callable[..., Union[Tensor, Tuple[Tensor, ...]]]
],
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
attribute_to_layer_input: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Union[Tensor, Tuple[Tensor, ...]]:
...
@typing.overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: Union[
Tensor, Tuple[Tensor, ...], Callable[..., Union[Tensor, Tuple[Tensor, ...]]]
],
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
attribute_to_layer_input: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]:
...
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: Union[
Tensor, Tuple[Tensor, ...], Callable[..., Union[Tensor, Tuple[Tensor, ...]]]
],
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
attribute_to_layer_input: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Union[
Tensor, Tuple[Tensor, ...], Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which layer
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (Tensor, tuple[Tensor, ...], or Callable):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references. Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
the first dimension equal to the number of examples
in the baselines' distribution. The remaining dimensions
must match with input tensor's dimension starting from
the second dimension.
- a tuple of tensors, if inputs is a tuple of tensors,
with the first dimension of any tensor inside the tuple
equal to the number of examples in the baseline's
distribution. The remaining dimensions must match
the dimensions of the corresponding input tensor
starting from the second dimension.
- callable function, optionally takes `inputs` as an
argument and either returns a single tensor
or a tuple of those.
It is recommended that the number of samples in the baselines'
tensors is larger than one.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attributions with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer inputs, otherwise it will be computed with respect
to layer outputs.
Note that currently it assumes that both the inputs and
outputs of internal layers are single tensors.
Support for multiple tensors will be added later.
Default: False
custom_attribution_func (Callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided, we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same
length. `custom_attribution_func` returns a tuple of
attribution tensors that have the same length as the
`inputs`.
Default: None
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution score computed based on DeepLift's rescale rule
with respect to layer's inputs or outputs. Attributions
will always be the same size as the provided layer's inputs
or outputs, depending on whether we attribute to the inputs
or outputs of the layer.
Attributions are returned in a tuple based on whether
the layer inputs / outputs are contained in a tuple
from a forward hook. For standard modules, inputs of
a single tensor are usually wrapped in a tuple, while
outputs of a single tensor are not.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
This is computed using the property that the
total sum of model(inputs) - model(baselines)
must be very close to the total sum of attributions
computed based on approximated SHAP values using
DeepLift's rescale rule.
Delta is calculated for each example input and baseline pair,
meaning that the number of elements in returned delta tensor
is equal to the
`number of examples in input` * `number of examples
in baseline`. The deltas are ordered first by
input example and then by baseline.
Note that the logic described for deltas is guaranteed
when the default logic for attribution computations is used,
meaning that the `custom_attribution_func=None`, otherwise
it is not guaranteed and depends on the specifics of the
`custom_attribution_func`.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> # creates an instance of LayerDeepLiftShap to interpret target
>>> # class 3 with respect to conv4 layer.
>>> dl = LayerDeepLiftShap(net, net.conv4)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Generates a distribution of baselines; LayerDeepLiftShap requires
>>> # more than one baseline example.
>>> baselines = torch.randn(4, 3, 32, 32)
>>> # Computes shap values using deeplift for class 3.
>>> attribution = dl.attribute(input, baselines, target=3)
"""
inputs = _format_tensor_into_tuples(inputs)
baselines = _format_callable_baseline(baselines, inputs)
assert isinstance(baselines[0], torch.Tensor) and baselines[0].shape[0] > 1, (
"Baselines distribution has to be provided in form of a torch.Tensor"
" with more than one example but found: {}."
" If baselines are provided in shape of scalars or with a single"
" baseline example, `LayerDeepLift`"
" approach can be used instead.".format(baselines[0])
)
# batch sizes
inp_bsz = inputs[0].shape[0]
base_bsz = baselines[0].shape[0]
(
exp_inp,
exp_base,
exp_target,
exp_addit_args,
) = DeepLiftShap._expand_inputs_baselines_targets(
self, baselines, inputs, target, additional_forward_args
)
attributions = LayerDeepLift.attribute.__wrapped__( # type: ignore
self,
exp_inp,
exp_base,
target=exp_target,
additional_forward_args=exp_addit_args,
return_convergence_delta=cast(
Literal[True, False], return_convergence_delta
),
attribute_to_layer_input=attribute_to_layer_input,
custom_attribution_func=custom_attribution_func,
)
if return_convergence_delta:
attributions, delta = attributions
if isinstance(attributions, tuple):
attributions = tuple(
DeepLiftShap._compute_mean_across_baselines(
self, inp_bsz, base_bsz, cast(Tensor, attrib)
)
for attrib in attributions
)
else:
attributions = DeepLiftShap._compute_mean_across_baselines(
self, inp_bsz, base_bsz, attributions
)
if return_convergence_delta:
return attributions, delta
else:
return attributions
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
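# A hedged usage sketch of LayerDeepLiftShap with a small baseline distribution.
# The convolutional toy model is an illustrative assumption; the points it shows
# are that `baselines` must contain more than one reference example and that the
# returned attributions match the target layer's output shape, averaged over the
# baseline distribution per input example.
import torch
import torch.nn as nn
from captum.attr import LayerDeepLiftShap

class ToyConvNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 4, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.fc = nn.Linear(4 * 8 * 8, 10)

    def forward(self, x):
        x = self.relu(self.conv(x))
        return self.fc(x.flatten(start_dim=1))

model = ToyConvNet()
inputs = torch.randn(2, 3, 8, 8)
baseline_dist = torch.randn(5, 3, 8, 8) * 0.01  # distribution of 5 reference images
ldls = LayerDeepLiftShap(model, model.conv)
attr = ldls.attribute(inputs, baselines=baseline_dist, target=0)
print(attr.shape)  # conv output shape per input example: torch.Size([2, 4, 8, 8])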
|
#!/usr/bin/env python3
from collections import defaultdict
import torch
from pytext.models.embeddings.dict_embedding import DictEmbedding
from pytext.models.embeddings.word_embedding import WordEmbedding
from pytext.models.model import EmbeddingBase, EmbeddingList
class PyTextInterpretableEmbedding(EmbeddingBase):
r"""
In PyText DocNN models we need a way to access word embedding layers,
generate the embeddings and subtract the baseline.
To do so, we separate embedding layers from the model, compute the embeddings
separately and do all operations needed outside of the model.
The original embedding layer is replaced by a `PyTextInterpretableEmbedding`
layer, which passes precomputed embedding vectors to lower layers.
"""
def __init__(self, embeddings) -> None:
self.embedding_dims = [embedding.embedding_dim for embedding in embeddings]
super().__init__(sum(self.embedding_dims))
self.embeddings = embeddings
def forward(self, input):
r"""
The forward pass of embedding layer. This can be for the text or any
type of embedding.
Args
input: Input embeddings tensor
Return
output: Output tensor is the same as input. It passes through
the embedding tensors to lower layers without any
modifications
"""
return input
def get_attribution_map(self, attributions):
r"""
After attribution scores are computed for an input embedding vector
we need to split it up into attribution sub tensors for each
feature type: word, dict and other types
TODO: we can potentially also output tuples of attributions. This might be
a better option. We'll work on this in a separate diff.
Args
attributions: A tensor that contains attribution values for each input
field. It usually has the same dimensions as the input
tensor
Return
attribution_map: A dictionary of feature_type and attribution values
"""
begin = 0
attribution_map = defaultdict()
for embedding, embedding_size in zip(self.embeddings, self.embedding_dims):
end = begin + embedding_size
if isinstance(embedding, WordEmbedding):
attribution_map["word"] = attributions[:, :, begin:end]
elif isinstance(embedding, DictEmbedding):
attribution_map["dict"] = attributions[:, :, begin:end]
else:
raise NotImplementedError(
"Currently only word and dict " "embeddings are supported"
)
begin = end
return attribution_map
class BaselineGenerator:
r"""
This is an example input baseline generator for DocNN model which uses
word and dict features.
"""
PAD = "<pad>"
def __init__(self, model, data_handler, device) -> None:
self.model = model
self.data_handler = data_handler
if "dict_feat" in data_handler.features:
self.vocab_dict = data_handler.features["dict_feat"].vocab
if "word_feat" in data_handler.features:
self.vocab_word = data_handler.features["word_feat"].vocab
self.baseline_single_word_feature = self._generate_baseline_single_word_feature(
device
)
self.baseline_single_dict_feature = self._generate_baseline_single_dict_feature(
device
)
def generate_baseline(self, integ_grads_embeddings, seq_length):
r"""
Generates baseline for input word and dict features. In the future we
will extend it to support char and other features as well.
This baseline is entirely based on the `<pad>` token.
Args
integ_grads_embeddings: A reference to integrated gradients embedding
layer
seq_length: The length of each sequence which depends on batch size
Return
baseline: A tuple of feature baselines
Each feature type has a corresponding baseline tensor
in the tuple.
Currently only Dict and Word feature types are supported
"""
baseline = []
for embedding in integ_grads_embeddings.embeddings:
if isinstance(embedding, WordEmbedding):
baseline.append(self._generate_word_baseline(seq_length))
elif isinstance(embedding, DictEmbedding):
baseline.append(self._generate_dict_baseline(seq_length))
else:
raise NotImplementedError(
"Currently only word and dict " "embeddings are supported"
)
return tuple(baseline)
def _generate_baseline_single_word_feature(self, device):
return (
torch.tensor(
[self.vocab_word.stoi[self.PAD] if hasattr(self, "vocab_word") else 0]
)
.unsqueeze(0)
.to(device)
)
def _generate_baseline_single_dict_feature(self, device):
r"""Generate dict features based on Assistant's case study by using
sia_transformer:
fbcode/assistant/sia/transformer/sia_transformer.py
        sia_transformer generates dict features in a special gazetteer format
See `fbsource/fbcode/pytext/models/embeddings/dict_embedding.py`
It generates word dict feature embeddings for each word token.
The output of SIATransformer after running it on `<pad>` token
        looks as follows:
OutputRecord(tokens=['<', 'pad', '>'],
token_ranges=[(0, 1), (1, 4), (4, 5)],
gazetteer_feats=['<pad>', '<pad>', '<pad>'],
gazetteer_feat_lengths=[1, 1, 1],
gazetteer_feat_weights=[0.0, 0.0, 0.0],
characters=[['<', '<pad>', '<pad>'],
['p', 'a', 'd'], ['>', '<pad>', '<pad>']],
pretrained_token_embedding=[ ], dense_feats=None)
"""
gazetteer_feats = [self.PAD, self.PAD, self.PAD]
gazetteer_feat_lengths = [1, 1, 1]
gazetteer_feat_weights = [0.0, 0.0, 0.0]
gazetteer_feat_id = (
torch.tensor(
[
self.vocab_dict.stoi[gazetteer_feat]
if hasattr(self, "vocab_dict")
else 0
for gazetteer_feat in gazetteer_feats
]
)
.unsqueeze(0)
.to(device)
)
gazetteer_feat_weights = (
torch.tensor(gazetteer_feat_weights).unsqueeze(0).to(device)
)
gazetteer_feat_lengths = (
torch.tensor(gazetteer_feat_lengths).to(device).view(1, -1)[:, 1]
)
return (gazetteer_feat_id, gazetteer_feat_weights, gazetteer_feat_lengths)
def _generate_word_baseline(self, seq_length):
return self.baseline_single_word_feature.repeat(1, seq_length)
def _generate_dict_baseline(self, seq_length):
return (
self.baseline_single_dict_feature[0].repeat(1, seq_length),
self.baseline_single_dict_feature[1].repeat(1, seq_length),
self.baseline_single_dict_feature[2].repeat(1, seq_length),
)
def configure_task_integ_grads_embeddings(task):
r"""
Wraps Pytext's DocNN model embedding with `IntegratedGradientsEmbedding` for
a given input task.
IntegratedGradientsEmbedding allows to perform baseline related operations
Args
task: DocNN task reference
Returns
integrated_gradients_embedding_lst: The embedding layer which contains
IntegratedGradientsEmbedding as a wrapper over the original
embeddings of the model
"""
integrated_gradients_embedding_lst = configure_model_integ_grads_embeddings(
task.model
)
task.model.embedding = integrated_gradients_embedding_lst
return integrated_gradients_embedding_lst[0]
def configure_model_integ_grads_embeddings(model):
r"""
Wraps Pytext's DocNN model embedding with `IntegratedGradientsEmbedding`
IntegratedGradientsEmbedding allows to perform baseline related operations
Args
model: a reference to DocModel
Returns
integrated_gradients_embedding_lst: The embedding layer which contains
IntegratedGradientsEmbedding as a wrapper over the original
embeddings of the model
"""
embeddings = model.embedding
integrated_gradients_embedding = PyTextInterpretableEmbedding(embeddings)
return EmbeddingList([integrated_gradients_embedding], False)
def reshape_word_features(word_features):
r"""
Creates one-sample batch for word features for sanity check purposes
Args
        word_features: A tensor of dimensions #words x #embeddings
Return
word_features: A tensor of dimensions 1 x #words x #embeddings
"""
return word_features.unsqueeze(0)
def reshape_dict_features(
dict_feature_id_batch, dict_weight_batch, dict_seq_len_batch, seq_length, idx
):
r"""
Creates one-sample batch for dict features for sanity check purposes
It reads and reshapes id, weight and seq_length feature arrays for given
input index `idx` from the input batch
Args
dict_feature_id_batch: The batch tensor for ids
        dict_weight_batch: The batch tensor for weights
        dict_seq_len_batch: The batch tensor for sequence lengths
seq_length: The number of tokens per sequence
idx: The index of sample in the batch
Return
dict_feature_ids: A tensor of dimensions [ bsz x # dict feature embeddings]
dict_feature_weights: [ bsz x # dict feature embeddings]
dict_feature_lens: [ bsz * seq_length ]
"""
dict_feature_ids = dict_feature_id_batch[idx].unsqueeze(0)
dict_feature_weights = dict_weight_batch[idx].unsqueeze(0)
dict_feature_lens = dict_seq_len_batch[idx].unsqueeze(0)
return (dict_feature_ids, dict_feature_weights, dict_feature_lens)
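# --- Illustrative usage sketch (not part of the original PyText integration) ---
# A minimal, hedged example of how the reshape helpers above build one-sample
# batches for sanity checks. The tensor shapes below are hypothetical and chosen
# purely for illustration.
if __name__ == "__main__":
    import torch

    # 5 words, each with a 3-dimensional embedding
    word_features = torch.rand(5, 3)
    # reshape_word_features adds a batch dimension: 1 x 5 x 3
    print(reshape_word_features(word_features).shape)

    # A batch of 2 samples with 4 dict-feature slots each
    dict_ids = torch.randint(0, 10, (2, 4))
    dict_weights = torch.rand(2, 4)
    dict_lens = torch.randint(1, 3, (2, 4))
    # Extract sample idx=0 as its own one-sample batch (each output is 1 x 4)
    ids, weights, lens = reshape_dict_features(
        dict_ids, dict_weights, dict_lens, seq_length=4, idx=0
    )
    print(ids.shape, weights.shape, lens.shape)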
|
#!/usr/bin/env python3
import warnings
from functools import reduce
import torch
from torch.nn import Module
class InterpretableEmbeddingBase(Module):
r"""
    Since some embedding vectors, e.g. word embeddings, are created and assigned
    in the embedding layers of PyTorch models, we need a way to access
    those layers, generate the embeddings and subtract the baseline.
To do so, we separate embedding layers from the model, compute the
embeddings separately and do all operations needed outside of the model.
The original embedding layer is being replaced by
`InterpretableEmbeddingBase` layer which passes already
precomputed embedding vectors to the layers below.
"""
def __init__(self, embedding, full_name) -> None:
Module.__init__(self)
self.num_embeddings = getattr(embedding, "num_embeddings", None)
self.embedding_dim = getattr(embedding, "embedding_dim", None)
self.embedding = embedding
self.full_name = full_name
def forward(self, *inputs, **kwargs):
r"""
        The forward function of a wrapper embedding layer that takes and returns
        an embedding tensor. It allows embeddings to be created outside of the model
        and passed seamlessly to the layers below.
Args:
*inputs (Any, optional): A sequence of inputs arguments that the
forward function takes. Since forward functions can take any
type and number of arguments, this will ensure that we can
execute the forward pass using interpretable embedding layer.
Note that if inputs are specified, it is assumed that the first
argument is the embedding tensor generated using the
`self.embedding` layer using all input arguments provided in
`inputs` and `kwargs`.
**kwargs (Any, optional): Similar to `inputs` we want to make sure
that our forward pass supports arbitrary number and type of
key-value arguments. If `inputs` is not provided, `kwargs` must
be provided and the first argument corresponds to the embedding
                    tensor generated using `self.embedding`. Note that we make
                    the assumption here that `kwargs` is an ordered dict, which
                    is only the case starting from Python 3.6 and may not hold
                    in every environment. In case the current implementation
                    doesn't work for special use cases, it is encouraged to
                    override `InterpretableEmbeddingBase` and address those
                    specifics in descendant classes.
Returns:
embedding_tensor (Tensor):
Returns a tensor which is the same as first argument passed
to the forward function.
It passes pre-computed embedding tensors to lower layers
without any modifications.
"""
assert len(inputs) > 0 or len(kwargs) > 0, (
"No input arguments are provided to `InterpretableEmbeddingBase`."
"Input embedding tensor has to be provided as first argument to forward "
"function either through inputs argument or kwargs."
)
return inputs[0] if len(inputs) > 0 else list(kwargs.values())[0]
def indices_to_embeddings(self, *input, **kwargs):
r"""
Maps indices to corresponding embedding vectors. E.g. word embeddings
Args:
*input (Any, optional): This can be a tensor(s) of input indices or any
                    other variable necessary to compute the embeddings. A typical
example of input indices are word or token indices.
**kwargs (Any, optional): Similar to `input` this can be any sequence
of key-value arguments necessary to compute final embedding
tensor.
Returns:
tensor:
A tensor of word embeddings corresponding to the
indices specified in the input
"""
return self.embedding(*input, **kwargs)
class TokenReferenceBase:
r"""
A base class for creating reference (aka baseline) tensor for a sequence of
tokens. A typical example of such token is `PAD`. Users need to provide the
index of the reference token in the vocabulary as an argument to
`TokenReferenceBase` class.
"""
def __init__(self, reference_token_idx: int = 0) -> None:
self.reference_token_idx = reference_token_idx
def generate_reference(self, sequence_length, device: torch.device) -> torch.Tensor:
r"""
        Generates a reference tensor of a given `sequence_length` using
        `reference_token_idx`.
Args:
sequence_length (int): The length of the reference sequence
device (torch.device): The device on which the reference tensor will
be created.
Returns:
tensor:
A sequence of reference token with shape:
[sequence_length]
"""
return torch.tensor([self.reference_token_idx] * sequence_length, device=device)
def _get_deep_layer_name(obj, layer_names):
r"""
Traverses through the layer names that are separated by
dot in order to access the embedding layer.
"""
return reduce(getattr, layer_names.split("."), obj)
def _set_deep_layer_value(obj, layer_names, value):
r"""
Traverses through the layer names that are separated by
dot in order to access the embedding layer and update its value.
"""
layer_names = layer_names.split(".")
setattr(reduce(getattr, layer_names[:-1], obj), layer_names[-1], value)
def configure_interpretable_embedding_layer(
model: Module, embedding_layer_name: str = "embedding"
) -> InterpretableEmbeddingBase:
r"""
This method wraps a model's embedding layer with an interpretable embedding
layer that allows us to access the embeddings through their indices.
Args:
model (torch.nn.Module): An instance of PyTorch model that contains embeddings.
embedding_layer_name (str, optional): The name of the embedding layer
in the `model` that we would like to make interpretable.
Returns:
interpretable_emb (InterpretableEmbeddingBase): An instance of
`InterpretableEmbeddingBase` embedding layer that wraps model's
embedding layer that is being accessed through
`embedding_layer_name`.
Examples::
>>> # Let's assume that we have a DocumentClassifier model that
>>> # has a word embedding layer named 'embedding'.
>>> # To make that layer interpretable we need to execute the
>>> # following command:
>>> net = DocumentClassifier()
>>> interpretable_emb = configure_interpretable_embedding_layer(net,
>>> 'embedding')
>>> # then we can use interpretable embedding to convert our
>>> # word indices into embeddings.
>>> # Let's assume that we have the following word indices
>>> input_indices = torch.tensor([1, 0, 2])
>>> # we can access word embeddings for those indices with the command
>>> # line stated below.
>>> input_emb = interpretable_emb.indices_to_embeddings(input_indices)
>>> # Let's assume that we want to apply integrated gradients to
>>> # our model and that target attribution class is 3
>>> ig = IntegratedGradients(net)
>>> attribution = ig.attribute(input_emb, target=3)
>>> # after we finish the interpretation we need to remove
>>> # interpretable embedding layer with the following command:
>>> remove_interpretable_embedding_layer(net, interpretable_emb)
"""
embedding_layer = _get_deep_layer_name(model, embedding_layer_name)
assert (
embedding_layer.__class__ is not InterpretableEmbeddingBase
), "InterpretableEmbeddingBase has already been configured for layer {}".format(
embedding_layer_name
)
warnings.warn(
"In order to make embedding layers more interpretable they will "
"be replaced with an interpretable embedding layer which wraps the "
"original embedding layer and takes word embedding vectors as inputs of "
"the forward function. This allows us to generate baselines for word "
"embeddings and compute attributions for each embedding dimension. "
"The original embedding layer must be set "
"back by calling `remove_interpretable_embedding_layer` function "
"after model interpretation is finished. "
)
interpretable_emb = InterpretableEmbeddingBase(
embedding_layer, embedding_layer_name
)
_set_deep_layer_value(model, embedding_layer_name, interpretable_emb)
return interpretable_emb
def remove_interpretable_embedding_layer(
model: Module, interpretable_emb: InterpretableEmbeddingBase
) -> None:
r"""
Removes interpretable embedding layer and sets back original
embedding layer in the model.
Args:
model (torch.nn.Module): An instance of PyTorch model that contains embeddings
interpretable_emb (InterpretableEmbeddingBase): An instance of
`InterpretableEmbeddingBase` that was originally created in
`configure_interpretable_embedding_layer` function and has
to be removed after interpretation is finished.
Examples::
>>> # Let's assume that we have a DocumentClassifier model that
>>> # has a word embedding layer named 'embedding'.
>>> # To make that layer interpretable we need to execute the
>>> # following command:
>>> net = DocumentClassifier()
>>> interpretable_emb = configure_interpretable_embedding_layer(net,
>>> 'embedding')
>>> # then we can use interpretable embedding to convert our
>>> # word indices into embeddings.
>>> # Let's assume that we have the following word indices
>>> input_indices = torch.tensor([1, 0, 2])
>>> # we can access word embeddings for those indices with the command
>>> # line stated below.
>>> input_emb = interpretable_emb.indices_to_embeddings(input_indices)
>>> # Let's assume that we want to apply integrated gradients to
>>> # our model and that target attribution class is 3
>>> ig = IntegratedGradients(net)
>>> attribution = ig.attribute(input_emb, target=3)
>>> # after we finish the interpretation we need to remove
>>> # interpretable embedding layer with the following command:
>>> remove_interpretable_embedding_layer(net, interpretable_emb)
"""
_set_deep_layer_value(
model, interpretable_emb.full_name, interpretable_emb.embedding
)
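# --- Illustrative end-to-end sketch (not part of the original module) ---
# A minimal, runnable example of the intended workflow of the helpers above:
# wrap a model's embedding layer, build a reference with TokenReferenceBase,
# attribute with IntegratedGradients, then restore the original layer.
# The `ToyTextModel` below is hypothetical and exists only for illustration.
if __name__ == "__main__":
    import torch.nn as nn
    from captum.attr import IntegratedGradients

    class ToyTextModel(nn.Module):
        def __init__(self, vocab_size=10, emb_dim=4):
            super().__init__()
            self.embedding = nn.Embedding(vocab_size, emb_dim)
            self.linear = nn.Linear(emb_dim, 2)

        def forward(self, x):
            # mean-pool token embeddings, then classify
            return self.linear(self.embedding(x).mean(dim=1))

    net = ToyTextModel()
    interpretable_emb = configure_interpretable_embedding_layer(net, "embedding")

    input_indices = torch.tensor([[1, 0, 2]])
    token_reference = TokenReferenceBase(reference_token_idx=0)
    ref_indices = token_reference.generate_reference(
        3, device=torch.device("cpu")
    ).unsqueeze(0)

    # Pre-compute embeddings outside of the model
    input_emb = interpretable_emb.indices_to_embeddings(input_indices)
    ref_emb = interpretable_emb.indices_to_embeddings(ref_indices)

    ig = IntegratedGradients(net)
    attributions = ig.attribute(input_emb, baselines=ref_emb, target=1)
    print(attributions.shape)  # 1 x 3 x 4

    # Always restore the original embedding layer when done
    remove_interpretable_embedding_layer(net, interpretable_emb)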
|
#!/usr/bin/env python3
from captum.concept._core.cav import CAV # noqa
from captum.concept._core.concept import Concept, ConceptInterpreter # noqa
from captum.concept._core.tcav import TCAV # noqa
from captum.concept._utils.classifier import Classifier, DefaultClassifier # noqa
|
#!/usr/bin/env python3
import glob
import os
from typing import Callable, Iterator
from torch import Tensor
from torch.utils.data import DataLoader, Dataset, IterableDataset
class CustomIterableDataset(IterableDataset):
r"""
An auxiliary class for iterating through a dataset.
"""
def __init__(self, transform_filename_to_tensor: Callable, path: str) -> None:
r"""
Args:
transform_filename_to_tensor (Callable): Function to read a data
file from path and return a tensor from that file.
path (str): Path to dataset files. This can be either a path to a
directory or a file where input examples are stored.
"""
self.file_itr = None
self.path = path
if os.path.isdir(self.path):
self.file_itr = glob.glob(self.path + "*")
self.transform_filename_to_tensor = transform_filename_to_tensor
def __iter__(self) -> Iterator[Tensor]:
r"""
Returns:
            iter (Iterator[Tensor]): An iterator over tensors, obtained by
                        applying `transform_filename_to_tensor` to each file path.
"""
if self.file_itr is not None:
return map(self.transform_filename_to_tensor, self.file_itr)
else:
return self.transform_filename_to_tensor(self.path)
def dataset_to_dataloader(dataset: Dataset, batch_size: int = 64) -> DataLoader:
r"""
An auxiliary function that creates torch DataLoader from torch Dataset
using input `batch_size`.
Args:
        dataset (Dataset): A torch dataset that allows iterating over
                    the batches of examples.
        batch_size (int, optional): Batch size for each tensor in the
                    iteration.
Returns:
dataloader_iter (DataLoader): a DataLoader for data iteration.
"""
return DataLoader(dataset, batch_size=batch_size)
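# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of turning a folder of per-example tensor files into a
# DataLoader suitable for a TCAV Concept. The "./concepts/striped/" directory
# and the per-file loader are hypothetical; adapt both to your own data layout.
if __name__ == "__main__":
    import torch

    def load_tensor_from_file(filename: str) -> Tensor:
        # Hypothetical loader: each file is assumed to hold one serialized tensor.
        return torch.load(filename)

    striped_dataset = CustomIterableDataset(load_tensor_from_file, "./concepts/striped/")
    striped_loader = dataset_to_dataloader(striped_dataset, batch_size=8)
    for batch in striped_loader:
        print(batch.shape)
        break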
|
#!/usr/bin/env python3
import random
import warnings
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Tuple, Union
import torch
from captum._utils.models.linear_model import model
from torch import Tensor
from torch.utils.data import DataLoader, TensorDataset
class Classifier(ABC):
r"""
    An abstract class definition of any classifier that allows training a model
    and accessing the trained weights of that model.
    More specifically the classifier can, for instance, be trained on the
    activations of a particular layer. Below we can see an example of a sklearn
    linear classifier wrapped by a `CustomClassifier` which extends the
    `Classifier` abstract class.
Example::
>>> from sklearn import linear_model
>>>
>>> class CustomClassifier(Classifier):
>>>
>>> def __init__(self):
>>>
>>> self.lm = linear_model.SGDClassifier(alpha=0.01, max_iter=1000,
>>> tol=1e-3)
>>>
>>> def train_and_eval(self, dataloader):
>>>
>>> x_train, x_test, y_train, y_test = train_test_split(inputs, labels)
>>> self.lm.fit(x_train.detach().numpy(), y_train.detach().numpy())
>>>
>>> preds = torch.tensor(self.lm.predict(x_test.detach().numpy()))
>>> return {'accs': (preds == y_test).float().mean()}
>>>
>>>
>>> def weights(self):
>>>
>>> if len(self.lm.coef_) == 1:
>>> # if there are two concepts, there is only one label.
>>> # We split it in two.
>>> return torch.tensor([-1 * self.lm.coef_[0], self.lm.coef_[0]])
>>> else:
>>> return torch.tensor(self.lm.coef_)
>>>
>>>
>>> def classes(self):
>>> return self.lm.classes_
>>>
>>>
"""
@abstractmethod
def __init__(self) -> None:
pass
@abstractmethod
def train_and_eval(
self, dataloader: DataLoader, **kwargs: Any
) -> Union[Dict, None]:
r"""
This method is responsible for training a classifier using the data
provided through `dataloader` input arguments. Based on the specific
        implementation, it may or may not return statistics about model
        training and evaluation.
Args:
dataloader (dataloader): A dataloader that enables batch-wise access to
the inputs and corresponding labels. Dataloader allows us to
                        iterate over the dataset by loading the batches in a lazy manner.
kwargs (dict): Named arguments that are used for training and evaluating
concept classifier.
Default: None
Returns:
stats (dict): a dictionary of statistics about the performance of the model.
For example the accuracy of the model on the test and/or
train dataset(s). The user may decide to return None or an
empty dictionary if they decide to not return any performance
statistics.
"""
pass
@abstractmethod
def weights(self) -> Tensor:
r"""
This function returns a C x F tensor weights, where
C is the number of classes and F is the number of features.
Returns:
weights (Tensor): A torch Tensor with the weights resulting from
the model training.
"""
pass
@abstractmethod
def classes(self) -> List[int]:
r"""
This function returns the list of all classes that are used by the
classifier to train the model in the `train_and_eval` method.
The order of returned classes has to match the same order used in
the weights matrix returned by the `weights` method.
Returns:
classes (list): The list of classes used by the classifier to train
the model in the `train_and_eval` method.
"""
pass
class DefaultClassifier(Classifier):
r"""
A default Linear Classifier based on sklearn's SGDClassifier for
learning decision boundaries between concepts.
    Note that the default implementation slices the input dataset into train and
    test splits and keeps them in memory.
    In case concept datasets are large, this can lead to out-of-memory issues, and
    we recommend providing a custom Classifier that extends the `Classifier`
    abstract class and handles large concept datasets accordingly.
"""
def __init__(self) -> None:
warnings.warn(
"Using default classifier for TCAV which keeps input"
" both train and test datasets in the memory. Consider defining"
" your own classifier that doesn't rely heavily on memory, for"
" large number of concepts, by extending"
" `Classifer` abstract class"
)
self.lm = model.SkLearnSGDClassifier(alpha=0.01, max_iter=1000, tol=1e-3)
def train_and_eval(
self, dataloader: DataLoader, test_split_ratio: float = 0.33, **kwargs: Any
) -> Union[Dict, None]:
r"""
        Implements the Classifier::train_and_eval abstract method for small concept
        datasets provided by `dataloader`.
        It is assumed that when iterating over `dataloader` we can still
        retain the entire dataset in memory.
        This method randomly shuffles all provided examples, splits them
        into train and test partitions and trains an SGDClassifier using the
        sklearn library. Ultimately, it measures and returns the model accuracy on
        the test split of the dataset.
Args:
dataloader (dataloader): A dataloader that enables batch-wise access to
the inputs and corresponding labels. Dataloader allows us to
                        iterate over the dataset by loading the batches in a lazy manner.
test_split_ratio (float): The ratio of test split in the entire dataset
served by input data loader `dataloader`.
Default: 0.33
Returns:
stats (dict): a dictionary of statistics about the performance of the model.
In this case stats represents a dictionary of model accuracy
measured on the test split of the dataset.
"""
inputs = []
labels = []
for input, label in dataloader:
inputs.append(input)
labels.append(label)
device = "cpu" if input is None else input.device
x_train, x_test, y_train, y_test = _train_test_split(
torch.cat(inputs), torch.cat(labels), test_split=test_split_ratio
)
self.lm.device = device
self.lm.fit(DataLoader(TensorDataset(x_train, y_train)))
predict = self.lm(x_test)
predict = self.lm.classes()[torch.argmax(predict, dim=1)] # type: ignore
score = predict.long() == y_test.long().cpu()
accs = score.float().mean()
return {"accs": accs}
def weights(self) -> Tensor:
r"""
This function returns a C x F tensor weights, where
C is the number of classes and F is the number of features.
In case of binary classification, C = 2 otherwise it is > 2.
Returns:
weights (Tensor): A torch Tensor with the weights resulting from
the model training.
"""
assert self.lm.linear is not None, (
"The weights cannot be obtained because no model was trained."
"In order to train the model call `train_and_eval` method first."
)
weights = self.lm.representation()
if weights.shape[0] == 1:
# if there are two concepts, there is only one label. We split it in two.
return torch.stack([-1 * weights[0], weights[0]])
else:
return weights
def classes(self) -> List[int]:
r"""
This function returns the list of all classes that are used by the
classifier to train the model in the `train_and_eval` method.
The order of returned classes has to match the same order used in
the weights matrix returned by the `weights` method.
Returns:
classes (list): The list of classes used by the classifier to train
the model in the `train_and_eval` method.
"""
return self.lm.classes().detach().numpy() # type: ignore
def _train_test_split(
x_list: Tensor, y_list: Tensor, test_split: float = 0.33
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
# Shuffle
z_list = list(zip(x_list, y_list))
random.shuffle(z_list)
# Split
test_size = int(test_split * len(z_list))
z_test, z_train = z_list[:test_size], z_list[test_size:]
x_test, y_test = zip(*z_test)
x_train, y_train = zip(*z_train)
return (
torch.stack(x_train),
torch.stack(x_test),
torch.stack(y_train),
torch.stack(y_test),
)
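# --- Illustrative sketch (not part of the original module) ---
# A small example of training the DefaultClassifier above on synthetic
# activation vectors for two hypothetical concepts (labels 0 and 1).
# Requires scikit-learn, since DefaultClassifier wraps an SGD classifier.
if __name__ == "__main__":
    # 40 fake activation vectors of dimension 8, 20 per concept,
    # separated so that a linear boundary is easy to find
    acts = torch.cat([torch.randn(20, 8) + 2.0, torch.randn(20, 8) - 2.0])
    labels = torch.cat(
        [torch.zeros(20, dtype=torch.long), torch.ones(20, dtype=torch.long)]
    )
    loader = DataLoader(TensorDataset(acts, labels), batch_size=10)

    clf = DefaultClassifier()
    stats = clf.train_and_eval(loader, test_split_ratio=0.25)
    print(stats["accs"])        # accuracy on the held-out split
    print(clf.weights().shape)  # one weight vector per concept
    print(clf.classes())        # [0, 1]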
|
#!/usr/bin/env python3
from typing import List
from captum.concept._core.concept import Concept
def concepts_to_str(concepts: List[Concept]) -> str:
r"""
    Returns a string of hyphen("-") concatenated concept ids.
    Example output: "0-1-2"
    Args:
        concepts (list[Concept]): a List of Concept objects whose ids are
                    concatenated and used as a concepts key. These are the
                    same Concept objects that were used for
                    the classifier training.
    Returns:
        names_str (str): A string of hyphen("-") concatenated
                    concept ids. Ex.: "0-1-2"
"""
return "-".join([str(c.id) for c in concepts])
|
#!/usr/bin/env python3
import os
from typing import Any, Dict, List
import torch
from captum.concept._core.concept import Concept
from captum.concept._utils.common import concepts_to_str
class CAV:
r"""
Concept Activation Vector (CAV) is a vector orthogonal to the decision
boundary of a classifier which distinguishes between activation
vectors produced by different concepts.
More details can be found in the paper:
https://arxiv.org/abs/1711.11279
"""
def __init__(
self,
concepts: List[Concept],
layer: str,
stats: Dict[str, Any] = None,
save_path: str = "./cav/",
model_id: str = "default_model_id",
) -> None:
r"""
This class encapsulates the instances of CAVs objects, saves them in
and loads them from the disk (storage).
Args:
concepts (list[Concept]): a List of Concept objects. Only their
names will be saved and loaded.
layer (str): The layer where concept activation vectors are
computed using a predefined classifier.
stats (dict, optional): a dictionary that retains information about
the CAV classifier such as CAV weights and accuracies.
Ex.: stats = {"weights": weights, "classes": classes,
"accs": accs}, where "weights" are learned
model parameters, "classes" are a list of classes used
by the model to generate the "weights" and "accs"
the classifier training or validation accuracy.
save_path (str, optional): The path where the CAV objects are stored.
model_id (str, optional): A unique model identifier associated with
this CAV instance.
"""
self.concepts = concepts
self.layer = layer
self.stats = stats
self.save_path = save_path
self.model_id = model_id
@staticmethod
def assemble_save_path(
path: str, model_id: str, concepts: List[Concept], layer: str
) -> str:
r"""
A utility method for assembling filename and its path, from
a concept list and a layer name.
Args:
path (str): A path to be concatenated with the concepts key and
layer name.
model_id (str): A unique model identifier associated with input
`layer` and `concepts`
concepts (list[Concept]): A list of concepts that are concatenated
                        together and used as a concept key using their ids. These
                        concept ids are retrieved from TCAV's `Concept` objects.
layer (str): The name of the layer for which the activations are
computed.
Returns:
cav_path(str): A string containing the path where the computed CAVs
will be stored.
For example, given:
concept_ids = [0, 1, 2]
concept_names = ["striped", "random_0", "random_1"]
layer = "inception4c"
path = "/cavs",
the resulting save path will be:
"/cavs/default_model_id/0-1-2-inception4c.pkl"
"""
file_name = concepts_to_str(concepts) + "-" + layer + ".pkl"
return os.path.join(path, model_id, file_name)
def save(self):
r"""
Saves a dictionary of the CAV computed values into a pickle file in the
        location returned by the "assemble_save_path" static method. The
        dictionary contains the concept names list, the layer name for which
        the activations are computed, the stats dictionary which contains
        information about the classifier train/eval statistics such as the
weights and training accuracies. Ex.:
save_dict = {
"concept_ids": [0, 1, 2],
"concept_names": ["striped", "random_0", "random_1"],
"layer": "inception4c",
"stats": {"weights": weights, "classes": classes, "accs": accs}
}
"""
save_dict = {
"concept_ids": [c.id for c in self.concepts],
"concept_names": [c.name for c in self.concepts],
"layer": self.layer,
"stats": self.stats,
}
cavs_path = CAV.assemble_save_path(
self.save_path, self.model_id, self.concepts, self.layer
)
torch.save(save_dict, cavs_path)
@staticmethod
def create_cav_dir_if_missing(save_path: str, model_id: str) -> None:
r"""
A utility function for creating the directories where the CAVs will
        be stored. CAVs are saved in a folder named by `model_id`
        under `save_path`.
Args:
save_path (str): A root path where the CAVs will be stored
model_id (str): A unique model identifier associated with the
CAVs. A folder named `model_id` is created under
`save_path`. The CAVs are later stored there.
"""
cav_model_id_path = os.path.join(save_path, model_id)
if not os.path.exists(cav_model_id_path):
os.makedirs(cav_model_id_path)
@staticmethod
def load(cavs_path: str, model_id: str, concepts: List[Concept], layer: str):
r"""
Loads CAV dictionary from a pickle file for given input
`layer` and `concepts`.
Args:
cavs_path (str): The root path where the cavs are stored
in the storage (on the disk).
Ex.: "/cavs"
model_id (str): A unique model identifier associated with the
                        CAVs. There exists a folder named `model_id` under
                        the `cavs_path` path. The CAVs are loaded from this folder.
concepts (list[Concept]): A List of concepts for which
we would like to load the cavs.
layer (str): The layer name. Ex.: "inception4c". In case of nested
layers we use dots to specify the depth / hierarchy.
Ex.: "layer.sublayer.subsublayer"
Returns:
cav(CAV): An instance of a CAV class, containing the respective CAV
score per concept and layer. An example of a path where the
cavs are loaded from is:
"/cavs/default_model_id/0-1-2-inception4c.pkl"
"""
cavs_path = CAV.assemble_save_path(cavs_path, model_id, concepts, layer)
if os.path.exists(cavs_path):
save_dict = torch.load(cavs_path)
concept_names = save_dict["concept_names"]
concept_ids = save_dict["concept_ids"]
concepts = [
Concept(concept_id, concept_name, None)
for concept_id, concept_name in zip(concept_ids, concept_names)
]
cav = CAV(concepts, save_dict["layer"], save_dict["stats"])
return cav
return None
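# --- Illustrative sketch (not part of the original module) ---
# A minimal round-trip example: build a CAV with synthetic weights, save it to a
# temporary directory and load it back. The concepts and stats are fabricated
# stand-ins for the output of a real CAV training run.
if __name__ == "__main__":
    import tempfile

    save_path = tempfile.mkdtemp()
    concepts = [Concept(0, "striped", None), Concept(1, "random_0", None)]
    stats = {"weights": torch.randn(2, 16), "classes": [0, 1], "accs": 0.95}

    CAV.create_cav_dir_if_missing(save_path, "default_model_id")
    cav = CAV(concepts, "inception4c", stats, save_path, "default_model_id")
    cav.save()

    loaded = CAV.load(save_path, "default_model_id", concepts, "inception4c")
    print(loaded.layer, loaded.stats["accs"])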
|
#!/usr/bin/env python3
from typing import Callable, Union
import torch
from torch.nn import Module
class Concept:
r"""
Concepts are human-friendly abstract representations that can be
numerically encoded into torch tensors. They can be illustrated as
images, text or any other form of representation. In case of images,
for example, "stripes" concept can be represented through a number
of example images resembling "stripes" in various different
contexts. In case of Natural Language Processing, the concept of
"happy", for instance, can be illustrated through a number of
adjectives and words that convey happiness.
"""
def __init__(
self, id: int, name: str, data_iter: Union[None, torch.utils.data.DataLoader]
) -> None:
r"""
Args:
id (int): The unique identifier of the concept.
name (str): A unique name of the concept.
data_iter (DataLoader): A pytorch DataLoader object that combines a dataset
and a sampler, and provides an iterable over a given
dataset. Only the input batches are provided by `data_iter`.
Concept ids can be used as labels if necessary.
For more information, please check:
https://pytorch.org/docs/stable/data.html
Example::
>>> # Creates a Concept object named "striped", with a data_iter
>>> # object to iterate over all files in "./concepts/striped"
>>> concept_name = "striped"
>>> concept_path = os.path.join("./concepts", concept_name) + "/"
            >>> concept_iter = dataset_to_dataloader(
            >>>     CustomIterableDataset(get_tensor_from_filename, concept_path))
            >>> concept_object = Concept(
            >>>     id=0, name=concept_name, data_iter=concept_iter)
"""
self.id = id
self.name = name
self.data_iter = data_iter
@property
def identifier(self) -> str:
return "%s-%s" % (self.name, self.id)
def __repr__(self) -> str:
return "Concept(%r, %r)" % (self.id, self.name)
class ConceptInterpreter:
r"""
An abstract class that exposes an abstract interpret method
that has to be implemented by a specific algorithm for
concept-based model interpretability.
"""
def __init__(self, model: Module) -> None:
r"""
Args:
model (torch.nn.Module): An instance of pytorch model.
"""
self.model = model
interpret: Callable
r"""
An abstract interpret method that performs concept-based model interpretability
and returns the interpretation results in form of tensors, dictionaries or other
data structures.
Args:
inputs (Tensor or tuple[Tensor, ...]): Inputs for which concept-based
interpretation scores are computed. It can be provided as
a single tensor or a tuple of multiple tensors. If multiple
input tensors are provided, the batch size (the first
dimension of the tensors) must be aligned across all tensors.
"""
|
#!/usr/bin/env python3
from collections import defaultdict
from typing import Any, cast, Dict, List, Set, Tuple, Union
import numpy as np
import torch
import torch.multiprocessing as multiprocessing
from captum._utils.av import AV
from captum._utils.common import _format_tensor_into_tuples, _get_module_from_name
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr import LayerActivation, LayerAttribution, LayerGradientXActivation
from captum.concept._core.cav import CAV
from captum.concept._core.concept import Concept, ConceptInterpreter
from captum.concept._utils.classifier import Classifier, DefaultClassifier
from captum.concept._utils.common import concepts_to_str
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
class LabelledDataset(Dataset):
"""
    A torch Dataset whose __getitem__ returns both a batch of activation vectors
    and a batch of labels associated with those activation vectors.
    It is used to train a classifier in train_cav.
"""
def __init__(self, datasets: List[AV.AVDataset], labels: List[int]) -> None:
"""
Creates the LabelledDataset given a list of K Datasets, and a length K
list of integer labels representing K different concepts.
The assumption is that the k-th Dataset of datasets is associated with
the k-th element of labels.
The LabelledDataset is the concatenation of the K Datasets in datasets.
        However, __getitem__ not only returns a batch of activation vectors,
but also a batch of labels indicating which concept that batch of
activation vectors is associated with.
Args:
datasets (list[Dataset]): The k-th element of datasets is a Dataset
representing activation vectors associated with the k-th
concept
labels (list[int]): The k-th element of labels is the integer label
associated with the k-th concept
"""
assert len(datasets) == len(
labels
), "number of datasets does not match the number of concepts"
from itertools import accumulate
offsets = [0] + list(accumulate(map(len, datasets), (lambda x, y: x + y)))
self.length = offsets[-1]
self.datasets = datasets
self.labels = labels
self.lowers = offsets[:-1]
self.uppers = offsets[1:]
def _i_to_k(self, i):
left, right = 0, len(self.uppers)
while left < right:
mid = (left + right) // 2
if self.lowers[mid] <= i and i < self.uppers[mid]:
return mid
if i >= self.uppers[mid]:
left = mid
else:
right = mid
def __getitem__(self, i: int):
"""
Returns a batch of activation vectors, as well as a batch of labels
indicating which concept the batch of activation vectors is associated
with.
Args:
i (int): which (activation vector, label) batch in the dataset to
return
Returns:
inputs (Tensor): i-th batch in Dataset (representing activation
vectors)
labels (Tensor): labels of i-th batch in Dataset
"""
assert i < self.length
k = self._i_to_k(i)
inputs = self.datasets[k][i - self.lowers[k]]
assert len(inputs.shape) == 2
labels = torch.tensor([self.labels[k]] * inputs.size(0), device=inputs.device)
return inputs, labels
def __len__(self) -> int:
"""
returns the total number of batches in the labelled_dataset
"""
return self.length
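# --- Illustrative sketch (not part of the original module) ---
# A small worked example of how LabelledDataset maps a flat batch index back to
# its source dataset via the offsets computed in __init__. The two "datasets"
# below are plain lists of activation batches standing in for AV.AVDataset.
if __name__ == "__main__":
    ds_a = [torch.ones(4, 8), torch.ones(3, 8)]  # concept 0: two batches
    ds_b = [torch.zeros(5, 8)]                   # concept 1: one batch
    labelled = LabelledDataset(cast(List[AV.AVDataset], [ds_a, ds_b]), [0, 1])

    # offsets are [0, 2, 3]: flat indices 0-1 come from ds_a, index 2 from ds_b
    inputs, labels = labelled[2]
    print(inputs.shape, labels.unique())  # torch.Size([5, 8]) tensor([1])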
def train_cav(
model_id,
concepts: List[Concept],
layers: Union[str, List[str]],
classifier: Classifier,
save_path: str,
classifier_kwargs: Dict,
) -> Dict[str, Dict[str, CAV]]:
r"""
A helper function for parallel CAV computations that can be called
from a python process.
Please see the TCAV class documentation for further information.
Args:
model_id (str): A unique identifier for the PyTorch model for which
we would like to load the layer activations and train a
model in order to compute CAVs.
concepts (list[Concept]): A list of Concept objects that are used
to train a classifier and learn decision boundaries between
those concepts for each layer defined in the `layers`
argument.
layers (str or list[str]): A list of layer names or a single layer
name that is used to compute the activations of all concept
examples per concept and train a classifier using those
activations.
classifier (Classifier): A custom classifier class, such as the
Sklearn "linear_model" that allows us to train a model
using the activation vectors extracted for a layer per concept.
It also allows us to access trained weights of the classifier
and the list of prediction classes.
save_path (str): The path for storing Concept Activation
Vectors (CAVs) and Activation Vectors (AVs).
classifier_kwargs (dict): Additional named arguments that are passed to
concept classifier's `train_and_eval` method.
Returns:
cavs (dict): A dictionary of CAV objects indexed by concept ids and
layer names. It gives access to the weights of each concept
in a given layer and model statistics such as accuracies
that resulted in trained concept weights.
"""
concepts_key = concepts_to_str(concepts)
cavs: Dict[str, Dict[str, CAV]] = defaultdict()
cavs[concepts_key] = defaultdict()
layers = [layers] if isinstance(layers, str) else layers
for layer in layers:
# Create data loader to initialize the trainer.
datasets = [
AV.load(save_path, model_id, concept.identifier, layer)
for concept in concepts
]
labels = [concept.id for concept in concepts]
labelled_dataset = LabelledDataset(cast(List[AV.AVDataset], datasets), labels)
def batch_collate(batch):
inputs, labels = zip(*batch)
return torch.cat(inputs), torch.cat(labels)
dataloader = DataLoader(labelled_dataset, collate_fn=batch_collate)
classifier_stats_dict = classifier.train_and_eval(
dataloader, **classifier_kwargs
)
classifier_stats_dict = (
{} if classifier_stats_dict is None else classifier_stats_dict
)
weights = classifier.weights()
assert (
weights is not None and len(weights) > 0
), "Model weights connot be None or empty"
classes = classifier.classes()
assert (
classes is not None and len(classes) > 0
), "Classes cannot be None or empty"
classes = (
cast(torch.Tensor, classes).detach().numpy()
if isinstance(classes, torch.Tensor)
else classes
)
cavs[concepts_key][layer] = CAV(
concepts,
layer,
{"weights": weights, "classes": classes, **classifier_stats_dict},
save_path,
model_id,
)
# Saving cavs on the disk
cavs[concepts_key][layer].save()
return cavs
class TCAV(ConceptInterpreter):
r"""
This class implements ConceptInterpreter abstract class using an
approach called Testing with Concept Activation Vectors (TCAVs),
as described in the paper:
https://arxiv.org/abs/1711.11279
TCAV scores for a given layer, a list of concepts and input example
are computed using the dot product between prediction's layer
sensitivities for given input examples and Concept Activation Vectors
(CAVs) in that same layer.
CAVs are defined as vectors that are orthogonal to the classification boundary
hyperplane that separate given concepts in a given layer from each other.
For a given layer, CAVs are computed by training a classifier that uses the
layer activation vectors for a set of concept examples as input examples and
concept ids as corresponding input labels. Trained weights of
that classifier represent CAVs.
CAVs are represented as a learned weight matrix with the dimensionality
C X F, where:
F represents the number of input features in the classifier.
C is the number of concepts used for the classification. Concept
ids are used as labels for concept examples during the training.
We can use any layer attribution algorithm to compute layer sensitivities
of a model prediction.
For example, the gradients of an output prediction w.r.t. the outputs of
the layer.
The CAVs and the Sensitivities (SENS) are used to compute the TCAV score:
0. TCAV = CAV • SENS, a dot product between those two vectors
The final TCAV score can be computed by aggregating the TCAV scores
for each input concept based on the sign or magnitude of the tcav scores.
1. sign_count_score = | TCAV > 0 | / | TCAV |
2. magnitude_score = SUM(ABS(TCAV * (TCAV > 0))) / SUM(ABS(TCAV))
"""
def __init__(
self,
model: Module,
layers: Union[str, List[str]],
model_id: str = "default_model_id",
classifier: Classifier = None,
layer_attr_method: LayerAttribution = None,
attribute_to_layer_input=False,
save_path: str = "./cav/",
**classifier_kwargs: Any,
) -> None:
r"""
Args:
model (Module): An instance of pytorch model that is used to compute
layer activations and attributions.
layers (str or list[str]): A list of layer name(s) that are
used for computing concept activations (cavs) and layer
attributions.
model_id (str, optional): A unique identifier for the PyTorch `model`
passed as first argument to the constructor of TCAV class. It
is used to store and load activations for given input `model`
and associated `layers`.
classifier (Classifier, optional): A custom classifier class, such as the
Sklearn "linear_model" that allows us to train a model
using the activation vectors extracted for a layer per concept.
It also allows us to access trained weights of the model
and the list of prediction classes.
layer_attr_method (LayerAttribution, optional): An instance of a layer
attribution algorithm that helps us to compute model prediction
sensitivity scores.
Default: None
If `layer_attr_method` is None, we default it to gradients
for the layers using `LayerGradientXActivation` layer
attribution algorithm.
save_path (str, optional): The path for storing CAVs and
Activation Vectors (AVs).
classifier_kwargs (Any, optional): Additional arguments such as
`test_split_ratio` that are passed to concept `classifier`.
Examples::
>>>
>>> # TCAV use example:
>>>
>>> # Define the concepts
>>> stripes = Concept(0, "stripes", striped_data_iter)
>>> random = Concept(1, "random", random_data_iter)
>>>
>>>
>>> mytcav = TCAV(model=imagenet,
>>> layers=['inception4c', 'inception4d'])
>>>
>>> scores = mytcav.interpret(inputs, [[stripes, random]], target = 0)
>>>
For more thorough examples, please check out TCAV tutorial and test cases.
"""
ConceptInterpreter.__init__(self, model)
self.layers = [layers] if isinstance(layers, str) else layers
self.model_id = model_id
self.concepts: Set[Concept] = set()
self.classifier = classifier
self.classifier_kwargs = classifier_kwargs
self.cavs: Dict[str, Dict[str, CAV]] = defaultdict(lambda: defaultdict())
if self.classifier is None:
self.classifier = DefaultClassifier()
if layer_attr_method is None:
self.layer_attr_method = cast(
LayerAttribution,
LayerGradientXActivation( # type: ignore
model, None, multiply_by_inputs=False
),
)
else:
self.layer_attr_method = layer_attr_method
assert model_id, (
"`model_id` cannot be None or empty. Consider giving `model_id` "
"a meaningful name or leave it unspecified. If model_id is unspecified we "
"will use `default_model_id` as its default value."
)
self.attribute_to_layer_input = attribute_to_layer_input
self.save_path = save_path
# Creates CAV save directory if it doesn't exist. It is created once in the
# constructor before generating the CAVs.
# It is assumed that `model_id` can be used as a valid directory name
# otherwise `create_cav_dir_if_missing` will raise an error
CAV.create_cav_dir_if_missing(self.save_path, model_id)
def generate_all_activations(self) -> None:
r"""
Computes layer activations for all concepts and layers that are
defined in `self.layers` and `self.concepts` instance variables.
"""
for concept in self.concepts:
self.generate_activation(self.layers, concept)
def generate_activation(self, layers: Union[str, List], concept: Concept) -> None:
r"""
Computes layer activations for the specified `concept` and
the list of layer(s) `layers`.
Args:
layers (str or list[str]): A list of layer names or a layer name
that is used to compute layer activations for the
specific `concept`.
concept (Concept): A single Concept object that provides access
to concept examples using a data iterator.
"""
layers = [layers] if isinstance(layers, str) else layers
layer_modules = [_get_module_from_name(self.model, layer) for layer in layers]
layer_act = LayerActivation(self.model, layer_modules)
assert concept.data_iter is not None, (
"Data iterator for concept id:",
"{} must be specified".format(concept.id),
)
for i, examples in enumerate(concept.data_iter):
activations = layer_act.attribute.__wrapped__( # type: ignore
layer_act,
examples,
attribute_to_layer_input=self.attribute_to_layer_input,
)
for activation, layer_name in zip(activations, layers):
activation = torch.reshape(activation, (activation.shape[0], -1))
AV.save(
self.save_path,
self.model_id,
concept.identifier,
layer_name,
activation.detach(),
str(i),
)
def generate_activations(self, concept_layers: Dict[Concept, List[str]]) -> None:
r"""
Computes layer activations for the concepts and layers specified in
`concept_layers` dictionary.
Args:
            concept_layers (dict[Concept, list[str]]): Dictionary that maps
Concept objects to a list of layer names to generate
the activations. Ex.: concept_layers =
{"striped": ['inception4c', 'inception4d']}
"""
for concept in concept_layers:
self.generate_activation(concept_layers[concept], concept)
def load_cavs(
self, concepts: List[Concept]
) -> Tuple[List[str], Dict[Concept, List[str]]]:
r"""
        This function loads CAVs as a dictionary of concept ids and
layers. CAVs are stored in a directory located under
        `self.save_path` path, in .pkl files with the format:
        <self.save_path>/<model_id>/<concept_ids>-<layer_name>.pkl. Ex.:
        "/cavs/default_model_id/0-1-2-inception4c.pkl", where 0, 1 and 2 are concept ids.
It returns a list of layers and a dictionary of concept-layers mapping
for the concepts and layer that require CAV computation through training.
This can happen if the CAVs aren't already pre-computed for a given list
of concepts and layer.
Args:
concepts (list[Concept]): A list of Concept objects for which we want
to load the CAV.
Returns:
layers (list[layer]): A list of layers for which some CAVs still need
to be computed.
            concept_layers (dict[concept, layer]): A dictionary of concept-layer
                        mappings for which we need to perform CAV computation through
training.
"""
concepts_key = concepts_to_str(concepts)
layers = []
concept_layers = defaultdict(list)
for layer in self.layers:
self.cavs[concepts_key][layer] = CAV.load(
self.save_path, self.model_id, concepts, layer
)
# If CAV aren't loaded
if (
concepts_key not in self.cavs
or layer not in self.cavs[concepts_key]
or not self.cavs[concepts_key][layer]
):
layers.append(layer)
# For all concepts in this experimental_set
for concept in concepts:
# Collect not activated layers for this concept
if not AV.exists(
self.save_path, self.model_id, layer, concept.identifier
):
concept_layers[concept].append(layer)
return layers, concept_layers
def compute_cavs(
self,
experimental_sets: List[List[Concept]],
force_train: bool = False,
processes: int = None,
):
r"""
        This method computes CAVs for the given `experimental_sets` and the layers
specified in `self.layers` instance variable. Internally, it
trains a classifier and creates an instance of CAV class using the
weights of the trained classifier for each experimental set.
It also allows to compute the CAVs in parallel using python's
multiprocessing API and the number of processes specified in
the argument.
Args:
experimental_sets (list[list[Concept]]): A list of lists of concept
instances for which the cavs will be computed.
force_train (bool, optional): A flag that indicates whether to
train the CAVs regardless of whether they are saved or not.
Default: False
processes (int, optional): The number of processes to be created
                        when running in multi-processing mode. If processes > 1 then
CAV computation will be performed in parallel using
multi-processing, otherwise it will be performed sequentially
in a single process.
Default: None
Returns:
cavs (dict) : A mapping of concept ids and layers to CAV objects.
If CAVs for the concept_ids-layer pairs are present in the
data storage they will be loaded into the memory, otherwise
they will be computed using a training process and stored
in the data storage that can be configured using `save_path`
input argument.
"""
# Update self.concepts with concepts
for concepts in experimental_sets:
self.concepts.update(concepts)
concept_ids = []
for concept in self.concepts:
assert concept.id not in concept_ids, (
"There is more than one instance "
"of a concept with id {} defined in experimental sets. Please, "
"make sure to reuse the same instance of concept".format(
str(concept.id)
)
)
concept_ids.append(concept.id)
if force_train:
self.generate_all_activations()
# List of layers per concept key (experimental_set item) to be trained
concept_key_to_layers = defaultdict(list)
for concepts in experimental_sets:
concepts_key = concepts_to_str(concepts)
# If not 'force_train', try to load a saved CAV
if not force_train:
layers, concept_layers = self.load_cavs(concepts)
concept_key_to_layers[concepts_key] = layers
# Generate activations for missing (concept, layers)
self.generate_activations(concept_layers)
else:
concept_key_to_layers[concepts_key] = self.layers
if processes is not None and processes > 1:
pool = multiprocessing.Pool(processes)
cavs_list = pool.starmap(
train_cav,
[
(
self.model_id,
concepts,
concept_key_to_layers[concepts_to_str(concepts)],
self.classifier,
self.save_path,
self.classifier_kwargs,
)
for concepts in experimental_sets
],
)
pool.close()
pool.join()
else:
cavs_list = []
for concepts in experimental_sets:
cavs_list.append(
train_cav(
self.model_id,
concepts,
concept_key_to_layers[concepts_to_str(concepts)],
cast(Classifier, self.classifier),
self.save_path,
self.classifier_kwargs,
)
)
# list[Dict[concept, Dict[layer, list]]] => Dict[concept, Dict[layer, list]]
for cavs in cavs_list:
for c_key in cavs:
self.cavs[c_key].update(cavs[c_key])
return self.cavs
@log_usage()
def interpret(
self,
inputs: TensorOrTupleOfTensorsGeneric,
experimental_sets: List[List[Concept]],
target: TargetType = None,
additional_forward_args: Any = None,
processes: int = None,
**kwargs: Any,
) -> Dict[str, Dict[str, Dict[str, Tensor]]]:
r"""
        This method computes magnitude and sign-based TCAV scores for each
        experimental set in the `experimental_sets` list.
TCAV scores are computed using a dot product between layer attribution
scores for specific predictions and CAV vectors.
Args:
inputs (Tensor or tuple[Tensor, ...]): Inputs for which predictions
are performed and attributions are computed.
If model takes a single tensor as
input, a single input tensor should be provided.
If model takes multiple tensors as
input, a tuple of the input tensors should be provided.
It is assumed that for all given input tensors,
dimension 0 corresponds to the number of examples
(aka batch size), and if multiple input tensors are
provided, the examples must be aligned appropriately.
experimental_sets (list[list[Concept]]): A list of list of Concept
instances.
target (int, tuple, Tensor, or list, optional): Output indices for
which attributions are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
additional_forward_args (Any, optional): Extra arguments that are passed to
model when computing the attributions for `inputs`
w.r.t. layer output.
Default: None
            processes (int, optional): The number of processes to be created. If
                        processes is larger than one then CAV computations will be
                        performed in parallel using the number of processes equal to
                        `processes`. Otherwise, CAV computations will be performed
                        sequentially in a single process.
                        Default: None
**kwargs (Any, optional): A list of arguments that are passed to layer
attribution algorithm's attribute method. This could be for
example `n_steps` in case of integrated gradients.
Default: None
Returns:
            results (dict): A dictionary of sign- and magnitude-based TCAV scores
                        for each concept set per layer.
The order of TCAV scores in the resulting tensor for each
experimental set follows the order in which concepts
are passed in `experimental_sets` input argument.
results example::
>>> #
>>> # scores =
>>> # {'0-1':
>>> # {'inception4c':
>>> # {'sign_count': tensor([0.5800, 0.4200]),
>>> # 'magnitude': tensor([0.6613, 0.3387])},
>>> # 'inception4d':
>>> # {'sign_count': tensor([0.6200, 0.3800]),
>>> # 'magnitude': tensor([0.7707, 0.2293])}}),
>>> # '0-2':
>>> # {'inception4c':
>>> # {'sign_count': tensor([0.6200, 0.3800]),
>>> # 'magnitude': tensor([0.6806, 0.3194])},
>>> # 'inception4d':
>>> # {'sign_count': tensor([0.6400, 0.3600]),
>>> # 'magnitude': tensor([0.6563, 0.3437])}})})
>>> #
"""
assert "attribute_to_layer_input" not in kwargs, (
"Please, set `attribute_to_layer_input` flag as a constructor "
"argument to TCAV class. In that case it will be applied "
"consistently to both layer activation and layer attribution methods."
)
self.compute_cavs(experimental_sets, processes=processes)
scores: Dict[str, Dict[str, Dict[str, Tensor]]] = defaultdict(
lambda: defaultdict()
)
# Retrieves the lengths of the experimental sets so that we can sort
# them by the length and compute TCAV scores in batches.
exp_set_lens = np.array(
list(map(lambda exp_set: len(exp_set), experimental_sets)), dtype=object
)
exp_set_lens_arg_sort = np.argsort(exp_set_lens)
# compute offsets using sorted lengths using their indices
exp_set_lens_sort = exp_set_lens[exp_set_lens_arg_sort]
exp_set_offsets_bool = [False] + list(
exp_set_lens_sort[:-1] == exp_set_lens_sort[1:]
)
exp_set_offsets = []
for i, offset in enumerate(exp_set_offsets_bool):
if not offset:
exp_set_offsets.append(i)
exp_set_offsets.append(len(exp_set_lens))
# sort experimental sets using the length of the concepts in each set
experimental_sets_sorted = np.array(experimental_sets, dtype=object)[
exp_set_lens_arg_sort
]
for layer in self.layers:
layer_module = _get_module_from_name(self.model, layer)
self.layer_attr_method.layer = layer_module
attribs = self.layer_attr_method.attribute.__wrapped__( # type: ignore
self.layer_attr_method, # self
inputs,
target=target,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=self.attribute_to_layer_input,
**kwargs,
)
attribs = _format_tensor_into_tuples(attribs)
# n_inputs x n_features
attribs = torch.cat(
[torch.reshape(attrib, (attrib.shape[0], -1)) for attrib in attribs],
dim=1,
)
# n_experiments x n_concepts x n_features
cavs = []
classes = []
for concepts in experimental_sets:
concepts_key = concepts_to_str(concepts)
cavs_stats = cast(Dict[str, Any], self.cavs[concepts_key][layer].stats)
cavs.append(cavs_stats["weights"].float().detach().tolist())
classes.append(cavs_stats["classes"])
# sort cavs and classes using the length of the concepts in each set
cavs_sorted = np.array(cavs, dtype=object)[exp_set_lens_arg_sort]
classes_sorted = np.array(classes, dtype=object)[exp_set_lens_arg_sort]
i = 0
while i < len(exp_set_offsets) - 1:
cav_subset = np.array(
cavs_sorted[exp_set_offsets[i] : exp_set_offsets[i + 1]],
dtype=object,
).tolist()
classes_subset = classes_sorted[
exp_set_offsets[i] : exp_set_offsets[i + 1]
].tolist()
# n_experiments x n_concepts x n_features
cav_subset = torch.tensor(cav_subset)
cav_subset = cav_subset.to(attribs.device)
assert len(cav_subset.shape) == 3, (
"cav should have 3 dimensions: n_experiments x "
"n_concepts x n_features."
)
experimental_subset_sorted = experimental_sets_sorted[
exp_set_offsets[i] : exp_set_offsets[i + 1]
]
self._tcav_sub_computation(
scores,
layer,
attribs,
cav_subset,
classes_subset,
experimental_subset_sorted,
)
i += 1
return scores
def _tcav_sub_computation(
self,
scores: Dict[str, Dict[str, Dict[str, Tensor]]],
layer: str,
attribs: Tensor,
cavs: Tensor,
classes: List[List[int]],
experimental_sets: List[List[Concept]],
) -> None:
# n_inputs x n_concepts
tcav_score = torch.matmul(attribs.float(), torch.transpose(cavs, 1, 2))
assert len(tcav_score.shape) == 3, (
"tcav_score should have 3 dimensions: n_experiments x "
"n_inputs x n_concepts."
)
assert attribs.shape[0] == tcav_score.shape[1], (
"attrib and tcav_score should have the same 1st and "
"2nd dimensions respectively (n_inputs)."
)
# n_experiments x n_concepts
sign_count_score = torch.mean((tcav_score > 0.0).float(), dim=1)
magnitude_score = torch.mean(tcav_score, dim=1)
for i, (cls_set, concepts) in enumerate(zip(classes, experimental_sets)):
concepts_key = concepts_to_str(concepts)
# sort classes / concepts in the order specified in concept_keys
concept_ord = [concept.id for concept in concepts]
class_ord = {cls_: idx for idx, cls_ in enumerate(cls_set)}
new_ord = torch.tensor(
[class_ord[cncpt] for cncpt in concept_ord], device=tcav_score.device
)
# sort based on classes
scores[concepts_key][layer] = {
"sign_count": torch.index_select(
sign_count_score[i, :], dim=0, index=new_ord
),
"magnitude": torch.index_select(
magnitude_score[i, :], dim=0, index=new_ord
),
}
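# --- Illustrative sketch (not part of the original module) ---
# A tiny worked example of the two aggregation rules used in
# _tcav_sub_computation above, applied to a hand-made tensor of per-input TCAV
# dot products with shape 1 experiment x 4 inputs x 2 concepts.
if __name__ == "__main__":
    tcav_score = torch.tensor([[[0.5, -0.2], [0.1, -0.4], [-0.3, 0.6], [0.2, 0.1]]])
    # sign_count: fraction of inputs with a positive dot product per concept
    print(torch.mean((tcav_score > 0.0).float(), dim=1))  # [[0.7500, 0.5000]]
    # magnitude: mean dot product per concept
    print(torch.mean(tcav_score, dim=1))                   # [[0.1250, 0.0250]]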
|
#!/usr/bin/env python3
try:
from captum.log.fb.internal_log import (
disable_detailed_logging,
log,
log_usage,
patch_methods,
set_environment,
TimedLog,
)
__all__ = [
"log",
"log_usage",
"TimedLog",
"set_environment",
"disable_detailed_logging",
]
except ImportError:
from functools import wraps
def log(*args, **kwargs):
pass
# bug with mypy: https://github.com/python/mypy/issues/1153
class TimedLog: # type: ignore
def __init__(self, *args, **kwargs) -> None:
pass
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
return exception_value is not None
def log_usage(*log_args, **log_kwargs):
def _log_usage(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return _log_usage
def set_environment(env):
pass
def disable_detailed_logging():
pass
def patch_methods(tester, patch_log=True):
pass
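# Illustrative usage sketch (not part of the module): the no-op fallbacks above
# keep the public logging API importable and harmless when the internal logger
# cannot be imported, e.g.
#
#     from captum.log import log_usage, TimedLog
#
#     @log_usage()
#     def attribute(x):
#         return x
#
#     with TimedLog("attribution"):
#         attribute(1)  # behaves exactly like the undecorated function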
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import nox
import os
DEFAULT_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]
PYTHON_VERSIONS = os.environ.get(
"NOX_PYTHON_VERSIONS", ",".join(DEFAULT_PYTHON_VERSIONS)
).split(",")
VERBOSE = os.environ.get("VERBOSE", "0")
SILENT = VERBOSE == "0"
# Linted dirs/files (a tuple so it can be safely unpacked below, even if more
# targets are added later):
lint_targets = (".",)
# Test dirs (corresponds to each project having its own tests folder):
# Note the './', this installs local packages
with open("hydra-configs-projects.txt", "r") as projects_file:
    test_targets = ["./" + line.rstrip("\n") for line in projects_file]
def setup_dev_env(session):
session.run(
"python",
"-m",
"pip",
"install",
"--upgrade",
"setuptools",
"pip",
silent=SILENT,
)
session.run("pip", "install", "-r", "requirements/dev.txt", silent=SILENT)
@nox.session(python=PYTHON_VERSIONS, reuse_venv=True)
def lint(session):
setup_dev_env(session)
session.run("black", *lint_targets, "--check")
session.run("flake8", "--config", ".flake8", *lint_targets)
@nox.session(python=PYTHON_VERSIONS, reuse_venv=True)
def tests(session):
setup_dev_env(session)
for target in test_targets:
session.run(
"pip", "install", "-r", target + "/requirements/dev.txt", silent=SILENT
)
session.install(*test_targets) # install config packages
session.run("pytest", *test_targets)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import pytest
from pathlib import Path
from hydra.utils import get_class, instantiate
from omegaconf import OmegaConf
from typing import Any
import torch
import torchvision.datasets as datasets
@pytest.mark.parametrize(
"modulepath, classname, cfg, passthrough_args, passthrough_kwargs, expected_class",
[
pytest.param(
"datasets.vision",
"VisionDataset",
{"root": None},
[],
{},
datasets.VisionDataset,
id="VisionDatasetConf",
),
pytest.param(
"datasets.mnist",
"MNIST",
{"root": None},
[],
{},
datasets.MNIST,
id="MNISTConf",
),
pytest.param(
"datasets.mnist",
"FashionMNIST",
{"root": None},
[],
{},
datasets.FashionMNIST,
id="FashionMNISTConf",
),
pytest.param(
"datasets.mnist",
"KMNIST",
{"root": None},
[],
{},
datasets.KMNIST,
id="KMNISTConf",
),
# TODO: These tests will need to be changed after blockers:
# 1. EMNISTConf and QMNISTConf are manually created
    # 2. hydra.utils.instantiate is updated to allow **kwargs instantiation
# pytest.param(
# "datasets.mnist",
# "EMNIST",
# {"root":None,
# "split":"byclass",
# "kwargs":None},
# [],
# {},
# datasets.EMNIST,
# id="EMNISTConf",
# ),
# pytest.param(
# "datasets.mnist",
# "QMNIST",
# {"root":None,
# "what":'test',
# "compat":None,
# "kwargs":None},
# [],
# {},
# datasets.QMNIST,
# id="QMNISTConf",
# ),
],
)
def test_instantiate_classes(
tmpdir: Path,
modulepath: str,
classname: str,
cfg: Any,
passthrough_args: Any,
passthrough_kwargs: Any,
expected_class: Any,
) -> None:
# Create fake dataset and put it in tmpdir for test:
tmp_data_root = tmpdir.mkdir("data")
processed_dir = os.path.join(tmp_data_root, classname, "processed")
os.makedirs(processed_dir)
torch.save(torch.tensor([[1.0], [1.0]]), processed_dir + "/training.pt")
torch.save(torch.tensor([1.0]), processed_dir + "/test.pt")
    # cfg is populated here since it requires the tmpdir test fixture
cfg["root"] = str(tmp_data_root)
full_class = f"hydra_configs.torchvision.{modulepath}.{classname}Conf"
schema = OmegaConf.structured(get_class(full_class))
cfg = OmegaConf.merge(schema, cfg)
obj = instantiate(cfg, *passthrough_args, **passthrough_kwargs)
expected_obj = expected_class(root=tmp_data_root)
assert isinstance(obj, type(expected_obj))
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pytest
from hydra.utils import get_class, instantiate
from omegaconf import OmegaConf
import torch
# import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torchvision.transforms.transforms import ToTensor
from typing import Any
def identity(x):
return x
@pytest.mark.parametrize(
"modulepath, classname, cfg, passthrough_args, passthrough_kwargs, expected",
[
# pytest.param(
# "datasets.vision",
# "StandardTransform",
# {},
# [],
# {},
# datasets.vision.StandardTransform(),
# id="StandardTransformConf",
# ),
pytest.param(
"transforms.transforms",
"CenterCrop",
{"size": (10, 10)},
[],
{},
transforms.transforms.CenterCrop(size=(10, 10)),
id="CenterCropConf",
),
pytest.param(
"transforms.transforms",
"ColorJitter",
{},
[],
{},
transforms.transforms.ColorJitter(),
id="ColorJitterConf",
),
pytest.param(
"transforms.transforms",
"Compose",
{"transforms": []},
[],
{},
transforms.transforms.Compose(transforms=[]),
id="ComposeConf",
),
pytest.param(
"transforms.transforms",
"ConvertImageDtype",
{},
[],
{"dtype": torch.int32},
transforms.transforms.ConvertImageDtype(dtype=torch.int32),
id="ConvertImageDtypeConf",
),
pytest.param(
"transforms.transforms",
"FiveCrop",
{"size": (10, 10)},
[],
{},
transforms.transforms.FiveCrop(size=(10, 10)),
id="FiveCropConf",
),
pytest.param(
"transforms.transforms",
"Grayscale",
{},
[],
{},
transforms.transforms.Grayscale(),
id="GrayscaleConf",
),
pytest.param(
"transforms.transforms",
"Lambda",
{},
[],
{"lambd": identity},
transforms.transforms.Lambda(lambd=identity),
id="LambdaConf",
),
pytest.param(
"transforms.transforms",
"LinearTransformation",
{},
[],
{
"transformation_matrix": torch.eye(2),
"mean_vector": torch.Tensor([1, 1]),
},
transforms.transforms.LinearTransformation(
transformation_matrix=torch.eye(2), mean_vector=torch.Tensor([1, 1])
),
id="LinearTransformationConf",
),
pytest.param(
"transforms.transforms",
"Normalize",
{"mean": 0, "std": 1},
[],
{},
transforms.transforms.Normalize(mean=0, std=1),
id="NormalizeConf",
),
pytest.param(
"transforms.transforms",
"Pad",
{"padding": 0},
[],
{},
transforms.transforms.Pad(padding=0),
id="PaddingConf",
),
pytest.param(
"transforms.transforms",
"PILToTensor",
{},
[],
{},
transforms.transforms.PILToTensor(),
id="PILToTensorConf",
),
pytest.param(
"transforms.transforms",
"RandomAffine",
{"degrees": 0},
[],
{},
transforms.transforms.RandomAffine(degrees=0),
id="RandomAffineConf",
),
pytest.param(
"transforms.transforms",
"RandomApply",
{},
[],
{"transforms": [ToTensor()]},
transforms.transforms.RandomApply([ToTensor()]),
id="RandomApplyConf",
),
pytest.param(
"transforms.transforms",
"RandomChoice",
{},
[],
{"transforms": [[ToTensor()]]},
transforms.transforms.RandomChoice([ToTensor()]),
id="RandomChoiceConf",
),
pytest.param(
"transforms.transforms",
"RandomCrop",
{"size": (10, 10)},
[],
{},
transforms.transforms.RandomCrop(size=(10, 10)),
id="RandomCropConf",
),
pytest.param(
"transforms.transforms",
"RandomErasing",
{},
[],
{},
transforms.transforms.RandomErasing(),
id="RandomErasingConf",
),
pytest.param(
"transforms.transforms",
"RandomGrayscale",
{},
[],
{},
transforms.transforms.RandomGrayscale(),
id="RandomGrayscaleConf",
),
pytest.param(
"transforms.transforms",
"RandomHorizontalFlip",
{},
[],
{},
transforms.transforms.RandomHorizontalFlip(),
id="RandomHorizontalFlipConf",
),
pytest.param(
"transforms.transforms",
"RandomOrder",
{},
[],
{"transforms": [ToTensor()]},
transforms.transforms.RandomOrder([ToTensor()]),
id="RandomOrderConf",
),
pytest.param(
"transforms.transforms",
"RandomPerspective",
{},
[],
{},
transforms.transforms.RandomPerspective(),
id="RandomPerspectiveConf",
),
pytest.param(
"transforms.transforms",
"RandomResizedCrop",
{"size": (10, 10)},
[],
{},
transforms.transforms.RandomResizedCrop(size=(10, 10)),
id="RandomResizedCropConf",
),
pytest.param(
"transforms.transforms",
"RandomRotation",
{"degrees": 0},
[],
{},
transforms.transforms.RandomRotation(degrees=0),
id="RandomRotationConf",
),
pytest.param(
"transforms.transforms",
"RandomTransforms",
{"transforms": []},
[],
{},
transforms.transforms.RandomTransforms([]),
id="RandomTransformsConf",
),
pytest.param(
"transforms.transforms",
"RandomVerticalFlip",
{},
[],
{},
transforms.transforms.RandomVerticalFlip(),
id="RandomVerticalFlipConf",
),
pytest.param(
"transforms.transforms",
"Resize",
{"size": (10, 10)},
[],
{},
transforms.transforms.Resize(size=(10, 10)),
id="ResizeConf",
),
pytest.param(
"transforms.transforms",
"TenCrop",
{"size": (10, 10)},
[],
{},
transforms.transforms.TenCrop(size=(10, 10)),
id="TenCropConf",
),
pytest.param(
"transforms.transforms",
"ToPILImage",
{},
[],
{},
transforms.transforms.ToPILImage(),
id="ToPILImageConf",
),
pytest.param(
"transforms.transforms",
"ToTensor",
{},
[],
{},
transforms.transforms.ToTensor(),
id="ToTensorConf",
),
],
)
def test_instantiate_classes(
modulepath: str,
classname: str,
cfg: Any,
passthrough_args: Any,
passthrough_kwargs: Any,
expected: Any,
) -> None:
full_class = f"hydra_configs.torchvision.{modulepath}.{classname}Conf"
schema = OmegaConf.structured(get_class(full_class))
cfg = OmegaConf.merge(schema, cfg)
obj = instantiate(cfg, *passthrough_args, **passthrough_kwargs)
assert isinstance(obj, type(expected))
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from packaging import version
from pkg_resources import get_distribution
import warnings
import torchvision
CONFIGS_VERSION = get_distribution('hydra-configs-torchvision').version
# check that major.minor versions match; patch versions are allowed to differ
if version.parse(torchvision.__version__).release[:2] != version.parse(CONFIGS_VERSION).release[:2]:
warnings.warn(f'Your config and library versions are mismatched. \n HYDRA-CONFIGS-TORCHVISION VERSION: {CONFIGS_VERSION}, \n TORCHVISION VERSION: {torchvision.__version__}. \n Please install the matching configs for reliable functionality.')
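# For example (illustrative): version.parse("0.11.3").release[:2] == (0, 11),
# so configs generated for torchvision 0.11.x accept any 0.11 patch release but
# warn when paired with, say, torchvision 0.12.x.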
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Any
@dataclass
class VisionDatasetConf:
_target_: str = "torchvision.datasets.vision.VisionDataset"
root: Any = MISSING
transforms: Any = None
transform: Any = None
target_transform: Any = None
@dataclass
class StandardTransformConf:
_target_: str = "torchvision.datasets.vision.StandardTransform"
transform: Any = None
target_transform: Any = None
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Any
@dataclass
class MNISTConf:
_target_: str = "torchvision.datasets.mnist.MNIST"
root: Any = MISSING
train: Any = True
transform: Any = None
target_transform: Any = None
download: Any = False
@dataclass
class FashionMNISTConf:
_target_: str = "torchvision.datasets.mnist.FashionMNIST"
root: Any = MISSING
train: Any = True
transform: Any = None
target_transform: Any = None
download: Any = False
@dataclass
class KMNISTConf:
_target_: str = "torchvision.datasets.mnist.KMNIST"
root: Any = MISSING
train: Any = True
transform: Any = None
target_transform: Any = None
download: Any = False
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Any
@dataclass
class CenterCropConf:
_target_: str = "torchvision.transforms.transforms.CenterCrop"
_convert_: str = "ALL"
size: Any = MISSING
@dataclass
class ColorJitterConf:
_target_: str = "torchvision.transforms.transforms.ColorJitter"
_convert_: str = "ALL"
brightness: Any = 0
contrast: Any = 0
saturation: Any = 0
hue: Any = 0
@dataclass
class ComposeConf:
_target_: str = "torchvision.transforms.transforms.Compose"
_convert_: str = "ALL"
transforms: Any = MISSING
@dataclass
class ConvertImageDtypeConf:
_target_: str = "torchvision.transforms.transforms.ConvertImageDtype"
_convert_: str = "ALL"
dtype: Any = MISSING # dtype
@dataclass
class FiveCropConf:
_target_: str = "torchvision.transforms.transforms.FiveCrop"
_convert_: str = "ALL"
size: Any = MISSING
@dataclass
class GrayscaleConf:
_target_: str = "torchvision.transforms.transforms.Grayscale"
_convert_: str = "ALL"
num_output_channels: Any = 1
@dataclass
class LambdaConf:
_target_: str = "torchvision.transforms.transforms.Lambda"
_convert_: str = "ALL"
lambd: Any = MISSING
@dataclass
class LinearTransformationConf:
_target_: str = "torchvision.transforms.transforms.LinearTransformation"
_convert_: str = "ALL"
transformation_matrix: Any = MISSING
mean_vector: Any = MISSING
@dataclass
class NormalizeConf:
_target_: str = "torchvision.transforms.transforms.Normalize"
_convert_: str = "ALL"
mean: Any = MISSING
std: Any = MISSING
inplace: Any = False
@dataclass
class PadConf:
_target_: str = "torchvision.transforms.transforms.Pad"
_convert_: str = "ALL"
padding: Any = MISSING
fill: Any = 0
padding_mode: Any = "constant"
@dataclass
class PILToTensorConf:
_target_: str = "torchvision.transforms.transforms.PILToTensor"
_convert_: str = "ALL"
@dataclass
class RandomAffineConf:
_target_: str = "torchvision.transforms.transforms.RandomAffine"
_convert_: str = "ALL"
degrees: Any = MISSING
translate: Any = None
scale: Any = None
shear: Any = None
resample: Any = 0
fillcolor: Any = 0
@dataclass
class RandomApplyConf:
_target_: str = "torchvision.transforms.transforms.RandomApply"
_convert_: str = "ALL"
transforms: Any = MISSING
p: Any = 0.5
@dataclass
class RandomChoiceConf:
_target_: str = "torchvision.transforms.transforms.RandomChoice"
_convert_: str = "ALL"
transforms: Any = MISSING
@dataclass
class RandomCropConf:
_target_: str = "torchvision.transforms.transforms.RandomCrop"
_convert_: str = "ALL"
size: Any = MISSING
padding: Any = None
pad_if_needed: Any = False
fill: Any = 0
padding_mode: Any = "constant"
@dataclass
class RandomErasingConf:
_target_: str = "torchvision.transforms.transforms.RandomErasing"
_convert_: str = "ALL"
p: Any = 0.5
scale: Any = (0.02, 0.33)
ratio: Any = (0.3, 3.3)
value: Any = 0
inplace: Any = False
@dataclass
class RandomGrayscaleConf:
_target_: str = "torchvision.transforms.transforms.RandomGrayscale"
_convert_: str = "ALL"
p: Any = 0.1
@dataclass
class RandomHorizontalFlipConf:
_target_: str = "torchvision.transforms.transforms.RandomHorizontalFlip"
_convert_: str = "ALL"
p: Any = 0.5
@dataclass
class RandomOrderConf:
_target_: str = "torchvision.transforms.transforms.RandomOrder"
_convert_: str = "ALL"
transforms: Any = MISSING
@dataclass
class RandomPerspectiveConf:
_target_: str = "torchvision.transforms.transforms.RandomPerspective"
_convert_: str = "ALL"
distortion_scale: Any = 0.5
p: Any = 0.5
interpolation: Any = 2
fill: Any = 0
@dataclass
class RandomResizedCropConf:
_target_: str = "torchvision.transforms.transforms.RandomResizedCrop"
_convert_: str = "ALL"
size: Any = MISSING
scale: Any = (0.08, 1.0)
ratio: Any = (0.75, 1.3333333333333333)
interpolation: Any = 2
@dataclass
class RandomRotationConf:
_target_: str = "torchvision.transforms.transforms.RandomRotation"
_convert_: str = "ALL"
degrees: Any = MISSING
resample: Any = False
expand: Any = False
center: Any = None
fill: Any = None
@dataclass
class RandomTransformsConf:
_target_: str = "torchvision.transforms.transforms.RandomTransforms"
_convert_: str = "ALL"
transforms: Any = MISSING
@dataclass
class RandomVerticalFlipConf:
_target_: str = "torchvision.transforms.transforms.RandomVerticalFlip"
_convert_: str = "ALL"
p: Any = 0.5
@dataclass
class ResizeConf:
_target_: str = "torchvision.transforms.transforms.Resize"
_convert_: str = "ALL"
size: Any = MISSING
interpolation: Any = 2
@dataclass
class TenCropConf:
_target_: str = "torchvision.transforms.transforms.TenCrop"
_convert_: str = "ALL"
size: Any = MISSING
vertical_flip: Any = False
@dataclass
class ToPILImageConf:
_target_: str = "torchvision.transforms.transforms.ToPILImage"
_convert_: str = "ALL"
mode: Any = None
@dataclass
class ToTensorConf:
_target_: str = "torchvision.transforms.transforms.ToTensor"
_convert_: str = "ALL"
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# flake8: noqa
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.optim import Adadelta
from torch.optim.lr_scheduler import StepLR
###### HYDRA BLOCK ######
import hydra
from hydra.core.config_store import ConfigStore
from dataclasses import dataclass
# hydra-torch structured config imports
from hydra_configs.torch.optim import AdadeltaConf
from hydra_configs.torch.optim.lr_scheduler import StepLRConf
@dataclass
class MNISTConf:
batch_size: int = 64
test_batch_size: int = 1000
epochs: int = 14
no_cuda: bool = False
dry_run: bool = False
seed: int = 1
log_interval: int = 10
save_model: bool = False
checkpoint_name: str = "unnamed.pt"
adadelta: AdadeltaConf = AdadeltaConf()
steplr: StepLRConf = StepLRConf(
step_size=1
) # we pass a default for step_size since it is required, but missing a default in PyTorch (and consequently in hydra-torch)
cs = ConfigStore.instance()
cs.store(name="mnistconf", node=MNISTConf)
###### / HYDRA BLOCK ######
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
if args.dry_run:
break
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(
output, target, reduction="sum"
).item() # sum up batch loss
pred = output.argmax(
dim=1, keepdim=True
) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print(
"\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
test_loss,
correct,
len(test_loader.dataset),
100.0 * correct / len(test_loader.dataset),
)
)
@hydra.main(config_name="mnistconf")
def main(cfg): # DIFF
print(cfg.pretty())
use_cuda = not cfg.no_cuda and torch.cuda.is_available() # DIFF
torch.manual_seed(cfg.seed) # DIFF
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {"batch_size": cfg.batch_size} # DIFF
test_kwargs = {"batch_size": cfg.test_batch_size} # DIFF
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
dataset1 = datasets.MNIST("../data", train=True, download=True, transform=transform)
dataset2 = datasets.MNIST("../data", train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = Adadelta(
lr=cfg.adadelta.lr,
rho=cfg.adadelta.rho,
eps=cfg.adadelta.eps,
weight_decay=cfg.adadelta.weight_decay,
params=model.parameters(),
) # DIFF
scheduler = StepLR(
step_size=cfg.steplr.step_size,
gamma=cfg.steplr.gamma,
last_epoch=cfg.steplr.last_epoch,
optimizer=optimizer,
) # DIFF
for epoch in range(1, cfg.epochs + 1): # DIFF
train(cfg, model, device, train_loader, optimizer, epoch) # DIFF
test(model, device, test_loader)
scheduler.step()
if cfg.save_model: # DIFF
torch.save(model.state_dict(), cfg.checkpoint_name) # DIFF
if __name__ == "__main__":
main()
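# Illustrative invocations (the script filename below is a placeholder): every
# field of MNISTConf, including the nested optimizer/scheduler configs, can be
# overridden from the Hydra command line, e.g.
#
#     python mnist_example.py epochs=3 adadelta.lr=0.5 steplr.gamma=0.9
#     python mnist_example.py save_model=true checkpoint_name=mnist_cnn.pt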
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pytest
from hydra.utils import get_class, instantiate
from omegaconf import OmegaConf
import torch.optim as optim
import torch
from torch import Tensor
from torch import nn
from typing import Any
model = nn.Linear(1, 1)
@pytest.mark.parametrize(
"modulepath, classname, cfg, passthrough_kwargs, expected",
[
pytest.param(
"optim.adadelta",
"Adadelta",
{"lr": 0.1},
{"params": model.parameters()},
optim.Adadelta(lr=0.1, params=model.parameters()),
id="AdadeltaConf",
),
pytest.param(
"optim.adagrad",
"Adagrad",
{"lr": 0.1},
{"params": model.parameters()},
optim.Adagrad(lr=0.1, params=model.parameters()),
id="AdagradConf",
),
pytest.param(
"optim.adam",
"Adam",
{"lr": 0.1},
{"params": model.parameters()},
optim.Adam(lr=0.1, params=model.parameters()),
id="AdamConf",
),
pytest.param(
"optim.adamax",
"Adamax",
{"lr": 0.1},
{"params": model.parameters()},
optim.Adamax(lr=0.1, params=model.parameters()),
id="AdamaxConf",
),
pytest.param(
"optim.adamw",
"AdamW",
{"lr": 0.1},
{"params": model.parameters()},
optim.AdamW(lr=0.1, params=model.parameters()),
id="AdamWConf",
),
pytest.param(
"optim.asgd",
"ASGD",
{"lr": 0.1},
{"params": model.parameters()},
optim.ASGD(lr=0.1, params=model.parameters()),
id="ASGDConf",
),
pytest.param(
"optim.lbfgs",
"LBFGS",
{"lr": 0.1},
{"params": model.parameters()},
optim.LBFGS(lr=0.1, params=model.parameters()),
id="LBFGSConf",
),
pytest.param(
"optim.rmsprop",
"RMSprop",
{"lr": 0.1},
{"params": model.parameters()},
optim.RMSprop(lr=0.1, params=model.parameters()),
id="RMSpropConf",
),
pytest.param(
"optim.rprop",
"Rprop",
{"lr": 0.1},
{"params": model.parameters()},
optim.Rprop(lr=0.1, params=model.parameters()),
id="RpropConf",
),
pytest.param(
"optim.sgd",
"SGD",
{"lr": 0.1},
{"params": model.parameters()},
optim.SGD(lr=0.1, params=model.parameters()),
id="SGDConf",
),
pytest.param(
"optim.sparse_adam",
"SparseAdam",
{"lr": 0.1},
{"params": list(model.parameters())},
optim.SparseAdam(lr=0.1, params=list(model.parameters())),
id="SparseAdamConf",
),
],
)
def test_instantiate_classes(
modulepath: str, classname: str, cfg: Any, passthrough_kwargs: Any, expected: Any
) -> None:
full_class = f"hydra_configs.torch.{modulepath}.{classname}Conf"
schema = OmegaConf.structured(get_class(full_class))
cfg = OmegaConf.merge(schema, cfg)
obj = instantiate(cfg, **passthrough_kwargs)
def closure():
return model(Tensor([10]))
assert torch.all(torch.eq(obj.step(closure), expected.step(closure)))
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pytest
from hydra.utils import get_class, instantiate
from omegaconf import OmegaConf
import torch.nn.modules.loss as loss
from torch import Tensor
from typing import Any
@pytest.mark.parametrize(
"modulepath, classname, cfg, passthrough_args, passthrough_kwargs, expected",
[
pytest.param(
"nn.modules.loss",
"BCELoss",
{},
[],
{"weight": Tensor([1])},
loss.BCELoss(),
id="BCELossConf",
),
pytest.param(
"nn.modules.loss",
"BCEWithLogitsLoss",
{},
[],
{"weight": Tensor([1]), "pos_weight": Tensor([1])},
loss.BCEWithLogitsLoss(),
id="BCEWithLogitsLossConf",
),
pytest.param(
"nn.modules.loss",
"CosineEmbeddingLoss",
{},
[],
{},
loss.CosineEmbeddingLoss(),
id="CosineEmbeddingLossConf",
),
pytest.param(
"nn.modules.loss",
"CTCLoss",
{},
[],
{},
loss.CTCLoss(),
id="CTCLossConf",
),
pytest.param(
"nn.modules.loss",
"L1Loss",
{},
[],
{},
loss.L1Loss(),
id="L1LossConf",
),
pytest.param(
"nn.modules.loss",
"HingeEmbeddingLoss",
{},
[],
{},
loss.HingeEmbeddingLoss(),
id="HingeEmbeddingLossConf",
),
pytest.param(
"nn.modules.loss",
"KLDivLoss",
{},
[],
{},
loss.KLDivLoss(),
id="KLDivLossConf",
),
pytest.param(
"nn.modules.loss",
"MarginRankingLoss",
{},
[],
{},
loss.MarginRankingLoss(),
id="MarginRankingLossConf",
),
pytest.param(
"nn.modules.loss",
"MSELoss",
{},
[],
{},
loss.MSELoss(),
id="MSELossConf",
),
pytest.param(
"nn.modules.loss",
"MultiLabelMarginLoss",
{},
[],
{},
loss.MultiLabelMarginLoss(),
id="MultiLabelMarginLossConf",
),
pytest.param(
"nn.modules.loss",
"MultiLabelSoftMarginLoss",
{},
[],
{"weight": Tensor([1])},
loss.MultiLabelSoftMarginLoss(),
id="MultiLabelSoftMarginLossConf",
),
pytest.param(
"nn.modules.loss",
"MultiMarginLoss",
{},
[],
{"weight": Tensor([1])},
loss.MultiMarginLoss(),
id="MultiMarginLossConf",
),
pytest.param(
"nn.modules.loss",
"NLLLoss",
{},
[],
{"weight": Tensor([1])},
loss.NLLLoss(),
id="NLLLossConf",
),
pytest.param(
"nn.modules.loss",
"NLLLoss2d",
{},
[],
{"weight": Tensor([1])},
loss.NLLLoss2d(),
id="NLLLoss2dConf",
),
pytest.param(
"nn.modules.loss",
"PoissonNLLLoss",
{},
[],
{},
loss.PoissonNLLLoss(),
id="PoissonNLLLossConf",
),
pytest.param(
"nn.modules.loss",
"SmoothL1Loss",
{},
[],
{},
loss.SmoothL1Loss(),
id="SmoothL1LossConf",
),
pytest.param(
"nn.modules.loss",
"SoftMarginLoss",
{},
[],
{},
loss.SoftMarginLoss(),
id="SoftMarginLossConf",
),
pytest.param(
"nn.modules.loss",
"TripletMarginLoss",
{},
[],
{},
loss.TripletMarginLoss(),
id="TripletMarginLossConf",
),
],
)
def test_instantiate_classes(
modulepath: str,
classname: str,
cfg: Any,
passthrough_args: Any,
passthrough_kwargs: Any,
expected: Any,
) -> None:
full_class = f"hydra_configs.torch.{modulepath}.{classname}Conf"
schema = OmegaConf.structured(get_class(full_class))
cfg = OmegaConf.merge(schema, cfg)
obj = instantiate(cfg, *passthrough_args, **passthrough_kwargs)
assert isinstance(obj, type(expected))
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pytest
from hydra.utils import get_class, instantiate
from omegaconf import OmegaConf
import torch.utils.data as data
import torch
from typing import Any
dummy_tensor = torch.tensor((1, 1))
dummy_dataset = data.dataset.TensorDataset(dummy_tensor)
dummy_sampler = data.Sampler(data_source=dummy_dataset)
@pytest.mark.parametrize(
"modulepath, classname, cfg, passthrough_args, passthrough_kwargs, expected",
[
pytest.param(
"utils.data.dataloader",
"DataLoader",
{"batch_size": 4},
[],
{"dataset": dummy_dataset},
data.DataLoader(batch_size=4, dataset=dummy_dataset),
id="DataLoaderConf",
),
pytest.param(
"utils.data.dataset",
"Dataset",
{},
[],
{},
data.Dataset(),
id="DatasetConf",
),
pytest.param(
"utils.data.dataset",
"ChainDataset",
{},
[],
{"datasets": [dummy_dataset, dummy_dataset]},
data.ChainDataset(datasets=[dummy_dataset, dummy_dataset]),
id="ChainDatasetConf",
),
pytest.param(
"utils.data.dataset",
"ConcatDataset",
{},
[],
{"datasets": [dummy_dataset, dummy_dataset]},
data.ConcatDataset(datasets=[dummy_dataset, dummy_dataset]),
id="ConcatDatasetConf",
),
pytest.param(
"utils.data.dataset",
"IterableDataset",
{},
[],
{},
data.IterableDataset(),
id="IterableDatasetConf",
),
# TODO: investigate asterisk in signature instantiation limitation
# pytest.param(
# "utils.data.dataset",
# "TensorDataset",
# {},
# [],
# {"tensors":[dummy_tensor]},
# data.TensorDataset(dummy_tensor),
# id="TensorDatasetConf",
# ),
pytest.param(
"utils.data.dataset",
"Subset",
{},
[],
{"dataset": dummy_dataset, "indices": [0]},
            data.Subset(dummy_dataset, indices=[0]),
id="SubsetConf",
),
pytest.param(
"utils.data.sampler",
"Sampler",
{},
[],
{"data_source": dummy_dataset},
data.Sampler(data_source=dummy_dataset),
id="SamplerConf",
),
pytest.param(
"utils.data.sampler",
"BatchSampler",
{"batch_size": 4, "drop_last": False},
[],
{"sampler": dummy_sampler},
data.BatchSampler(sampler=dummy_sampler, batch_size=4, drop_last=False),
id="BatchSamplerConf",
),
pytest.param(
"utils.data.sampler",
"RandomSampler",
{},
[],
{"data_source": dummy_dataset},
data.RandomSampler(data_source=dummy_dataset),
id="RandomSamplerConf",
),
pytest.param(
"utils.data.sampler",
"SequentialSampler",
{},
[],
{"data_source": dummy_dataset},
data.SequentialSampler(data_source=dummy_dataset),
id="SequentialSamplerConf",
),
pytest.param(
"utils.data.sampler",
"SubsetRandomSampler",
{"indices": [1]},
[],
{},
data.SubsetRandomSampler(indices=[1]),
id="SubsetRandomSamplerConf",
),
pytest.param(
"utils.data.sampler",
"WeightedRandomSampler",
{"weights": [1], "num_samples": 1},
[],
{},
data.WeightedRandomSampler(weights=[1], num_samples=1),
id="WeightedRandomSamplerConf",
),
# TODO: investigate testing distributed instantiation
# pytest.param(
# "utils.data.distributed",
# "DistributedSampler",
# {},
# [],
# {"dataset": dummy_dataset},
# data.DistributedSampler(group=dummy_group,dataset=dummy_dataset),
# id="DistributedSamplerConf",
# ),
],
)
def test_instantiate_classes(
modulepath: str,
classname: str,
cfg: Any,
passthrough_args: Any,
passthrough_kwargs: Any,
expected: Any,
) -> None:
full_class = f"hydra_configs.torch.{modulepath}.{classname}Conf"
schema = OmegaConf.structured(get_class(full_class))
cfg = OmegaConf.merge(schema, cfg)
obj = instantiate(cfg, *passthrough_args, **passthrough_kwargs)
assert isinstance(obj, type(expected))
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Any
@dataclass
class BCELossConf:
_target_: str = "torch.nn.modules.loss.BCELoss"
weight: Any = MISSING # Optional[Tensor]
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
@dataclass
class BCEWithLogitsLossConf:
_target_: str = "torch.nn.modules.loss.BCEWithLogitsLoss"
weight: Any = MISSING # Optional[Tensor]
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
pos_weight: Any = MISSING # Optional[Tensor]
@dataclass
class CosineEmbeddingLossConf:
_target_: str = "torch.nn.modules.loss.CosineEmbeddingLoss"
margin: float = 0.0
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
@dataclass
class CTCLossConf:
_target_: str = "torch.nn.modules.loss.CTCLoss"
blank: int = 0
reduction: str = "mean"
zero_infinity: bool = False
@dataclass
class L1LossConf:
_target_: str = "torch.nn.modules.loss.L1Loss"
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
@dataclass
class HingeEmbeddingLossConf:
_target_: str = "torch.nn.modules.loss.HingeEmbeddingLoss"
margin: float = 1.0
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
@dataclass
class KLDivLossConf:
_target_: str = "torch.nn.modules.loss.KLDivLoss"
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
log_target: bool = False
@dataclass
class MarginRankingLossConf:
_target_: str = "torch.nn.modules.loss.MarginRankingLoss"
margin: float = 0.0
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
@dataclass
class MSELossConf:
_target_: str = "torch.nn.modules.loss.MSELoss"
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
@dataclass
class MultiLabelMarginLossConf:
_target_: str = "torch.nn.modules.loss.MultiLabelMarginLoss"
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
@dataclass
class MultiLabelSoftMarginLossConf:
_target_: str = "torch.nn.modules.loss.MultiLabelSoftMarginLoss"
weight: Any = MISSING # Optional[Tensor]
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
@dataclass
class MultiMarginLossConf:
_target_: str = "torch.nn.modules.loss.MultiMarginLoss"
p: int = 1
margin: float = 1.0
weight: Any = MISSING # Optional[Tensor]
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
@dataclass
class NLLLossConf:
_target_: str = "torch.nn.modules.loss.NLLLoss"
weight: Any = MISSING # Optional[Tensor]
size_average: Any = None
ignore_index: int = -100
reduce: Any = None
reduction: str = "mean"
@dataclass
class NLLLoss2dConf:
_target_: str = "torch.nn.modules.loss.NLLLoss2d"
weight: Any = MISSING # Optional[Tensor]
size_average: Any = None
ignore_index: int = -100
reduce: Any = None
reduction: str = "mean"
@dataclass
class PoissonNLLLossConf:
_target_: str = "torch.nn.modules.loss.PoissonNLLLoss"
log_input: bool = True
full: bool = False
size_average: Any = None
eps: float = 1e-08
reduce: Any = None
reduction: str = "mean"
@dataclass
class SmoothL1LossConf:
_target_: str = "torch.nn.modules.loss.SmoothL1Loss"
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
@dataclass
class SoftMarginLossConf:
_target_: str = "torch.nn.modules.loss.SoftMarginLoss"
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
@dataclass
class TripletMarginLossConf:
_target_: str = "torch.nn.modules.loss.TripletMarginLoss"
margin: float = 1.0
p: float = 2.0
eps: float = 1e-06
swap: bool = False
size_average: Any = None
reduce: Any = None
reduction: str = "mean"
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Any
@dataclass
class LambdaLRConf:
_target_: str = "torch.optim.lr_scheduler.LambdaLR"
optimizer: Any = MISSING
lr_lambda: Any = MISSING
last_epoch: Any = -1
@dataclass
class MultiplicativeLRConf:
_target_: str = "torch.optim.lr_scheduler.MultiplicativeLR"
optimizer: Any = MISSING
lr_lambda: Any = MISSING
last_epoch: Any = -1
@dataclass
class StepLRConf:
_target_: str = "torch.optim.lr_scheduler.StepLR"
optimizer: Any = MISSING
step_size: Any = MISSING
gamma: Any = 0.1
last_epoch: Any = -1
@dataclass
class MultiStepLRConf:
_target_: str = "torch.optim.lr_scheduler.MultiStepLR"
optimizer: Any = MISSING
milestones: Any = MISSING
gamma: Any = 0.1
last_epoch: Any = -1
@dataclass
class ExponentialLRConf:
_target_: str = "torch.optim.lr_scheduler.ExponentialLR"
optimizer: Any = MISSING
gamma: Any = MISSING
last_epoch: Any = -1
@dataclass
class CosineAnnealingLRConf:
_target_: str = "torch.optim.lr_scheduler.CosineAnnealingLR"
optimizer: Any = MISSING
T_max: Any = MISSING
eta_min: Any = 0
last_epoch: Any = -1
@dataclass
class ReduceLROnPlateauConf:
_target_: str = "torch.optim.lr_scheduler.ReduceLROnPlateau"
optimizer: Any = MISSING
mode: Any = "min"
factor: Any = 0.1
patience: Any = 10
verbose: Any = False
threshold: Any = 0.0001
threshold_mode: Any = "rel"
cooldown: Any = 0
min_lr: Any = 0
eps: Any = 1e-08
@dataclass
class CyclicLRConf:
_target_: str = "torch.optim.lr_scheduler.CyclicLR"
optimizer: Any = MISSING
base_lr: Any = MISSING
max_lr: Any = MISSING
step_size_up: Any = 2000
step_size_down: Any = None
mode: Any = "triangular"
gamma: Any = 1.0
scale_fn: Any = None
scale_mode: Any = "cycle"
cycle_momentum: Any = True
base_momentum: Any = 0.8
max_momentum: Any = 0.9
last_epoch: Any = -1
@dataclass
class CosineAnnealingWarmRestartsConf:
_target_: str = "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts"
optimizer: Any = MISSING
T_0: Any = MISSING
T_mult: Any = 1
eta_min: Any = 0
last_epoch: Any = -1
@dataclass
class OneCycleLRConf:
_target_: str = "torch.optim.lr_scheduler.OneCycleLR"
optimizer: Any = MISSING
max_lr: Any = MISSING
total_steps: Any = None
epochs: Any = None
steps_per_epoch: Any = None
pct_start: Any = 0.3
anneal_strategy: Any = "cos"
cycle_momentum: Any = True
base_momentum: Any = 0.85
max_momentum: Any = 0.95
div_factor: Any = 25.0
final_div_factor: Any = 10000.0
last_epoch: Any = -1
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Any
@dataclass
class RMSpropConf:
_target_: str = "torch.optim.rmsprop.RMSprop"
params: Any = MISSING
lr: Any = 0.01
alpha: Any = 0.99
eps: Any = 1e-08
weight_decay: Any = 0
momentum: Any = 0
centered: Any = False
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Any
@dataclass
class SparseAdamConf:
_target_: str = "torch.optim.sparse_adam.SparseAdam"
params: Any = MISSING
lr: Any = 0.001
betas: Any = (0.9, 0.999)
eps: Any = 1e-08
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Any
@dataclass
class RpropConf:
_target_: str = "torch.optim.rprop.Rprop"
params: Any = MISSING
lr: Any = 0.01
etas: Any = (0.5, 1.2)
step_sizes: Any = (1e-06, 50)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Any
@dataclass
class SGDConf:
_target_: str = "torch.optim.sgd.SGD"
params: Any = MISSING
lr: Any = MISSING # _RequiredParameter
momentum: Any = 0
dampening: Any = 0
weight_decay: Any = 0
nesterov: Any = False
|