#!/usr/bin/env python3
import copy
import random
import unittest
from typing import Callable
import numpy as np
import torch
from captum.log import patch_methods
def deep_copy_args(func: Callable):
def copy_args(*args, **kwargs):
return func(
*(copy.deepcopy(x) for x in args),
**{k: copy.deepcopy(v) for k, v in kwargs.items()},
)
return copy_args
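# A minimal usage sketch (illustrative only, not part of the helpers): wrapping a
# function with `deep_copy_args` guarantees the caller's objects are never mutated,
# because the wrapped function only ever sees deep copies of its arguments.
#
# >>> @deep_copy_args
# ... def double_in_place(t):
# ...     t.mul_(2)  # mutates only the copy
# ...     return t
# >>> x = torch.ones(3)
# >>> y = double_in_place(x)
# >>> x  # the original tensor is left untouched
# tensor([1., 1., 1.])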
def assertTensorAlmostEqual(test, actual, expected, delta=0.0001, mode="sum"):
assert isinstance(actual, torch.Tensor), (
"Actual parameter given for " "comparison must be a tensor."
)
if not isinstance(expected, torch.Tensor):
expected = torch.tensor(expected, dtype=actual.dtype)
assert (
actual.shape == expected.shape
), f"Expected tensor with shape: {expected.shape}. Actual shape {actual.shape}."
actual = actual.cpu()
expected = expected.cpu()
if mode == "sum":
test.assertAlmostEqual(
torch.sum(torch.abs(actual - expected)).item(), 0.0, delta=delta
)
elif mode == "max":
# if both tensors are empty, they are equal but there is no max
if actual.numel() == expected.numel() == 0:
return
if actual.size() == torch.Size([]):
test.assertAlmostEqual(
torch.max(torch.abs(actual - expected)).item(), 0.0, delta=delta
)
else:
for index, (input, ref) in enumerate(zip(actual, expected)):
almost_equal = abs(input - ref) <= delta
if hasattr(almost_equal, "__iter__"):
almost_equal = almost_equal.all()
assert (
almost_equal
), "Values at index {}, {} and {}, differ more than by {}".format(
index, input, ref, delta
)
else:
raise ValueError("Mode for assertion comparison must be one of `max` or `sum`.")
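# A short usage sketch (illustrative only): mode "sum" compares the aggregate
# absolute difference against `delta`, while mode "max" requires every
# element-wise difference to stay within `delta`.
#
# >>> class ExampleTensorTest(unittest.TestCase):
# ...     def test_close(self):
# ...         assertTensorAlmostEqual(
# ...             self, torch.tensor([1.0, 2.0]), [1.0, 2.0], delta=1e-4, mode="max"
# ...         )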
def assertTensorTuplesAlmostEqual(test, actual, expected, delta=0.0001, mode="sum"):
if isinstance(expected, tuple):
assert len(actual) == len(
expected
), f"the length of actual {len(actual)} != expected {len(expected)}"
for i in range(len(expected)):
assertTensorAlmostEqual(test, actual[i], expected[i], delta, mode)
else:
assertTensorAlmostEqual(test, actual, expected, delta, mode)
def assertAttributionComparision(test, attributions1, attributions2):
for attribution1, attribution2 in zip(attributions1, attributions2):
for attr_row1, attr_row2 in zip(attribution1, attribution2):
assertTensorAlmostEqual(test, attr_row1, attr_row2, 0.05, "max")
def assert_delta(test, delta):
delta_condition = (delta.abs() < 0.00001).all()
test.assertTrue(
delta_condition,
"The sum of attribution values {} for relu layer is not "
"nearly equal to the difference between the endpoint for "
"some samples".format(delta),
)
def set_all_random_seeds(seed: int = 1234) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
class BaseTest(unittest.TestCase):
"""
This class provides a basic framework for all Captum tests by providing
a set up fixture, which sets a fixed random seed. Since many torch
initializations are random, this ensures that tests run deterministically.
"""
def setUp(self) -> None:
set_all_random_seeds(1234)
patch_methods(self)
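# A hypothetical example (not part of the suite) of how BaseTest is typically
# subclassed: the inherited setUp fixes all random seeds, so random tensors drawn
# in the test body are reproducible across runs.
#
# >>> class ExampleDeterminismTest(BaseTest):
# ...     def test_randn_is_deterministic(self):
# ...         first = torch.randn(4)
# ...         set_all_random_seeds(1234)
# ...         second = torch.randn(4)
# ...         assertTensorAlmostEqual(self, first, second)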
#! /usr/bin/env python3
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
import os
import re
import sys
from typing import List
base_path = os.path.abspath(os.path.join(__file__, "..", "..", ".."))
# read module from src instead of installation
sys.path.insert(0, base_path)
print("base path for Captum module:", base_path)
# -- Project information -----------------------------------------------------
project = "Captum"
copyright = "2019, Facebook, Inc."
author = "The PyTorch Team"
# import captum from base_path to get the version;
# the version is no longer used directly,
# since it is trimmed in the sphinx pages embedded into docusaurus
import captum # noqa: E402
version = captum.__version__
# -- General configuration ---------------------------------------------------
# Sphinx extension modules
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
"sphinxcontrib.katex",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Default options for autodoc directives. Applied to all autodoc directives
autodoc_default_options = {}
# Include init docstrings in the body of autoclass directives
autoclass_content = "both"
# Preserve signature defaults
# Prevents entire tensors from being printed, & gives callable functions
# proper names
autodoc_preserve_defaults = True
# Configuration for intersphinx: refer to the Python standard library and PyTorch
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"pytorch": ("https://pytorch.org/docs/stable", None),
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
html_static_path = [] # for now we have no static files to track
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, the reST sources are included in the HTML build as _sources/name.
# The default is True.
# Uncomment the following line after sphinx 4.5.0 release
# https://github.com/sphinx-doc/sphinx/issues/9456
# html_copy_source = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "captumdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "captum.tex", "Captum Documentation", "Facebook, Inc.", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "captum", "captum Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"captum",
"Captum Documentation",
author,
"Captum",
"Model interpretability and understanding for PyTorch.",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Docstring Improvements --------------------------------------------------
# Regex code for typing replacements.
# The "(?<![\.])" part checks to see if the string
# starts with a period, and "\b" denotes word boundaries.
# Only words that don't start with a period are replaced.
_rt = [r"(?<![\.])(\b", r"\b)"]
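# For illustration (describing the substitutions below): _rt[0] + r"Tensor" + _rt[1]
# composes to r"(?<![\.])(\bTensor\b)", so a generated docstring line such as
# ":type inputs: Tensor" becomes ":type inputs: ~torch.Tensor", while an already
# qualified "torch.Tensor" is left untouched because the lookbehind rejects
# matches preceded by a period.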
def autodoc_process_docstring(
app, what: str, name: str, obj, options, lines: List[str]
) -> None:
"""
Modify docstrings before creating html files.
Sphinx converts the 'Args:' and 'Returns:' sections of docstrings into
reStructuredText (rST) syntax, which can then be found via ':type' & ':rtype'.
See here for more information:
https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
"""
for i in range(len(lines)):
# Skip unless the line is a parameter doc or a return doc
if not lines[i].startswith(":type"):
continue
if ":py:data:" in lines[i]:
continue
# Ensure Any, Callable, & Iterator types are hyperlinked with intersphinx.
# The tilde '~' character hides the 'typing.' portion of the string.
lines[i] = re.sub(_rt[0] + r"Any" + _rt[1], "~typing.Any", lines[i])
lines[i] = re.sub(_rt[0] + r"Callable" + _rt[1], "~typing.Callable", lines[i])
lines[i] = re.sub(_rt[0] + r"Iterator" + _rt[1], "~typing.Iterator", lines[i])
lines[i] = re.sub(_rt[0] + r"Iterable" + _rt[1], "~typing.Iterable", lines[i])
# Ensure Tensor type is hyperlinked by intersphinx
lines[i] = re.sub(_rt[0] + r"Tensor" + _rt[1], "~torch.Tensor", lines[i])
def setup(app) -> None:
app.connect("autodoc-process-docstring", autodoc_process_docstring)
#!/usr/bin/env python3
import argparse
import json
import os
import nbformat
from bs4 import BeautifulSoup
from nbconvert import HTMLExporter, ScriptExporter
TEMPLATE = """const CWD = process.cwd();
const React = require('react');
const Tutorial = require(`${{CWD}}/core/Tutorial.js`);
class TutorialPage extends React.Component {{
render() {{
const {{config: siteConfig}} = this.props;
const {{baseUrl}} = siteConfig;
return <Tutorial baseUrl={{baseUrl}} tutorialID="{}"/>;
}}
}}
module.exports = TutorialPage;
"""
JS_SCRIPTS = """
<script
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
</script>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
</script>
""" # noqa: E501
def gen_tutorials(repo_dir: str) -> None:
"""Generate HTML tutorials for captum Docusaurus site from Jupyter notebooks.
Also create ipynb and py versions of each tutorial in the Docusaurus site
for download.
"""
with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile:
tutorial_config = json.loads(infile.read())
tutorial_ids = []
for category_items in tutorial_config.values():
for item in category_items:
if "id" in item:
tutorial_ids.append(item["id"])
else:
for sub_item in item["children"]:
tutorial_ids.append(sub_item["id"])
for tid in tutorial_ids:
print("Generating {} tutorial".format(tid))
# convert notebook to HTML
ipynb_in_path = os.path.join(repo_dir, "tutorials", "{}.ipynb".format(tid))
with open(ipynb_in_path, "r") as infile:
nb_str = infile.read()
nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
# display_name may be absent from notebook metadata, so set it explicitly
nb["metadata"]["kernelspec"]["display_name"] = "python3"
exporter = HTMLExporter()
html, meta = exporter.from_notebook_node(nb)
# pull out html div for notebook
soup = BeautifulSoup(html, "html.parser")
nb_meat = soup.find("div", {"id": "notebook-container"})
del nb_meat.attrs["id"]
nb_meat.attrs["class"] = ["notebook"]
html_out = JS_SCRIPTS + str(nb_meat)
# generate html file
html_out_path = os.path.join(
repo_dir, "website", "_tutorials", "{}.html".format(tid)
)
with open(html_out_path, "w") as html_outfile:
html_outfile.write(html_out)
# generate JS file
script = TEMPLATE.format(tid)
js_out_path = os.path.join(
repo_dir, "website", "pages", "tutorials", "{}.js".format(tid)
)
with open(js_out_path, "w") as js_outfile:
js_outfile.write(script)
# output tutorial in both ipynb & py form
ipynb_out_path = os.path.join(
repo_dir, "website", "static", "files", "{}.ipynb".format(tid)
)
with open(ipynb_out_path, "w") as ipynb_outfile:
ipynb_outfile.write(nb_str)
exporter = ScriptExporter()
script, meta = exporter.from_notebook_node(nb)
py_out_path = os.path.join(
repo_dir, "website", "static", "files", "{}.py".format(tid)
)
with open(py_out_path, "w") as py_outfile:
py_outfile.write(script)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate JS, HTML, ipynb, and py files for tutorials."
)
parser.add_argument(
"-w", "--repo_dir", metavar="path", required=True, help="captum repo directory."
)
args = parser.parse_args()
gen_tutorials(args.repo_dir)
#!/usr/bin/env python3
import argparse
import json
from bs4 import BeautifulSoup
BASE_URL = "/"
def updateVersionHTML(base_path, base_url=BASE_URL):
with open(base_path + "/captum-master/website/_versions.json", "rb") as infile:
versions = json.loads(infile.read())
with open(base_path + "/new-site/versions.html", "rb") as infile:
html = infile.read()
versions.append("latest")
def prepend_url(a_tag, base_url, version):
href = a_tag.attrs["href"]
if href.startswith("https://") or href.startswith("http://"):
return href
else:
return "{base_url}versions/{version}{original_url}".format(
base_url=base_url, version=version, original_url=href
)
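# Illustrative example (hypothetical href): with base_url="/" and version="0.6.0",
# a site-relative link such as "/index.html" is rewritten to
# "/versions/0.6.0/index.html", while absolute http(s) links are returned unchanged.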
for v in versions:
soup = BeautifulSoup(html, "html.parser")
# title
title_link = soup.find("header").find("a")
title_link.attrs["href"] = prepend_url(title_link, base_url, v)
# nav
nav_links = soup.find("nav").findAll("a")
for link in nav_links:
link.attrs["href"] = prepend_url(link, base_url, v)
# version link
t = soup.find("h2", {"class": "headerTitleWithLogo"}).find_next("a")
t.string = v
t.attrs["href"] = prepend_url(t, base_url, v)
# output files
with open(
base_path + "/new-site/versions/{}/versions.html".format(v), "w"
) as outfile:
outfile.write(str(soup))
with open(
base_path + "/new-site/versions/{}/en/versions.html".format(v), "w"
) as outfile:
outfile.write(str(soup))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
"Fix links in version.html files for Docusaurus site."
"This is used to ensure that the versions.js for older "
"versions in versions subdirectory are up-to-date and "
"will have a way to navigate back to newer versions."
)
)
parser.add_argument(
"-p",
"--base_path",
metavar="path",
required=True,
help="Input directory for rolling out new version of site.",
)
args = parser.parse_args()
updateVersionHTML(args.base_path)
#!/usr/bin/env python3
import argparse
import os
from bs4 import BeautifulSoup
# no need to import css from the built path
# because docusaurus merges all css files within the static folder automatically
# https://v1.docusaurus.io/docs/en/api-pages#styles
base_scripts = """
<script type="text/javascript" id="documentation_options" data-url_root="./"
src="/_sphinx/documentation_options.js"></script>
<script type="text/javascript" src="/_sphinx/jquery.js"></script>
<script type="text/javascript" src="/_sphinx/underscore.js"></script>
<script type="text/javascript" src="/_sphinx/doctools.js"></script>
<script type="text/javascript" src="/_sphinx/language_data.js"></script>
<script type="text/javascript" src="/_sphinx/searchtools.js"></script>
""" # noqa: E501
search_js_scripts = """
<script type="text/javascript">
jQuery(function() { Search.loadIndex("/_sphinx/searchindex.js"); });
</script>
<script type="text/javascript" id="searchindexloader"></script>
"""
katex_scripts = """
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/contrib/auto-render.min.js"></script>
<script src="/_sphinx/katex_autorenderer.js"></script>
<link rel="stylesheet" type="text/css" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css" />
""" # noqa: E501
def parse_sphinx(input_dir, output_dir):
for cur, _, files in os.walk(input_dir):
for fname in files:
if fname.endswith(".html"):
with open(os.path.join(cur, fname), "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
doc = soup.find("div", {"class": "document"})
wrapped_doc = doc.wrap(
soup.new_tag("div", **{"class": "sphinx wrapper"})
)
# add scripts that sphinx pages need
if fname == "search.html":
out = (
base_scripts
+ search_js_scripts
+ katex_scripts
+ str(wrapped_doc)
)
else:
out = base_scripts + katex_scripts + str(wrapped_doc)
output_path = os.path.join(output_dir, os.path.relpath(cur, input_dir))
os.makedirs(output_path, exist_ok=True)
with open(os.path.join(output_path, fname), "w") as fout:
fout.write(out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Strip HTML body from Sphinx docs.")
parser.add_argument(
"-i",
"--input_dir",
metavar="path",
required=True,
help="Input directory for Sphinx HTML.",
)
parser.add_argument(
"-o",
"--output_dir",
metavar="path",
required=True,
help="Output directory in website.",
)
args = parser.parse_args()
parse_sphinx(args.input_dir, args.output_dir)
#!/usr/bin/env python3
import captum.attr as attr # noqa
import captum.concept as concept # noqa
import captum.influence as influence # noqa
import captum.log as log # noqa
import captum.metrics as metrics # noqa
import captum.robust as robust # noqa
__version__ = "0.6.0"
#!/usr/bin/env python3
from captum.metrics._core.infidelity import ( # noqa
infidelity,
infidelity_perturb_func_decorator,
)
from captum.metrics._core.sensitivity import sensitivity_max # noqa
#!/usr/bin/env python3
import warnings
from typing import Callable, Tuple
import torch
from torch import Tensor
def _divide_and_aggregate_metrics(
inputs: Tuple[Tensor, ...],
n_perturb_samples: int,
metric_func: Callable,
agg_func: Callable = torch.add,
max_examples_per_batch: int = None,
) -> Tensor:
r"""
This function slices a large number of samples (`n_perturb_samples` per
input example) into smaller pieces, computes the metric for each small piece and
aggregates the results across all `n_perturb_samples` per example. The function
returns the overall aggregated metric per example. The size of each slice is
determined by the `max_examples_per_batch` input parameter.
Args:
inputs (tuple): The original inputs, formatted as a tuple, that are passed to
the metrics function and for which the attributions
are computed.
n_perturb_samples (int): The number of perturbed samples generated
per input example.
metric_func (Callable): This function takes the number of samples per
input batch and returns an overall metric for each example.
agg_func (Callable, optional): This function is used to aggregate the
metrics across the multiple sub-batches that are
generated by `metric_func`.
max_examples_per_batch (int, optional): The maximum number of allowed examples
per batch.
Returns:
metric (Tensor): A metric score estimated by `metric_func` per
input example.
"""
bsz = inputs[0].size(0)
if max_examples_per_batch is not None and (
max_examples_per_batch // bsz < 1
or max_examples_per_batch // bsz > n_perturb_samples
):
warnings.warn(
(
"`max_examples_per_batch` must be at least equal to the"
" input batch size and at most to "
"`input batch size` * `n_perturb_samples`."
"`max_examples_per_batch` is: {} and the input batch size is: {}."
"This is necessary because we require that each sub-batch that is used "
"to compute the metrics, contains at least an instance of "
"the original example and doesn't exceed the number of "
"expanded n_perturb_samples."
).format(max_examples_per_batch, bsz)
)
max_inps_per_batch = (
n_perturb_samples
if max_examples_per_batch is None
else min(max(max_examples_per_batch // bsz, 1), n_perturb_samples)
)
current_n_steps = max_inps_per_batch
metrics_sum = metric_func(max_inps_per_batch)
while current_n_steps < n_perturb_samples:
current_n_steps += max_inps_per_batch
metric = metric_func(
max_inps_per_batch
if current_n_steps <= n_perturb_samples
else max_inps_per_batch - (current_n_steps - n_perturb_samples)
)
current_n_steps = min(current_n_steps, n_perturb_samples)
metrics_sum = agg_func(metrics_sum, metric)
return metrics_sum
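# A minimal usage sketch (illustrative only, the metric below is hypothetical):
# the helper calls `metric_func` with sub-batch sizes that sum to
# `n_perturb_samples` and folds the partial results together with `agg_func`.
#
# >>> inputs = (torch.ones(2, 3),)
# >>> def metric_func(n_samples):
# ...     # toy metric: one score per example, scaled by the slice size
# ...     return torch.full((inputs[0].size(0),), float(n_samples))
# >>> _divide_and_aggregate_metrics(
# ...     inputs, n_perturb_samples=10, metric_func=metric_func,
# ...     max_examples_per_batch=4,
# ... )
# tensor([10., 10.])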
#!/usr/bin/env python3
#!/usr/bin/env python3
from typing import Any, Callable, cast, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_baseline,
_format_tensor_into_tuples,
_run_forward,
ExpansionTypes,
safe_div,
)
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.log import log_usage
from captum.metrics._utils.batching import _divide_and_aggregate_metrics
from torch import Tensor
def infidelity_perturb_func_decorator(multipy_by_inputs: bool = True) -> Callable:
r"""An auxiliary decorator function that helps with computing
perturbations given perturbed inputs. It can be useful for cases
when `perturb_func` returns only perturbed inputs and we
internally compute the perturbations as
(input - perturbed_input) / (input - baseline) if
multipy_by_inputs is set to True and
(input - perturbed_input) otherwise.
If users decorate their `perturb_func` with
`@infidelity_perturb_func_decorator` then their `perturb_func`
needs to only return the perturbed inputs.
Args:
multipy_by_inputs (bool): Indicates whether model inputs'
multiplier is factored into the computation of
attribution scores.
"""
def sub_infidelity_perturb_func_decorator(perturb_func: Callable) -> Callable:
r"""
Args:
perturb_func(Callable): Input perturbation function that takes inputs
and optionally baselines and returns perturbed inputs
Returns:
default_perturb_func(Callable): Internal default perturbation
function that computes the perturbations internally and returns
perturbations and perturbed inputs.
Examples::
>>> @infidelity_perturb_func_decorator(True)
>>> def perturb_fn(inputs):
>>> noise = torch.tensor(np.random.normal(0, 0.003,
>>> inputs.shape)).float()
>>> return inputs - noise
>>> # Computes infidelity score using `perturb_fn`
>>> infid = infidelity(model, perturb_fn, input, ...)
"""
def default_perturb_func(
inputs: TensorOrTupleOfTensorsGeneric, baselines: BaselineType = None
):
r""" """
inputs_perturbed = (
perturb_func(inputs, baselines)
if baselines is not None
else perturb_func(inputs)
)
inputs_perturbed = _format_tensor_into_tuples(inputs_perturbed)
inputs = _format_tensor_into_tuples(inputs)
baselines = _format_baseline(baselines, inputs)
if baselines is None:
perturbations = tuple(
safe_div(
input - input_perturbed,
input,
default_denom=1.0,
)
if multipy_by_inputs
else input - input_perturbed
for input, input_perturbed in zip(inputs, inputs_perturbed)
)
else:
perturbations = tuple(
safe_div(
input - input_perturbed,
input - baseline,
default_denom=1.0,
)
if multipy_by_inputs
else input - input_perturbed
for input, input_perturbed, baseline in zip(
inputs, inputs_perturbed, baselines
)
)
return perturbations, inputs_perturbed
return default_perturb_func
return sub_infidelity_perturb_func_decorator
@log_usage()
def infidelity(
forward_func: Callable,
perturb_func: Callable,
inputs: TensorOrTupleOfTensorsGeneric,
attributions: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
additional_forward_args: Any = None,
target: TargetType = None,
n_perturb_samples: int = 10,
max_examples_per_batch: int = None,
normalize: bool = False,
) -> Tensor:
r"""
Explanation infidelity represents the expected mean-squared error
between the explanation multiplied by a meaningful input perturbation
and the differences between the predictor function at its input
and perturbed input.
More details about the measure can be found in the following paper:
https://arxiv.org/abs/1901.09392
It is derived from the completeness property of well-known attribution
algorithms and is a computationally more efficient and generalized
notion of Sensitivity-n. The latter measures correlations between the sum
of the attributions and the differences of the predictor function at
its input and fixed baseline. More details about the Sensitivity-n can
be found here:
https://arxiv.org/abs/1711.06104
The users can perturb the inputs any desired way by providing any
perturbation function that takes the inputs (and optionally baselines)
and returns perturbed inputs or perturbed inputs and corresponding
perturbations.
This specific implementation is primarily tested for attribution-based
explanation methods but the idea can be extended to
non-attribution-based interpretability methods as well.
Args:
forward_func (Callable):
The forward function of the model or any modification of it.
perturb_func (Callable):
The perturbation function of model inputs. This function takes
model inputs and optionally baselines as input arguments and returns
either a tuple of perturbations and perturbed inputs or just
perturbed inputs. For example:
>>> def my_perturb_func(inputs):
>>> <MY-LOGIC-HERE>
>>> return perturbations, perturbed_inputs
If we want to only return perturbed inputs and compute
perturbations internally then we can wrap perturb_func with
`infidelity_perturb_func_decorator` decorator such as:
>>> from captum.metrics import infidelity_perturb_func_decorator
>>> @infidelity_perturb_func_decorator(<multipy_by_inputs flag>)
>>> def my_perturb_func(inputs):
>>> <MY-LOGIC-HERE>
>>> return perturbed_inputs
In case `multipy_by_inputs` is False we compute perturbations by
`input - perturbed_input` difference and in case `multipy_by_inputs`
flag is True we compute it by dividing
(input - perturbed_input) by (input - baselines).
The user needs to only return perturbed inputs in `perturb_func`
as described above.
`infidelity_perturb_func_decorator` needs to be used with
`multipy_by_inputs` flag set to False in case infidelity
score is being computed for attribution maps that are local aka
that do not factor in inputs in the final attribution score.
Such attribution algorithms include Saliency, GradCam, Guided Backprop,
or Integrated Gradients and DeepLift attribution scores that are already
computed with `multipy_by_inputs=False` flag.
If more than one input is passed to the infidelity function, they
will be passed to `perturb_func` as tuples in the same order as they
are passed to the infidelity function.
If inputs
- is a single tensor, the function needs to return a tuple
of perturbations and perturbed input such as:
perturb, perturbed_input and only perturbed_input in case
`infidelity_perturb_func_decorator` is used.
- is a tuple of tensors, corresponding perturbations and perturbed
inputs must be computed and returned as tuples in the
following format:
(perturb1, perturb2, ... perturbN), (perturbed_input1,
perturbed_input2, ... perturbed_inputN)
Similar to previous case here as well we need to return only
perturbed inputs in case `infidelity_perturb_func_decorator`
decorates our `perturb_func`.
It is important to note that for performance reasons `perturb_func`
isn't called for each example individually but on a batch of
input examples that are repeated `max_examples_per_batch / batch_size`
times within the batch.
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference values which sometimes represent ablated
values and are used to compare with the actual inputs to compute
importance scores in attribution algorithms. They can be represented
as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
Default: None
attributions (Tensor or tuple[Tensor, ...]):
Attribution scores computed based on an attribution algorithm.
These attribution scores can be computed using the implementations
provided in the `captum.attr` package. Some of those attribution
approaches are so called global methods, which means that
they factor in model inputs' multiplier, as described in:
https://arxiv.org/abs/1711.06104
Many global attribution algorithms can be used in local modes,
meaning that the inputs multiplier isn't factored in the
attribution scores.
This can be done during the definition of the attribution algorithm
by passing `multipy_by_inputs=False` flag.
For example in case of Integrated Gradients (IG) we can obtain
local attribution scores if we define the constructor of IG as:
ig = IntegratedGradients(multipy_by_inputs=False)
Some attribution algorithms are inherently local.
Examples of inherently local attribution methods include:
Saliency, Guided GradCam, Guided Backprop and Deconvolution.
For local attributions we can use real-valued perturbations
whereas for global attributions that perturbation is binary.
https://arxiv.org/abs/1901.09392
If we want to compute the infidelity of global attributions we
can use a binary perturbation matrix that will allow us to select
a subset of features from `inputs` or `inputs - baselines` space.
This will allow us to approximate sensitivity-n for a global
attribution algorithm.
`infidelity_perturb_func_decorator` function decorator is a helper
function that computes perturbations under the hood if perturbed
inputs are provided.
For more details about how to use `infidelity_perturb_func_decorator`,
please, read the documentation about `perturb_func`
Attributions have the same shape and dimensionality as the inputs.
If inputs is a single tensor then the attributions is a single
tensor as well. If inputs is provided as a tuple of tensors
then attributions will be tuples of tensors as well.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
forward_func in order, following the arguments in inputs.
Note that the perturbations are not computed with respect
to these arguments. This means that these arguments aren't
being passed to `perturb_func` as an input argument.
Default: None
target (int, tuple, Tensor, or list, optional): Indices for selecting
predictions from output (for classification cases,
this is usually the target class).
If the network returns a scalar value per example, no target
index is necessary.
For general 2D outputs, targets can be either:
- A single integer or a tensor containing a single
integer, which is applied to all input examples
- A list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
n_perturb_samples (int, optional): The number of times input tensors
are perturbed. Each input example in the inputs tensor is expanded
`n_perturb_samples`
times before calling `perturb_func` function.
Default: 10
max_examples_per_batch (int, optional): The number of maximum input
examples that are processed together. In case the number of
examples (`input batch size * n_perturb_samples`) exceeds
`max_examples_per_batch`, they will be sliced
into batches of `max_examples_per_batch` examples and processed
in a sequential order. If `max_examples_per_batch` is None, all
examples are processed together. `max_examples_per_batch` should
be at least equal to `input batch size` and at most
`input batch size * n_perturb_samples`.
Default: None
normalize (bool, optional): Normalize the dot product of the input
perturbation and the attribution so the infidelity value is invariant
to constant scaling of the attribution values. The normalization factor
beta is defined as the ratio of two mean values:
.. math::
\beta = \frac{
\mathbb{E}_{I \sim \mu_I} [ I^T \Phi(f, x) (f(x) - f(x - I)) ]
}{
\mathbb{E}_{I \sim \mu_I} [ (I^T \Phi(f, x))^2 ]
}
Please refer to the original paper for the meaning of the symbols. The same
normalization can be found in the paper's official implementation
https://github.com/chihkuanyeh/saliency_evaluation
Default: False
Returns:
infidelities (Tensor): A tensor of scalar infidelity scores per
input example. The first dimension is equal to the
number of examples in the input batch and the second
dimension is one.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> saliency = Saliency(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes saliency maps for class 3.
>>> attribution = saliency.attribute(input, target=3)
>>> # define a perturbation function for the input
>>> def perturb_fn(inputs):
>>> noise = torch.tensor(np.random.normal(0, 0.003, inputs.shape)).float()
>>> return noise, inputs - noise
>>> # Computes infidelity score for saliency maps
>>> infid = infidelity(net, perturb_fn, input, attribution)
"""
def _generate_perturbations(
current_n_perturb_samples: int,
) -> Tuple[TensorOrTupleOfTensorsGeneric, TensorOrTupleOfTensorsGeneric]:
r"""
The perturbations are generated for each example
`current_n_perturb_samples` times.
For performance reasons we are not calling `perturb_func` on each example but
on a batch that contains `current_n_perturb_samples`
repeated instances per example.
"""
def call_perturb_func():
r""" """
baselines_pert = None
inputs_pert: Union[Tensor, Tuple[Tensor, ...]]
if len(inputs_expanded) == 1:
inputs_pert = inputs_expanded[0]
if baselines_expanded is not None:
baselines_pert = cast(Tuple, baselines_expanded)[0]
else:
inputs_pert = inputs_expanded
baselines_pert = baselines_expanded
return (
perturb_func(inputs_pert, baselines_pert)
if baselines_pert is not None
else perturb_func(inputs_pert)
)
inputs_expanded = tuple(
torch.repeat_interleave(input, current_n_perturb_samples, dim=0)
for input in inputs
)
baselines_expanded = baselines
if baselines is not None:
baselines_expanded = tuple(
baseline.repeat_interleave(current_n_perturb_samples, dim=0)
if isinstance(baseline, torch.Tensor)
and baseline.shape[0] == input.shape[0]
and baseline.shape[0] > 1
else baseline
for input, baseline in zip(inputs, cast(Tuple, baselines))
)
return call_perturb_func()
def _validate_inputs_and_perturbations(
inputs: Tuple[Tensor, ...],
inputs_perturbed: Tuple[Tensor, ...],
perturbations: Tuple[Tensor, ...],
) -> None:
# asserts the sizes of the perturbations and inputs
assert len(perturbations) == len(inputs), (
"""The number of perturbed
inputs and corresponding perturbations must have the same number of
elements. Found number of inputs is: {} and perturbations:
{}"""
).format(len(perturbations), len(inputs))
# asserts the shapes of the perturbations and perturbed inputs
for perturb, input_perturbed in zip(perturbations, inputs_perturbed):
assert perturb[0].shape == input_perturbed[0].shape, (
"""Perturbed input
and corresponding perturbation must have the same shape and
dimensionality. Found perturbation shape is: {} and the input shape
is: {}"""
).format(perturb[0].shape, input_perturbed[0].shape)
def _next_infidelity_tensors(
current_n_perturb_samples: int,
) -> Union[Tuple[Tensor], Tuple[Tensor, Tensor, Tensor]]:
perturbations, inputs_perturbed = _generate_perturbations(
current_n_perturb_samples
)
perturbations = _format_tensor_into_tuples(perturbations)
inputs_perturbed = _format_tensor_into_tuples(inputs_perturbed)
_validate_inputs_and_perturbations(
cast(Tuple[Tensor, ...], inputs),
cast(Tuple[Tensor, ...], inputs_perturbed),
cast(Tuple[Tensor, ...], perturbations),
)
targets_expanded = _expand_target(
target,
current_n_perturb_samples,
expansion_type=ExpansionTypes.repeat_interleave,
)
additional_forward_args_expanded = _expand_additional_forward_args(
additional_forward_args,
current_n_perturb_samples,
expansion_type=ExpansionTypes.repeat_interleave,
)
inputs_perturbed_fwd = _run_forward(
forward_func,
inputs_perturbed,
targets_expanded,
additional_forward_args_expanded,
)
inputs_fwd = _run_forward(forward_func, inputs, target, additional_forward_args)
inputs_fwd = torch.repeat_interleave(
inputs_fwd, current_n_perturb_samples, dim=0
)
perturbed_fwd_diffs = inputs_fwd - inputs_perturbed_fwd
attributions_expanded = tuple(
torch.repeat_interleave(attribution, current_n_perturb_samples, dim=0)
for attribution in attributions
)
attributions_times_perturb = tuple(
(attribution_expanded * perturbation).view(attribution_expanded.size(0), -1)
for attribution_expanded, perturbation in zip(
attributions_expanded, perturbations
)
)
attr_times_perturb_sums = sum(
torch.sum(attribution_times_perturb, dim=1)
for attribution_times_perturb in attributions_times_perturb
)
attr_times_perturb_sums = cast(Tensor, attr_times_perturb_sums)
# reshape as Tensor(bsz, current_n_perturb_samples)
attr_times_perturb_sums = attr_times_perturb_sums.view(bsz, -1)
perturbed_fwd_diffs = perturbed_fwd_diffs.view(bsz, -1)
if normalize:
# in order to normalize, we have to aggregate the following tensors
# to calculate MSE in its polynomial expansion:
# (a-b)^2 = a^2 - 2ab + b^2
return (
attr_times_perturb_sums.pow(2).sum(-1),
(attr_times_perturb_sums * perturbed_fwd_diffs).sum(-1),
perturbed_fwd_diffs.pow(2).sum(-1),
)
else:
# returns (a-b)^2 if no need to normalize
return ((attr_times_perturb_sums - perturbed_fwd_diffs).pow(2).sum(-1),)
def _sum_infidelity_tensors(agg_tensors, tensors):
return tuple(agg_t + t for agg_t, t in zip(agg_tensors, tensors))
# perform argument formattings
inputs = _format_tensor_into_tuples(inputs) # type: ignore
if baselines is not None:
baselines = _format_baseline(baselines, cast(Tuple[Tensor, ...], inputs))
additional_forward_args = _format_additional_forward_args(additional_forward_args)
attributions = _format_tensor_into_tuples(attributions) # type: ignore
# Make sure that inputs and corresponding attributions have matching sizes.
assert len(inputs) == len(attributions), (
"""The number of tensors in the inputs and
attributions must match. Found number of tensors in the inputs is: {} and in the
attributions: {}"""
).format(len(inputs), len(attributions))
for inp, attr in zip(inputs, attributions):
assert inp.shape == attr.shape, (
"""Inputs and attributions must have
matching shapes. One of the input tensor's shape is {} and the
attribution tensor's shape is: {}"""
).format(inp.shape, attr.shape)
bsz = inputs[0].size(0)
with torch.no_grad():
# if not normalize, directly return aggrgated MSE ((a-b)^2,)
# else return aggregated MSE's polynomial expansion tensors (a^2, ab, b^2)
agg_tensors = _divide_and_aggregate_metrics(
cast(Tuple[Tensor, ...], inputs),
n_perturb_samples,
_next_infidelity_tensors,
agg_func=_sum_infidelity_tensors,
max_examples_per_batch=max_examples_per_batch,
)
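# With a = sum of attribution-times-perturbation and b = forward difference,
# agg_tensors holds (sum a^2, sum a*b, sum b^2) when normalize=True. Using
# beta = E[a*b] / E[a^2], the normalized infidelity expands the squared error
# E[(beta * a - b)^2] = beta^2 * E[a^2] - 2 * beta * E[a*b] + E[b^2],
# which is what the branch below computes before averaging over samples.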
if normalize:
beta_num = agg_tensors[1]
beta_denom = agg_tensors[0]
beta = safe_div(beta_num, beta_denom)
infidelity_values = (
beta**2 * agg_tensors[0] - 2 * beta * agg_tensors[1] + agg_tensors[2]
)
else:
infidelity_values = agg_tensors[0]
infidelity_values /= n_perturb_samples
return infidelity_values
#!/usr/bin/env python3
from copy import deepcopy
from inspect import signature
from typing import Any, Callable, cast, Tuple, Union
import torch
from captum._utils.common import (
_expand_and_update_additional_forward_args,
_expand_and_update_baselines,
_expand_and_update_target,
_format_baseline,
_format_tensor_into_tuples,
)
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.log import log_usage
from captum.metrics._utils.batching import _divide_and_aggregate_metrics
from torch import Tensor
def default_perturb_func(
inputs: TensorOrTupleOfTensorsGeneric, perturb_radius: float = 0.02
) -> Tuple[Tensor, ...]:
r"""A default function for generating perturbations of `inputs`
within perturbation radius of `perturb_radius`.
This function samples uniformly random from the L_Infinity ball
with `perturb_radius` radius.
The users can override this function if they prefer to use a
different perturbation function.
Args:
inputs (Tensor or tuple[Tensor, ...]): The input tensors that we'd
like to perturb by adding a random noise sampled uniformly
random from an L_infinity ball with a radius `perturb_radius`.
radius (float): A radius used for sampling from
an L_infinity ball.
Returns:
perturbed_input (tuple[Tensor, ...]): A list of perturbed inputs that
are created by adding noise sampled uniformly random
from L_infiniy ball with a radius `perturb_radius` to the
original inputs.
"""
inputs = _format_tensor_into_tuples(inputs)
perturbed_input = tuple(
input
+ torch.FloatTensor(input.size()) # type: ignore
.uniform_(-perturb_radius, perturb_radius)
.to(input.device)
for input in inputs
)
return perturbed_input
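# A hypothetical custom perturbation function (illustrative sketch): any callable
# with a compatible signature can be passed as `perturb_func` to `sensitivity_max`,
# for instance Gaussian rather than uniform noise.
#
# >>> def gaussian_perturb_func(inputs, perturb_radius=0.02):
# ...     inputs = _format_tensor_into_tuples(inputs)
# ...     return tuple(
# ...         input + perturb_radius * torch.randn_like(input) for input in inputs
# ...     )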
@log_usage()
def sensitivity_max(
explanation_func: Callable,
inputs: TensorOrTupleOfTensorsGeneric,
perturb_func: Callable = default_perturb_func,
perturb_radius: float = 0.02,
n_perturb_samples: int = 10,
norm_ord: str = "fro",
max_examples_per_batch: int = None,
**kwargs: Any,
) -> Tensor:
r"""
Explanation sensitivity measures the extent of explanation change when
the input is slightly perturbed. It has been shown that the models that
have high explanation sensitivity are prone to adversarial attacks:
`Interpretation of Neural Networks is Fragile`
https://www.aaai.org/ojs/index.php/AAAI/article/view/4252
The `sensitivity_max` metric measures the maximum sensitivity of an explanation
using a Monte Carlo sampling-based approximation. By default, in order to
do so, it samples multiple data points from a sub-space of an L-Infinity
ball with radius `perturb_radius`, using the `default_perturb_func`
default perturbation function. In the general case users can
use any L_p ball or any other custom sampling technique that they
prefer by providing a custom `perturb_func`.
Note that max sensitivity is similar to the Lipschitz Continuity metric,
however it is more robust and easier to estimate.
Since the explanation, for instance an attribution function,
may not always be continuous, Lipschitz continuity can be
unbounded. Therefore the latter isn't always appropriate.
More about the Lipschitz Continuity Metric can also be found here
`On the Robustness of Interpretability Methods`
https://arxiv.org/abs/1806.08049
and
`Towards Robust Interpretability with Self-Explaining Neural Networks`
https://papers.nips.cc/paper\
8003-towards-robust-interpretability-
with-self-explaining-neural-networks.pdf
More details about sensitivity max can be found here:
`On the (In)fidelity and Sensitivity of Explanations`
https://arxiv.org/abs/1901.09392
Args:
explanation_func (Callable):
This function can be the `attribute` method of an
attribution algorithm or any other explanation method
that returns the explanations.
inputs (Tensor or tuple[Tensor, ...]): Input for which
explanations are computed. If `explanation_func` takes a
single tensor as input, a single input tensor should
be provided.
If `explanation_func` takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
perturb_func (Callable):
The perturbation function of model inputs. This function takes
model inputs and optionally `perturb_radius` if
the function takes more than one argument and returns
perturbed inputs.
If more than one input is passed to the sensitivity function, they
will be passed to `perturb_func` as tuples in the same order as they
are passed to the sensitivity function.
It is important to note that for performance reasons `perturb_func`
isn't called for each example individually but on a batch of
input examples that are repeated `max_examples_per_batch / batch_size`
times within the batch.
Default: default_perturb_func
perturb_radius (float, optional): The epsilon radius used for sampling.
In the `default_perturb_func` it is used as the radius of
the L-Infinity ball. In a general case it can serve as a radius of
any L_p norm.
This argument is passed to `perturb_func` if it takes more than
one argument.
Default: 0.02
n_perturb_samples (int, optional): The number of times input tensors
are perturbed. Each input example in the inputs tensor is
expanded `n_perturb_samples` times before calling
`perturb_func` function.
Default: 10
norm_ord (int, float, or str, optional): The type of norm that is used to
compute the norm of the sensitivity matrix which is defined as the
difference between the explanation function at its input and perturbed
input. Acceptable values are either a string of 'fro' or 'nuc', or a
number in the range of [-inf, inf] (including float("-inf") &
float("inf")).
Default: 'fro'
max_examples_per_batch (int, optional): The number of maximum input
examples that are processed together. In case the number of
examples (`input batch size * n_perturb_samples`) exceeds
`max_examples_per_batch`, they will be sliced
into batches of `max_examples_per_batch` examples and processed
in a sequential order. If `max_examples_per_batch` is None, all
examples are processed together. `max_examples_per_batch` should
be at least equal to `input batch size` and at most
`input batch size * n_perturb_samples`.
Default: None
**kwargs (Any, optional): Contains a list of arguments that are passed
to `explanation_func` explanation function which in some cases
could be the `attribute` function of an attribution algorithm.
Any additional arguments that need to be passed to the explanation
function should be included here.
For instance, such arguments include:
`additional_forward_args`, `baselines` and `target`.
Returns:
sensitivities (Tensor): A tensor of scalar sensitivity scores per
input example. The first dimension is equal to the
number of examples in the input batch and the second
dimension is one. Returned sensitivities are normalized by
the magnitudes of the input explanations.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> saliency = Saliency(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes sensitivity score for saliency maps of class 3
>>> sens = sensitivity_max(saliency.attribute, input, target = 3)
"""
def _generate_perturbations(
current_n_perturb_samples: int,
) -> TensorOrTupleOfTensorsGeneric:
r"""
The perturbations are generated for each example
`current_n_perturb_samples` times.
For performance reasons we are not calling `perturb_func` on each example but
on a batch that contains `current_n_perturb_samples` repeated instances
per example.
"""
inputs_expanded: Union[Tensor, Tuple[Tensor, ...]] = tuple(
torch.repeat_interleave(input, current_n_perturb_samples, dim=0)
for input in inputs
)
if len(inputs_expanded) == 1:
inputs_expanded = inputs_expanded[0]
return (
perturb_func(inputs_expanded, perturb_radius)
if len(signature(perturb_func).parameters) > 1
else perturb_func(inputs_expanded)
)
def max_values(input_tnsr: Tensor) -> Tensor:
return torch.max(input_tnsr, dim=1).values # type: ignore
kwarg_expanded_for = None
kwargs_copy: Any = None
def _next_sensitivity_max(current_n_perturb_samples: int) -> Tensor:
inputs_perturbed = _generate_perturbations(current_n_perturb_samples)
# copy kwargs and update some of the arguments that need to be expanded
nonlocal kwarg_expanded_for
nonlocal kwargs_copy
if (
kwarg_expanded_for is None
or kwarg_expanded_for != current_n_perturb_samples
):
kwarg_expanded_for = current_n_perturb_samples
kwargs_copy = deepcopy(kwargs)
_expand_and_update_additional_forward_args(
current_n_perturb_samples, kwargs_copy
)
_expand_and_update_target(current_n_perturb_samples, kwargs_copy)
if "baselines" in kwargs:
baselines = kwargs["baselines"]
baselines = _format_baseline(
baselines, cast(Tuple[Tensor, ...], inputs)
)
if (
isinstance(baselines[0], Tensor)
and baselines[0].shape == inputs[0].shape
):
_expand_and_update_baselines(
cast(Tuple[Tensor, ...], inputs),
current_n_perturb_samples,
kwargs_copy,
)
expl_perturbed_inputs = explanation_func(inputs_perturbed, **kwargs_copy)
# tuplize `expl_perturbed_inputs` in case it is not
expl_perturbed_inputs = _format_tensor_into_tuples(expl_perturbed_inputs)
expl_inputs_expanded = tuple(
expl_input.repeat_interleave(current_n_perturb_samples, dim=0)
for expl_input in expl_inputs
)
sensitivities = torch.cat(
[
(expl_input - expl_perturbed).view(expl_perturbed.size(0), -1)
for expl_perturbed, expl_input in zip(
expl_perturbed_inputs, expl_inputs_expanded
)
],
dim=1,
)
# compute the norm of original input explanations
expl_inputs_norm_expanded = torch.norm(
torch.cat(
[expl_input.view(expl_input.size(0), -1) for expl_input in expl_inputs],
dim=1,
),
p=norm_ord,
dim=1,
keepdim=True,
).repeat_interleave(current_n_perturb_samples, dim=0)
expl_inputs_norm_expanded = torch.where(
expl_inputs_norm_expanded == 0.0,
torch.tensor(
1.0,
device=expl_inputs_norm_expanded.device,
dtype=expl_inputs_norm_expanded.dtype,
),
expl_inputs_norm_expanded,
)
# compute the norm for each input noisy example
sensitivities_norm = (
torch.norm(sensitivities, p=norm_ord, dim=1, keepdim=True)
/ expl_inputs_norm_expanded
)
return max_values(sensitivities_norm.view(bsz, -1))
inputs = _format_tensor_into_tuples(inputs) # type: ignore
bsz = inputs[0].size(0)
with torch.no_grad():
expl_inputs = explanation_func(inputs, **kwargs)
metrics_max = _divide_and_aggregate_metrics(
cast(Tuple[Tensor, ...], inputs),
n_perturb_samples,
_next_sensitivity_max,
max_examples_per_batch=max_examples_per_batch,
agg_func=torch.max,
)
return metrics_max
#!/usr/bin/env python3
import threading
import typing
import warnings
from collections import defaultdict
from typing import Any, Callable, cast, Dict, List, Optional, Sequence, Tuple, Union
import torch
from captum._utils.common import (
_reduce_list,
_run_forward,
_sort_key_list,
_verify_select_neuron,
)
from captum._utils.sample_gradient import SampleGradientWrapper
from captum._utils.typing import (
Literal,
ModuleOrModuleList,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from torch import device, Tensor
from torch.nn import Module
def apply_gradient_requirements(
inputs: Tuple[Tensor, ...], warn: bool = True
) -> List[bool]:
"""
Iterates through a tuple of input tensors and sets requires_grad to be true on
each Tensor, and ensures all grads are set to zero. To ensure that the input
is returned to its initial state, a list of flags representing whether or not
a tensor originally required grad is returned.
"""
assert isinstance(
inputs, tuple
), "Inputs should be wrapped in a tuple prior to preparing for gradients"
grad_required = []
for index, input in enumerate(inputs):
assert isinstance(input, torch.Tensor), "Given input is not a torch.Tensor"
grad_required.append(input.requires_grad)
inputs_dtype = input.dtype
# Note: torch 1.2 doesn't support is_complex for dtype, that's why we check
# for the existence of the is_complex method.
if not inputs_dtype.is_floating_point and not (
hasattr(inputs_dtype, "is_complex") and inputs_dtype.is_complex
):
if warn:
warnings.warn(
"""Input Tensor %d has a dtype of %s.
Gradients cannot be activated
for these data types."""
% (index, str(inputs_dtype))
)
elif not input.requires_grad:
if warn:
warnings.warn(
"Input Tensor %d did not already require gradients, "
"required_grads has been set automatically." % index
)
input.requires_grad_()
return grad_required
def undo_gradient_requirements(
inputs: Tuple[Tensor, ...], grad_required: List[bool]
) -> None:
"""
Iterates through a list of tensors, zeros each gradient, and sets requires_grad
to False if the corresponding index in grad_required is False.
This method is used to undo the effects of apply_gradient_requirements, making
grads not required for any input tensor that did not initially require
gradients.
"""
assert isinstance(
inputs, tuple
), "Inputs should be wrapped in a tuple prior to preparing for gradients."
assert len(inputs) == len(
grad_required
), "Input tuple length should match gradient mask."
for index, input in enumerate(inputs):
assert isinstance(input, torch.Tensor), "Given input is not a torch.Tensor"
if not grad_required[index]:
input.requires_grad_(False)
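# A minimal usage sketch (illustrative only) pairing the two helpers: record the
# original requires_grad flags before a gradient computation and restore them
# afterwards.
#
# >>> inputs = (torch.rand(2, 3), torch.rand(2, 3, requires_grad=True))
# >>> grad_required = apply_gradient_requirements(inputs, warn=False)
# >>> # ... run a forward/backward pass that needs gradients on all inputs ...
# >>> undo_gradient_requirements(inputs, grad_required)
# >>> [t.requires_grad for t in inputs]
# [False, True]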
def compute_gradients(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
r"""
Computes gradients of the output with respect to inputs for an
arbitrary forward function.
Args:
forward_fn: forward function. This can be, for example, the model's
forward function.
inputs: Input at which gradients are evaluated,
will be passed to forward_fn.
target_ind: Index of the target class for which gradients
must be computed (classification only).
additional_forward_args: Additional input arguments that forward
function requires. It takes an empty tuple (no additional
arguments) if no additional arguments are required
"""
with torch.autograd.set_grad_enabled(True):
# runs forward pass
outputs = _run_forward(forward_fn, inputs, target_ind, additional_forward_args)
assert outputs[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
# torch.unbind(forward_out) is a list of scalar tensor tuples and
# contains batch_size * #steps elements
grads = torch.autograd.grad(torch.unbind(outputs), inputs)
return grads
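# A short usage sketch (illustrative only; the model and data are hypothetical):
# for a model returning one scalar per example, no target index is needed.
#
# >>> model = torch.nn.Linear(3, 1)
# >>> x = torch.rand(4, 3, requires_grad=True)
# >>> grads = compute_gradients(model, x)
# >>> grads[0].shape
# torch.Size([4, 3])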
def _neuron_gradients(
inputs: Union[Tensor, Tuple[Tensor, ...]],
saved_layer: Dict[device, Tuple[Tensor, ...]],
key_list: List[device],
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
) -> Tuple[Tensor, ...]:
with torch.autograd.set_grad_enabled(True):
gradient_tensors = []
for key in key_list:
current_out_tensor = _verify_select_neuron(
saved_layer[key], gradient_neuron_selector
)
gradient_tensors.append(
torch.autograd.grad(
torch.unbind(current_out_tensor)
if current_out_tensor.numel() > 1
else current_out_tensor,
inputs,
)
)
_total_gradients = _reduce_list(gradient_tensors, sum)
return _total_gradients
@typing.overload
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: List[Module],
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> List[Tuple[Tensor, ...]]:
...
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]:
return _forward_layer_eval_with_neuron_grads(
forward_fn,
inputs,
layer,
additional_forward_args=additional_forward_args,
gradient_neuron_selector=None,
grad_enabled=grad_enabled,
device_ids=device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
@typing.overload
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Any,
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
forward_hook_with_return: Literal[False] = False,
require_layer_grads: bool = False,
) -> Dict[Module, Dict[device, Tuple[Tensor, ...]]]:
...
@typing.overload
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Any,
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
*,
forward_hook_with_return: Literal[True],
require_layer_grads: bool = False,
) -> Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor]:
...
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Any,
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
forward_hook_with_return: bool = False,
require_layer_grads: bool = False,
) -> Union[
Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor],
Dict[Module, Dict[device, Tuple[Tensor, ...]]],
]:
r"""
    A helper function that sets a hook on the model's `layer`, runs the forward
    pass, and returns the intermediate layer results stored in a dictionary,
and optionally also the output of the forward function. The keys in the
dictionary are the device ids and the values are corresponding intermediate layer
results, either the inputs or the outputs of the layer depending on whether we set
`attribute_to_layer_input` to True or False.
This is especially useful when we execute forward pass in a distributed setting,
using `DataParallel`s for example.
"""
saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]] = defaultdict(dict)
lock = threading.Lock()
all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer
# Set a forward hook on specified module and run forward pass to
# get layer output tensor(s).
# For DataParallel models, each partition adds entry to dictionary
# with key as device and value as corresponding Tensor.
def hook_wrapper(original_module):
def forward_hook(module, inp, out=None):
eval_tsrs = inp if attribute_to_layer_input else out
is_eval_tuple = isinstance(eval_tsrs, tuple)
if not is_eval_tuple:
eval_tsrs = (eval_tsrs,)
if require_layer_grads:
apply_gradient_requirements(eval_tsrs, warn=False)
with lock:
nonlocal saved_layer
# Note that cloning behaviour of `eval_tsr` is different
# when `forward_hook_with_return` is set to True. This is because
# otherwise `backward()` on the last output layer won't execute.
if forward_hook_with_return:
saved_layer[original_module][eval_tsrs[0].device] = eval_tsrs
eval_tsrs_to_return = tuple(
eval_tsr.clone() for eval_tsr in eval_tsrs
)
if not is_eval_tuple:
eval_tsrs_to_return = eval_tsrs_to_return[0]
return eval_tsrs_to_return
else:
saved_layer[original_module][eval_tsrs[0].device] = tuple(
eval_tsr.clone() for eval_tsr in eval_tsrs
)
return forward_hook
all_hooks = []
try:
for single_layer in all_layers:
if attribute_to_layer_input:
all_hooks.append(
single_layer.register_forward_pre_hook(hook_wrapper(single_layer))
)
else:
all_hooks.append(
single_layer.register_forward_hook(hook_wrapper(single_layer))
)
output = _run_forward(
forward_fn,
inputs,
target=target_ind,
additional_forward_args=additional_forward_args,
)
finally:
for hook in all_hooks:
hook.remove()
if len(saved_layer) == 0:
raise AssertionError("Forward hook did not obtain any outputs for given layer")
if forward_hook_with_return:
return saved_layer, output
return saved_layer
def _gather_distributed_tensors(
saved_layer: Dict[device, Tuple[Tensor, ...]],
device_ids: Union[None, List[int]] = None,
key_list: Union[None, List[device]] = None,
) -> Tuple[Tensor, ...]:
r"""
A helper function to concatenate intermediate layer results stored on
different devices in `saved_layer`. `saved_layer` is a dictionary that
contains `device_id` as a key and intermediate layer results (either
the input or the output of the layer) stored on the device corresponding to
the key.
`key_list` is a list of devices in appropriate ordering for concatenation
and if not provided, keys are sorted based on device ids.
If only one key exists (standard model), key list simply has one element.
"""
if key_list is None:
key_list = _sort_key_list(list(saved_layer.keys()), device_ids)
return _reduce_list([saved_layer[device_id] for device_id in key_list])
def _extract_device_ids(
forward_fn: Callable,
saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]],
device_ids: Union[None, List[int]],
) -> Union[None, List[int]]:
r"""
A helper function to extract device_ids from `forward_function` in case it is
provided as part of a `DataParallel` model or if is accessible from
`forward_fn`.
In case input device_ids is not None, this function returns that value.
"""
# Multiple devices / keys implies a DataParallel model, so we look for
# device IDs if given or available from forward function
# (DataParallel model object).
if (
max(len(saved_layer[single_layer]) for single_layer in saved_layer) > 1
and device_ids is None
):
if (
hasattr(forward_fn, "device_ids")
and cast(Any, forward_fn).device_ids is not None
):
device_ids = cast(Any, forward_fn).device_ids
else:
raise AssertionError(
"Layer tensors are saved on multiple devices, however unable to access"
" device ID list from the `forward_fn`. Device ID list must be"
" accessible from `forward_fn`. For example, they can be retrieved"
" if `forward_fn` is a model of type `DataParallel`. It is used"
" for identifying device batch ordering."
)
return device_ids
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
*,
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: List[Module],
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> List[Tuple[Tensor, ...]]:
...
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
additional_forward_args: Any = None,
gradient_neuron_selector: Union[
None, int, Tuple[Union[int, slice], ...], Callable
] = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Union[
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[Tensor, ...],
List[Tuple[Tensor, ...]],
]:
"""
This method computes forward evaluation for a particular layer using a
forward hook. If a gradient_neuron_selector is provided, then gradients with
respect to that neuron in the layer output are also returned.
These functionalities are combined due to the behavior of DataParallel models
with hooks, in which hooks are executed once per device. We need to internally
combine the separated tensors from devices by concatenating based on device_ids.
Any necessary gradients must be taken with respect to each independent batched
tensor, so the gradients are computed and combined appropriately.
More information regarding the behavior of forward hooks with DataParallel models
can be found in the PyTorch data parallel documentation. We maintain the separate
evals in a dictionary protected by a lock, analogous to the gather implementation
for the core PyTorch DataParallel implementation.
"""
grad_enabled = True if gradient_neuron_selector is not None else grad_enabled
with torch.autograd.set_grad_enabled(grad_enabled):
saved_layer = _forward_layer_distributed_eval(
forward_fn,
inputs,
layer,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
)
device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)
# Identifies correct device ordering based on device ids.
# key_list is a list of devices in appropriate ordering for concatenation.
# If only one key exists (standard model), key list simply has one element.
key_list = _sort_key_list(list(next(iter(saved_layer.values())).keys()), device_ids)
if gradient_neuron_selector is not None:
assert isinstance(
layer, Module
), "Cannot compute neuron gradients for multiple layers simultaneously!"
inp_grads = _neuron_gradients(
inputs, saved_layer[layer], key_list, gradient_neuron_selector
)
return (
_gather_distributed_tensors(saved_layer[layer], key_list=key_list),
inp_grads,
)
else:
if isinstance(layer, Module):
return _gather_distributed_tensors(saved_layer[layer], key_list=key_list)
else:
return [
_gather_distributed_tensors(saved_layer[curr_layer], key_list=key_list)
for curr_layer in layer
]
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
*,
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: List[Module],
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]]:
...
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: ModuleOrModuleList,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: Union[
None, int, Tuple[Union[int, slice], ...], Callable
] = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Union[
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]],
]:
r"""
Computes gradients of the output with respect to a given layer as well
as the output evaluation of the layer for an arbitrary forward function
and given input.
    For data parallel models, hooks are executed once per device, so we
need to internally combine the separated tensors from devices by
concatenating based on device_ids. Any necessary gradients must be taken
with respect to each independent batched tensor, so the gradients are
computed and combined appropriately.
More information regarding the behavior of forward hooks with DataParallel
models can be found in the PyTorch data parallel documentation. We maintain
the separate inputs in a dictionary protected by a lock, analogous to the
gather implementation for the core PyTorch DataParallel implementation.
NOTE: To properly handle inplace operations, a clone of the layer output
is stored. This structure inhibits execution of a backward hook on the last
module for the layer output when computing the gradient with respect to
the input, since we store an intermediate clone, as
opposed to the true module output. If backward module hooks are necessary
for the final module when computing input gradients, utilize
_forward_layer_eval_with_neuron_grads instead.
Args:
forward_fn: forward function. This can be for example model's
forward function.
layer: Layer for which gradients / output will be evaluated.
inputs: Input at which gradients are evaluated,
will be passed to forward_fn.
target_ind: Index of the target class for which gradients
must be computed (classification only).
output_fn: An optional function that is applied to the layer inputs or
outputs depending whether the `attribute_to_layer_input` is
set to `True` or `False`
        additional_forward_args: Additional input arguments that forward function requires.
It takes an empty tuple (no additional arguments) if no
additional arguments are required
Returns:
tuple[**gradients**, **evals**]:
- **gradients**:
Gradients of output with respect to target layer output.
- **evals**:
Target layer output for given input.
"""
with torch.autograd.set_grad_enabled(True):
# saved_layer is a dictionary mapping device to a tuple of
# layer evaluations on that device.
saved_layer, output = _forward_layer_distributed_eval(
forward_fn,
inputs,
layer,
target_ind=target_ind,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
forward_hook_with_return=True,
require_layer_grads=True,
)
assert output[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)
# Identifies correct device ordering based on device ids.
# key_list is a list of devices in appropriate ordering for concatenation.
# If only one key exists (standard model), key list simply has one element.
key_list = _sort_key_list(
list(next(iter(saved_layer.values())).keys()), device_ids
)
all_outputs: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]
if isinstance(layer, Module):
all_outputs = _reduce_list(
[
saved_layer[layer][device_id]
if output_fn is None
else output_fn(saved_layer[layer][device_id])
for device_id in key_list
]
)
else:
all_outputs = [
_reduce_list(
[
saved_layer[single_layer][device_id]
if output_fn is None
else output_fn(saved_layer[single_layer][device_id])
for device_id in key_list
]
)
for single_layer in layer
]
all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer
grad_inputs = tuple(
layer_tensor
for single_layer in all_layers
for device_id in key_list
for layer_tensor in saved_layer[single_layer][device_id]
)
saved_grads = torch.autograd.grad(torch.unbind(output), grad_inputs)
offset = 0
all_grads: List[Tuple[Tensor, ...]] = []
for single_layer in all_layers:
num_tensors = len(next(iter(saved_layer[single_layer].values())))
curr_saved_grads = [
saved_grads[i : i + num_tensors]
for i in range(
offset, offset + len(key_list) * num_tensors, num_tensors
)
]
offset += len(key_list) * num_tensors
if output_fn is not None:
curr_saved_grads = [
output_fn(curr_saved_grad) for curr_saved_grad in curr_saved_grads
]
all_grads.append(_reduce_list(curr_saved_grads))
layer_grads: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]
layer_grads = all_grads
if isinstance(layer, Module):
layer_grads = all_grads[0]
if gradient_neuron_selector is not None:
assert isinstance(
layer, Module
), "Cannot compute neuron gradients for multiple layers simultaneously!"
inp_grads = _neuron_gradients(
inputs, saved_layer[layer], key_list, gradient_neuron_selector
)
return (
cast(Tuple[Tensor, ...], layer_grads),
cast(Tuple[Tensor, ...], all_outputs),
inp_grads,
)
return layer_grads, all_outputs # type: ignore
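# Illustrative sketch (not part of the library): gradients and activations of an
# intermediate layer. The names `net` and `x` are hypothetical.
#
#     net = torch.nn.Sequential(
#         torch.nn.Linear(4, 5), torch.nn.ReLU(), torch.nn.Linear(5, 3)
#     )
#     x = torch.randn(2, 4)
#     layer_grads, layer_eval = compute_layer_gradients_and_eval(
#         net, net[0], (x,), target_ind=0
#     )
#     # layer_grads[0] and layer_eval[0] both have shape (2, 5): the gradient of the
#     # class-0 output with respect to the first linear layer's output, and that output.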
def construct_neuron_grad_fn(
layer: Module,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
device_ids: Union[None, List[int]] = None,
attribute_to_neuron_input: bool = False,
) -> Callable:
def grad_fn(
forward_fn: Callable,
inputs: TensorOrTupleOfTensorsGeneric,
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
_, grads = _forward_layer_eval_with_neuron_grads(
forward_fn,
inputs,
layer,
additional_forward_args,
gradient_neuron_selector=neuron_selector,
device_ids=device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
return grads
return grad_fn
def _extract_parameters_from_layers(layer_modules):
layer_parameters = []
if layer_modules is not None:
layer_parameters = [
parameter
for layer_module in layer_modules
for parameter in layer_module.parameters()
]
assert (
len(layer_parameters) > 0
), "No parameters are available for modules for provided input `layers`"
return layer_parameters
def _compute_jacobian_wrt_params(
model: Module,
inputs: Tuple[Any, ...],
labels: Optional[Tensor] = None,
loss_fn: Optional[Union[Module, Callable]] = None,
    layer_modules: Optional[List[Module]] = None,
) -> Tuple[Tensor, ...]:
r"""
Computes the Jacobian of a batch of test examples given a model, and optional
loss function and target labels. This method is equivalent to calculating the
gradient for every individual example in the minibatch.
Args:
model (torch.nn.Module): The trainable model providing the forward pass
inputs (tuple[Any, ...]): The minibatch for which the forward pass is computed.
It is unpacked before passing to `model`, so it must be a tuple. The
individual elements of `inputs` can be anything.
labels (Tensor, optional): Labels for input if computing a loss function.
loss_fn (torch.nn.Module or Callable, optional): The loss function. If a library
defined loss function is provided, it would be expected to be a
torch.nn.Module. If a custom loss is provided, it can be either type,
but must behave as a library loss function would if `reduction='none'`.
layer_modules (List[torch.nn.Module], optional): A list of PyTorch modules
w.r.t. which jacobian gradients are computed.
Returns:
grads (tuple[Tensor, ...]): Returns the Jacobian for the minibatch as a
tuple of gradients corresponding to the tuple of trainable parameters
            returned by `model.parameters()`. Each object grads[i] refers to the
gradients for the parameters in the i-th trainable layer of the model.
Each grads[i] object is a tensor with the gradients for the `inputs`
batch. For example, grads[i][j] would reference the gradients for the
parameters of the i-th layer, for the j-th member of the minibatch.
"""
with torch.autograd.set_grad_enabled(True):
out = model(*inputs)
assert out.dim() != 0, "Please ensure model output has at least one dimension."
if labels is not None and loss_fn is not None:
loss = loss_fn(out, labels)
if hasattr(loss_fn, "reduction"):
msg0 = "Please ensure loss_fn.reduction is set to `none`"
assert loss_fn.reduction == "none", msg0 # type: ignore
else:
msg1 = (
"Loss function is applying a reduction. Please ensure "
f"Output shape: {out.shape} and Loss shape: {loss.shape} "
"are matching."
)
assert loss.dim() != 0, msg1
assert out.shape[0] == loss.shape[0], msg1
out = loss
if layer_modules is not None:
layer_parameters = _extract_parameters_from_layers(layer_modules)
grads_list = [
torch.autograd.grad(
outputs=out[i],
inputs=cast(
Union[Tensor, Sequence[Tensor]],
model.parameters() if layer_modules is None else layer_parameters,
),
grad_outputs=torch.ones_like(out[i]),
retain_graph=True,
)
for i in range(out.shape[0])
]
grads = tuple([torch.stack(x) for x in zip(*grads_list)])
return tuple(grads)
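# Illustrative sketch (not part of the library): per-sample parameter gradients for
# a tiny model. The names `model`, `inputs`, and `labels` are hypothetical.
#
#     model = torch.nn.Linear(3, 2)
#     inputs = (torch.randn(4, 3),)
#     labels = torch.randint(0, 2, (4,))
#     loss_fn = torch.nn.CrossEntropyLoss(reduction="none")
#     grads = _compute_jacobian_wrt_params(model, inputs, labels, loss_fn)
#     # grads[0] has shape (4, 2, 3): per-sample gradients for model.weight
#     # grads[1] has shape (4, 2):    per-sample gradients for model.bias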
def _compute_jacobian_wrt_params_with_sample_wise_trick(
model: Module,
inputs: Tuple[Any, ...],
labels: Optional[Tensor] = None,
loss_fn: Optional[Union[Module, Callable]] = None,
reduction_type: Optional[str] = "sum",
    layer_modules: Optional[List[Module]] = None,
) -> Tuple[Any, ...]:
r"""
Computes the Jacobian of a batch of test examples given a model, and optional
    loss function and target labels. This method uses the sample-wise gradients
    per batch trick to fully vectorize the Jacobian calculation. Currently, only
linear and conv2d layers are supported.
User must `add_hooks(model)` before calling this function.
Args:
model (torch.nn.Module): The trainable model providing the forward pass
inputs (tuple[Any, ...]): The minibatch for which the forward pass is computed.
It is unpacked before passing to `model`, so it must be a tuple. The
individual elements of `inputs` can be anything.
labels (Tensor, optional): Labels for input if computing a loss function.
loss_fn (torch.nn.Module or Callable, optional): The loss function. If a library
defined loss function is provided, it would be expected to be a
torch.nn.Module. If a custom loss is provided, it can be either type,
but must behave as a library loss function would if `reduction='sum'` or
`reduction='mean'`.
reduction_type (str, optional): The type of reduction applied. If a loss_fn is
passed, this should match `loss_fn.reduction`. Else if gradients are
being computed on direct model outputs (scores), then 'sum' should be
used.
Defaults to 'sum'.
        layer_modules (List[torch.nn.Module], optional): A list of PyTorch modules w.r.t.
which jacobian gradients are computed.
Returns:
grads (tuple[Tensor, ...]): Returns the Jacobian for the minibatch as a
tuple of gradients corresponding to the tuple of trainable parameters
            returned by `model.parameters()`. Each object grads[i] refers to the
gradients for the parameters in the i-th trainable layer of the model.
Each grads[i] object is a tensor with the gradients for the `inputs`
batch. For example, grads[i][j] would reference the gradients for the
parameters of the i-th layer, for the j-th member of the minibatch.
"""
with torch.autograd.set_grad_enabled(True):
inputs = tuple(inp.clone() for inp in inputs)
apply_gradient_requirements(inputs)
sample_grad_wrapper = SampleGradientWrapper(model, layer_modules)
try:
sample_grad_wrapper.add_hooks()
out = model(*inputs)
assert (
out.dim() != 0
), "Please ensure model output has at least one dimension."
if labels is not None and loss_fn is not None:
loss = loss_fn(out, labels)
# TODO: allow loss_fn to be Callable
if (isinstance(loss_fn, Module) or callable(loss_fn)) and hasattr(
loss_fn, "reduction"
):
reduction = loss_fn.reduction # type: ignore
msg0 = (
"Please ensure that loss_fn.reduction is set to `sum` or `mean`"
)
assert reduction != "none", msg0
msg1 = (
f"loss_fn.reduction ({reduction}) does not match"
f"reduction type ({reduction_type}). Please ensure they are"
" matching."
)
assert reduction == reduction_type, msg1
msg2 = (
"Please ensure custom loss function is applying either a "
"sum or mean reduction."
)
assert out.shape != loss.shape, msg2
if reduction_type != "sum" and reduction_type != "mean":
raise ValueError(
f"{reduction_type} is not a valid value for reduction_type. "
"Must be either 'sum' or 'mean'."
)
out = loss
sample_grad_wrapper.compute_param_sample_gradients(
out, loss_mode=reduction_type
)
if layer_modules is not None:
layer_parameters = _extract_parameters_from_layers(layer_modules)
grads = tuple(
param.sample_grad # type: ignore
for param in (
model.parameters() if layer_modules is None else layer_parameters
)
if hasattr(param, "sample_grad")
)
finally:
sample_grad_wrapper.remove_hooks()
return grads
|
from collections import defaultdict
from enum import Enum
from typing import cast, DefaultDict, Iterable, List, Optional, Tuple, Union
import torch
from captum._utils.common import _format_tensor_into_tuples, _register_backward_hook
from torch import Tensor
from torch.nn import Module
def _reset_sample_grads(module: Module) -> None:
module.weight.sample_grad = 0 # type: ignore
if module.bias is not None:
module.bias.sample_grad = 0 # type: ignore
def linear_param_grads(
module: Module, activation: Tensor, gradient_out: Tensor, reset: bool = False
) -> None:
r"""
Computes parameter gradients per sample for nn.Linear module, given module
input activations and output gradients.
Gradients are accumulated in the sample_grad attribute of each parameter
(weight and bias). If reset = True, any current sample_grad values are reset,
otherwise computed gradients are accumulated and added to the existing
stored gradients.
Inputs with more than 2 dimensions are only supported with torch 1.8 or later
"""
if reset:
_reset_sample_grads(module)
module.weight.sample_grad += torch.einsum( # type: ignore
"n...i,n...j->nij", gradient_out, activation
)
if module.bias is not None:
module.bias.sample_grad += torch.einsum( # type: ignore
"n...i->ni", gradient_out
)
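# Explanatory note (assumed shapes): for a plain 2D case, `activation` has shape
# (N, in_features) and `gradient_out` has shape (N, out_features); the einsum
# "n...i,n...j->nij" above then produces per-sample weight gradients of shape
# (N, out_features, in_features), i.e. the outer product of each sample's output
# gradient with its input activation, while "n...i->ni" sums the bias gradient
# over any extra (e.g. sequence) dimensions.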
def conv2d_param_grads(
module: Module, activation: Tensor, gradient_out: Tensor, reset: bool = False
) -> None:
r"""
Computes parameter gradients per sample for nn.Conv2d module, given module
input activations and output gradients.
nn.Conv2d modules with padding set to a string option ('same' or 'valid') are
currently unsupported.
Gradients are accumulated in the sample_grad attribute of each parameter
(weight and bias). If reset = True, any current sample_grad values are reset,
otherwise computed gradients are accumulated and added to the existing
stored gradients.
"""
if reset:
_reset_sample_grads(module)
batch_size = cast(int, activation.shape[0])
unfolded_act = torch.nn.functional.unfold(
activation,
cast(Union[int, Tuple[int, ...]], module.kernel_size),
dilation=cast(Union[int, Tuple[int, ...]], module.dilation),
padding=cast(Union[int, Tuple[int, ...]], module.padding),
stride=cast(Union[int, Tuple[int, ...]], module.stride),
)
reshaped_grad = gradient_out.reshape(batch_size, -1, unfolded_act.shape[-1])
grad1 = torch.einsum("ijk,ilk->ijl", reshaped_grad, unfolded_act)
shape = [batch_size] + list(cast(Iterable[int], module.weight.shape))
module.weight.sample_grad += grad1.reshape(shape) # type: ignore
if module.bias is not None:
module.bias.sample_grad += torch.sum(reshaped_grad, dim=2) # type: ignore
SUPPORTED_MODULES = {
torch.nn.Conv2d: conv2d_param_grads,
torch.nn.Linear: linear_param_grads,
}
class LossMode(Enum):
SUM = 0
MEAN = 1
class SampleGradientWrapper:
r"""
Wrapper which allows computing sample-wise gradients in a single backward pass.
This is accomplished by adding hooks to capture activations and output
gradients for supported modules, and using these activations and gradients
to compute the parameter gradients per-sample.
Currently, only nn.Linear and nn.Conv2d modules are supported.
Similar reference implementations of sample-based gradients include:
- https://github.com/cybertronai/autograd-hacks
- https://github.com/pytorch/opacus/tree/main/opacus/grad_sample
"""
def __init__(self, model, layer_modules=None) -> None:
self.model = model
self.hooks_added = False
self.activation_dict: DefaultDict[Module, List[Tensor]] = defaultdict(list)
self.gradient_dict: DefaultDict[Module, List[Tensor]] = defaultdict(list)
self.forward_hooks: List[torch.utils.hooks.RemovableHandle] = []
self.backward_hooks: List[torch.utils.hooks.RemovableHandle] = []
self.layer_modules: Optional[List[Module]] = layer_modules
def add_hooks(self) -> None:
self.hooks_added = True
self.model.apply(self._register_module_hooks)
def _register_module_hooks(self, module: torch.nn.Module) -> None:
if (self.layer_modules is None or module in self.layer_modules) and isinstance(
module, tuple(SUPPORTED_MODULES.keys())
):
self.forward_hooks.append(
module.register_forward_hook(self._forward_hook_fn)
)
self.backward_hooks.extend(
_register_backward_hook(module, self._backward_hook_fn, None)
)
def _forward_hook_fn(
self,
module: Module,
module_input: Union[Tensor, Tuple[Tensor, ...]],
module_output: Union[Tensor, Tuple[Tensor, ...]],
) -> None:
inp_tuple = _format_tensor_into_tuples(module_input)
self.activation_dict[module].append(inp_tuple[0].clone().detach())
def _backward_hook_fn(
self,
module: Module,
grad_input: Union[Tensor, Tuple[Tensor, ...]],
grad_output: Union[Tensor, Tuple[Tensor, ...]],
) -> None:
grad_output_tuple = _format_tensor_into_tuples(grad_output)
self.gradient_dict[module].append(grad_output_tuple[0].clone().detach())
def remove_hooks(self) -> None:
self.hooks_added = False
for hook in self.forward_hooks:
hook.remove()
for hook in self.backward_hooks:
hook.remove()
self.forward_hooks = []
self.backward_hooks = []
def _reset(self) -> None:
self.activation_dict = defaultdict(list)
self.gradient_dict = defaultdict(list)
def compute_param_sample_gradients(self, loss_blob, loss_mode="mean") -> None:
assert (
loss_mode.upper() in LossMode.__members__
), f"Provided loss mode {loss_mode} is not valid"
mode = LossMode[loss_mode.upper()]
self.model.zero_grad()
loss_blob.backward(gradient=torch.ones_like(loss_blob))
for module in self.gradient_dict:
sample_grad_fn = SUPPORTED_MODULES[type(module)]
activations = self.activation_dict[module]
gradients = self.gradient_dict[module]
assert len(activations) == len(gradients), (
"Number of saved activations do not match number of saved gradients."
" This may occur if multiple forward passes are run without calling"
" reset or computing param gradients."
)
# Reversing grads since when a module is used multiple times,
# the activations will be aligned with the reverse order of the gradients,
# since the order is reversed in backprop.
for i, (act, grad) in enumerate(
zip(activations, list(reversed(gradients)))
):
mult = 1 if mode is LossMode.SUM else act.shape[0]
sample_grad_fn(module, act, grad * mult, reset=(i == 0))
self._reset()
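# Illustrative sketch (not part of the library): computing per-sample gradients with
# SampleGradientWrapper. The names `model` and `batch` are hypothetical.
#
#     model = torch.nn.Linear(3, 2)
#     batch = torch.randn(4, 3)
#     wrapper = SampleGradientWrapper(model)
#     wrapper.add_hooks()
#     try:
#         out = model(batch)  # shape (4, 2)
#         wrapper.compute_param_sample_gradients(out.sum(dim=1), loss_mode="sum")
#         # model.weight.sample_grad has shape (4, 2, 3)
#         # model.bias.sample_grad has shape (4, 2)
#     finally:
#         wrapper.remove_hooks()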
|
#!/usr/bin/env python3
import typing
from enum import Enum
from functools import reduce
from inspect import signature
from typing import Any, Callable, cast, Dict, List, overload, Tuple, Union
import numpy as np
import torch
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
TupleOrTensorOrBoolGeneric,
)
from torch import device, Tensor
from torch.nn import Module
def _parse_version(v: str) -> Tuple[int, ...]:
"""
Parse version strings into tuples for comparison.
Versions should be in the form of "<major>.<minor>.<patch>", "<major>.<minor>",
or "<major>". The "dev", "post" and other letter portions of the given version will
be ignored.
Args:
v (str): A version string.
Returns:
version_tuple (tuple[int]): A tuple of integer values to use for version
comparison.
"""
    version_parts = [n for n in v.split(".") if n.isdigit()]
    assert version_parts != []
    return tuple(map(int, version_parts))
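# Illustrative examples (assumed behavior of the helper above):
#
#     _parse_version("1.12.0")              # (1, 12, 0)
#     _parse_version("1.13")                # (1, 13)
#     _parse_version("1.13.0.dev20221005")  # (1, 13, 0), the "dev" part is dropped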
class ExpansionTypes(Enum):
repeat = 1
repeat_interleave = 2
def safe_div(
numerator: Tensor,
denom: Union[Tensor, int, float],
default_denom: Union[Tensor, int, float] = 1.0,
) -> Tensor:
r"""
    A simple utility function to perform `numerator / denom`;
    wherever `denom` is zero, `default_denom` is used in its place.
"""
if isinstance(denom, (int, float)):
return numerator / (denom if denom != 0 else default_denom)
# convert default_denom to tensor if it is float
if not torch.is_tensor(default_denom):
default_denom = torch.tensor(
default_denom, dtype=denom.dtype, device=denom.device
)
return numerator / torch.where(denom != 0, denom, default_denom)
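# Illustrative example (assumed behavior of safe_div): zero entries in the
# denominator fall back to `default_denom`.
#
#     safe_div(torch.tensor([1.0, 2.0]), torch.tensor([0.0, 4.0]))
#     # -> tensor([1.0000, 0.5000])   (1.0 / default 1.0, 2.0 / 4.0)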
@typing.overload
def _is_tuple(inputs: Tensor) -> Literal[False]:
...
@typing.overload
def _is_tuple(inputs: Tuple[Tensor, ...]) -> Literal[True]:
...
def _is_tuple(inputs: Union[Tensor, Tuple[Tensor, ...]]) -> bool:
return isinstance(inputs, tuple)
def _validate_target(num_samples: int, target: TargetType) -> None:
if isinstance(target, list) or (
isinstance(target, torch.Tensor) and torch.numel(target) > 1
):
assert num_samples == len(target), (
"The number of samples provied in the"
"input {} does not match with the number of targets. {}".format(
num_samples, len(target)
)
)
def _validate_input(
inputs: Tuple[Tensor, ...],
baselines: Tuple[Union[Tensor, int, float], ...],
draw_baseline_from_distrib: bool = False,
) -> None:
assert len(inputs) == len(baselines), (
"Input and baseline must have the same "
"dimensions, baseline has {} features whereas input has {}.".format(
len(baselines), len(inputs)
)
)
for input, baseline in zip(inputs, baselines):
if draw_baseline_from_distrib:
assert (
isinstance(baseline, (int, float))
or input.shape[1:] == baseline.shape[1:]
), (
"The samples in input and baseline batches must have"
" the same shape or the baseline corresponding to the"
" input tensor must be a scalar."
" Found baseline: {} and input: {} ".format(baseline, input)
)
else:
assert (
isinstance(baseline, (int, float))
or input.shape == baseline.shape
or baseline.shape[0] == 1
), (
"Baseline can be provided as a tensor for just one input and"
" broadcasted to the batch or input and baseline must have the"
" same shape or the baseline corresponding to each input tensor"
" must be a scalar. Found baseline: {} and input: {}".format(
baseline, input
)
)
def _zeros(inputs: Tuple[Tensor, ...]) -> Tuple[int, ...]:
r"""
    Takes a tuple of tensors as input and returns a tuple that has the same
    length as `inputs` with each element as the integer 0 (or False for
    boolean inputs).
"""
return tuple(0 if input.dtype is not torch.bool else False for input in inputs)
def _format_baseline(
baselines: BaselineType, inputs: Tuple[Tensor, ...]
) -> Tuple[Union[Tensor, int, float], ...]:
if baselines is None:
return _zeros(inputs)
if not isinstance(baselines, tuple):
baselines = (baselines,)
for baseline in baselines:
assert isinstance(
baseline, (torch.Tensor, int, float)
), "baseline input argument must be either a torch.Tensor or a number \
however {} detected".format(
type(baseline)
)
return baselines
def _format_feature_mask(
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]],
inputs: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
"""
Format a feature mask into a tuple of tensors.
The `inputs` should be correctly formatted first
If `feature_mask` is None, assign each non-batch dimension with a consecutive
integer from 0.
If `feature_mask` is a tensor, wrap it in a tuple.
"""
if feature_mask is None:
formatted_mask = []
current_num_features = 0
for inp in inputs:
# the following can handle empty tensor where numel is 0
# empty tensor will be added to the feature mask
num_features = torch.numel(inp[0:1])
formatted_mask.append(
current_num_features
+ torch.reshape(
torch.arange(num_features, device=inp.device),
inp[0:1].shape,
)
)
current_num_features += num_features
formatted_mask = tuple(formatted_mask)
else:
formatted_mask = _format_tensor_into_tuples(feature_mask)
return formatted_mask
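# Illustrative example (assumed behavior of _format_feature_mask when no mask is
# given): every non-batch element across all inputs receives a distinct index.
#
#     inputs = (torch.zeros(2, 3), torch.zeros(2, 2, 2))
#     masks = _format_feature_mask(None, inputs)
#     # masks[0] -> tensor([[0, 1, 2]])                 shape (1, 3)
#     # masks[1] -> indices 3..6 reshaped to (1, 2, 2)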
@overload
def _format_tensor_into_tuples(inputs: None) -> None:
...
@overload
def _format_tensor_into_tuples(
inputs: Union[Tensor, Tuple[Tensor, ...]]
) -> Tuple[Tensor, ...]:
...
def _format_tensor_into_tuples(
inputs: Union[None, Tensor, Tuple[Tensor, ...]]
) -> Union[None, Tuple[Tensor, ...]]:
if inputs is None:
return None
if not isinstance(inputs, tuple):
assert isinstance(inputs, torch.Tensor), (
"`inputs` must be a torch.Tensor or a tuple[torch.Tensor] "
f"but found: {type(inputs)}"
)
inputs = (inputs,)
return inputs
def _format_inputs(inputs: Any, unpack_inputs: bool = True) -> Any:
return (
inputs
if (isinstance(inputs, tuple) or isinstance(inputs, list)) and unpack_inputs
else (inputs,)
)
def _format_float_or_tensor_into_tuples(
inputs: Union[float, Tensor, Tuple[Union[float, Tensor], ...]]
) -> Tuple[Union[float, Tensor], ...]:
if not isinstance(inputs, tuple):
assert isinstance(
inputs, (torch.Tensor, float)
), "`inputs` must have type float or torch.Tensor but {} found: ".format(
type(inputs)
)
inputs = (inputs,)
return inputs
@overload
def _format_additional_forward_args(additional_forward_args: None) -> None:
...
@overload
def _format_additional_forward_args(
additional_forward_args: Union[Tensor, Tuple]
) -> Tuple:
...
@overload
def _format_additional_forward_args(additional_forward_args: Any) -> Union[None, Tuple]:
...
def _format_additional_forward_args(additional_forward_args: Any) -> Union[None, Tuple]:
if additional_forward_args is not None and not isinstance(
additional_forward_args, tuple
):
additional_forward_args = (additional_forward_args,)
return additional_forward_args
def _expand_additional_forward_args(
additional_forward_args: Any,
n_steps: int,
expansion_type: ExpansionTypes = ExpansionTypes.repeat,
) -> Union[None, Tuple]:
def _expand_tensor_forward_arg(
additional_forward_arg: Tensor,
n_steps: int,
expansion_type: ExpansionTypes = ExpansionTypes.repeat,
) -> Tensor:
if len(additional_forward_arg.size()) == 0:
return additional_forward_arg
if expansion_type == ExpansionTypes.repeat:
return torch.cat([additional_forward_arg] * n_steps, dim=0)
elif expansion_type == ExpansionTypes.repeat_interleave:
return additional_forward_arg.repeat_interleave(n_steps, dim=0)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
if additional_forward_args is None:
return None
return tuple(
_expand_tensor_forward_arg(additional_forward_arg, n_steps, expansion_type)
if isinstance(additional_forward_arg, torch.Tensor)
else additional_forward_arg
for additional_forward_arg in additional_forward_args
)
def _expand_target(
target: TargetType,
n_steps: int,
expansion_type: ExpansionTypes = ExpansionTypes.repeat,
) -> TargetType:
if isinstance(target, list):
if expansion_type == ExpansionTypes.repeat:
return target * n_steps
elif expansion_type == ExpansionTypes.repeat_interleave:
expanded_target = []
for i in target:
expanded_target.extend([i] * n_steps)
return cast(Union[List[Tuple[int, ...]], List[int]], expanded_target)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
elif isinstance(target, torch.Tensor) and torch.numel(target) > 1:
if expansion_type == ExpansionTypes.repeat:
return torch.cat([target] * n_steps, dim=0)
elif expansion_type == ExpansionTypes.repeat_interleave:
return target.repeat_interleave(n_steps, dim=0)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
return target
def _expand_feature_mask(
feature_mask: Union[Tensor, Tuple[Tensor, ...]], n_samples: int
):
is_feature_mask_tuple = _is_tuple(feature_mask)
feature_mask = _format_tensor_into_tuples(feature_mask)
feature_mask_new = tuple(
feature_mask_elem.repeat_interleave(n_samples, dim=0)
if feature_mask_elem.size(0) > 1
else feature_mask_elem
for feature_mask_elem in feature_mask
)
return _format_output(is_feature_mask_tuple, feature_mask_new)
def _expand_and_update_baselines(
inputs: Tuple[Tensor, ...],
n_samples: int,
kwargs: dict,
draw_baseline_from_distrib: bool = False,
):
def get_random_baseline_indices(bsz, baseline):
num_ref_samples = baseline.shape[0]
return np.random.choice(num_ref_samples, n_samples * bsz).tolist()
# expand baselines to match the sizes of input
if "baselines" not in kwargs:
return
baselines = kwargs["baselines"]
baselines = _format_baseline(baselines, inputs)
_validate_input(
inputs, baselines, draw_baseline_from_distrib=draw_baseline_from_distrib
)
if draw_baseline_from_distrib:
bsz = inputs[0].shape[0]
baselines = tuple(
baseline[get_random_baseline_indices(bsz, baseline)]
if isinstance(baseline, torch.Tensor)
else baseline
for baseline in baselines
)
else:
baselines = tuple(
baseline.repeat_interleave(n_samples, dim=0)
if isinstance(baseline, torch.Tensor)
and baseline.shape[0] == input.shape[0]
and baseline.shape[0] > 1
else baseline
for input, baseline in zip(inputs, baselines)
)
# update kwargs with expanded baseline
kwargs["baselines"] = baselines
def _expand_and_update_additional_forward_args(n_samples: int, kwargs: dict):
if "additional_forward_args" not in kwargs:
return
additional_forward_args = kwargs["additional_forward_args"]
additional_forward_args = _format_additional_forward_args(additional_forward_args)
if additional_forward_args is None:
return
additional_forward_args = _expand_additional_forward_args(
additional_forward_args,
n_samples,
expansion_type=ExpansionTypes.repeat_interleave,
)
# update kwargs with expanded baseline
kwargs["additional_forward_args"] = additional_forward_args
def _expand_and_update_target(n_samples: int, kwargs: dict):
if "target" not in kwargs:
return
target = kwargs["target"]
target = _expand_target(
target, n_samples, expansion_type=ExpansionTypes.repeat_interleave
)
# update kwargs with expanded baseline
kwargs["target"] = target
def _expand_and_update_feature_mask(n_samples: int, kwargs: dict):
if "feature_mask" not in kwargs:
return
feature_mask = kwargs["feature_mask"]
if feature_mask is None:
return
feature_mask = _expand_feature_mask(feature_mask, n_samples)
kwargs["feature_mask"] = feature_mask
@typing.overload
def _format_output(
is_inputs_tuple: Literal[True], output: Tuple[Tensor, ...]
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _format_output(
is_inputs_tuple: Literal[False], output: Tuple[Tensor, ...]
) -> Tensor:
...
@typing.overload
def _format_output(
is_inputs_tuple: bool, output: Tuple[Tensor, ...]
) -> Union[Tensor, Tuple[Tensor, ...]]:
...
def _format_output(
is_inputs_tuple: bool, output: Tuple[Tensor, ...]
) -> Union[Tensor, Tuple[Tensor, ...]]:
r"""
    In case the input is a single tensor and the output is returned as a
    tuple, we take the first element of the output tuple to match the
    shape signature of the input.
"""
assert isinstance(output, tuple), "Output must be in shape of a tuple"
assert is_inputs_tuple or len(output) == 1, (
"The input is a single tensor however the output isn't."
"The number of output tensors is: {}".format(len(output))
)
return output if is_inputs_tuple else output[0]
@typing.overload
def _format_outputs(
is_multiple_inputs: Literal[False], outputs: List[Tuple[Tensor, ...]]
) -> Union[Tensor, Tuple[Tensor, ...]]:
...
@typing.overload
def _format_outputs(
is_multiple_inputs: Literal[True], outputs: List[Tuple[Tensor, ...]]
) -> List[Union[Tensor, Tuple[Tensor, ...]]]:
...
@typing.overload
def _format_outputs(
is_multiple_inputs: bool, outputs: List[Tuple[Tensor, ...]]
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
...
def _format_outputs(
is_multiple_inputs: bool, outputs: List[Tuple[Tensor, ...]]
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
assert isinstance(outputs, list), "Outputs must be a list"
assert is_multiple_inputs or len(outputs) == 1, (
"outputs should contain multiple inputs or have a single output"
f"however the number of outputs is: {len(outputs)}"
)
return (
[_format_output(len(output) > 1, output) for output in outputs]
if is_multiple_inputs
else _format_output(len(outputs[0]) > 1, outputs[0])
)
def _run_forward(
forward_func: Callable,
inputs: Any,
target: TargetType = None,
additional_forward_args: Any = None,
) -> Tensor:
forward_func_args = signature(forward_func).parameters
if len(forward_func_args) == 0:
output = forward_func()
return output if target is None else _select_targets(output, target)
# make everything a tuple so that it is easy to unpack without
# using if-statements
inputs = _format_inputs(inputs)
additional_forward_args = _format_additional_forward_args(additional_forward_args)
output = forward_func(
*(*inputs, *additional_forward_args)
if additional_forward_args is not None
else inputs
)
return _select_targets(output, target)
def _select_targets(output: Tensor, target: TargetType) -> Tensor:
if target is None:
return output
num_examples = output.shape[0]
dims = len(output.shape)
device = output.device
if isinstance(target, (int, tuple)):
return _verify_select_column(output, target)
elif isinstance(target, torch.Tensor):
if torch.numel(target) == 1 and isinstance(target.item(), int):
return _verify_select_column(output, cast(int, target.item()))
elif len(target.shape) == 1 and torch.numel(target) == num_examples:
assert dims == 2, "Output must be 2D to select tensor of targets."
return torch.gather(output, 1, target.reshape(len(output), 1))
else:
raise AssertionError(
"Tensor target dimension %r is not valid. %r"
% (target.shape, output.shape)
)
elif isinstance(target, list):
assert len(target) == num_examples, "Target list length does not match output!"
if isinstance(target[0], int):
assert dims == 2, "Output must be 2D to select tensor of targets."
return torch.gather(
output, 1, torch.tensor(target, device=device).reshape(len(output), 1)
)
elif isinstance(target[0], tuple):
return torch.stack(
[
output[(i,) + cast(Tuple, targ_elem)]
for i, targ_elem in enumerate(target)
]
)
else:
raise AssertionError(
f"Target element type {type(target[0])} in list is not valid."
)
else:
raise AssertionError(f"Target type {type(target)} is not valid.")
def _contains_slice(target: Union[int, Tuple[Union[int, slice], ...]]) -> bool:
if isinstance(target, tuple):
for index in target:
if isinstance(index, slice):
return True
return False
return isinstance(target, slice)
def _verify_select_column(
output: Tensor, target: Union[int, Tuple[Union[int, slice], ...]]
) -> Tensor:
target = (target,) if isinstance(target, int) else target
assert (
len(target) <= len(output.shape) - 1
), "Cannot choose target column with output shape %r." % (output.shape,)
return output[(slice(None), *target)]
def _verify_select_neuron(
layer_output: Tuple[Tensor, ...],
selector: Union[int, Tuple[Union[int, slice], ...], Callable],
) -> Tensor:
if callable(selector):
return selector(layer_output if len(layer_output) > 1 else layer_output[0])
assert len(layer_output) == 1, (
"Cannot select neuron index from layer with multiple tensors,"
"consider providing a neuron selector function instead."
)
selected_neurons = _verify_select_column(layer_output[0], selector)
if _contains_slice(selector):
return selected_neurons.reshape(selected_neurons.shape[0], -1).sum(1)
return selected_neurons
def _extract_device(
module: Module,
hook_inputs: Union[None, Tensor, Tuple[Tensor, ...]],
hook_outputs: Union[None, Tensor, Tuple[Tensor, ...]],
) -> device:
params = list(module.parameters())
if (
(hook_inputs is None or len(hook_inputs) == 0)
and (hook_outputs is None or len(hook_outputs) == 0)
and len(params) == 0
):
raise RuntimeError(
"""Unable to extract device information for the module
{}. Both inputs and outputs to the forward hook and
`module.parameters()` are empty.
The reason that the inputs to the forward hook are empty
could be due to the fact that the arguments to that
module {} are all named and are passed as named
variables to its forward function.
""".format(
module, module
)
)
if hook_inputs is not None and len(hook_inputs) > 0:
return hook_inputs[0].device
if hook_outputs is not None and len(hook_outputs) > 0:
return hook_outputs[0].device
return params[0].device
def _reduce_list(
val_list: List[TupleOrTensorOrBoolGeneric],
red_func: Callable[[List], Any] = torch.cat,
) -> TupleOrTensorOrBoolGeneric:
"""
Applies reduction function to given list. If each element in the list is
a Tensor, applies reduction function to all elements of the list, and returns
    the output Tensor / value. If each element is a boolean, the logical OR (any)
    of the elements is returned.
If each element is a tuple, applies reduction
function to corresponding elements of each tuple in the list, and returns
tuple of reduction function outputs with length matching the length of tuple
val_list[0]. It is assumed that all tuples in the list have the same length
and red_func can be applied to all elements in each corresponding position.
"""
assert len(val_list) > 0, "Cannot reduce empty list!"
if isinstance(val_list[0], torch.Tensor):
first_device = val_list[0].device
return red_func([elem.to(first_device) for elem in val_list])
elif isinstance(val_list[0], bool):
return any(val_list)
elif isinstance(val_list[0], tuple):
final_out = []
for i in range(len(val_list[0])):
final_out.append(
_reduce_list([val_elem[i] for val_elem in val_list], red_func)
)
else:
raise AssertionError(
"Elements to be reduced can only be"
"either Tensors or tuples containing Tensors."
)
return tuple(final_out)
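# Illustrative example (assumed behavior of _reduce_list): tuples are reduced
# element-wise; tensors are concatenated with the default `torch.cat`.
#
#     batch_a = (torch.ones(2, 3), torch.zeros(2))
#     batch_b = (torch.ones(2, 3), torch.zeros(2))
#     merged = _reduce_list([batch_a, batch_b])
#     # merged[0].shape == (4, 3), merged[1].shape == (4,)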
def _sort_key_list(
keys: List[device], device_ids: Union[None, List[int]] = None
) -> List[device]:
"""
Sorts list of torch devices (keys) by given index list, device_ids. If keys
contains only one device, then the list is returned unchanged. If keys
contains a device for which the id is not contained in device_ids, then
an error is returned. This method is used to identify the order of DataParallel
batched devices, given the device ID ordering.
"""
if len(keys) == 1:
return keys
id_dict: Dict[int, device] = {}
assert device_ids is not None, "Device IDs must be provided with multiple devices."
for key in keys:
if key.index in id_dict:
raise AssertionError("Duplicate CUDA Device ID identified in device list.")
id_dict[key.index] = key
out_list = [
id_dict[device_id]
for device_id in filter(lambda device_id: device_id in id_dict, device_ids)
]
    assert len(out_list) == len(keys), (
        "Given Device ID List does not match devices with computed tensors."
    )
return out_list
def _flatten_tensor_or_tuple(inp: TensorOrTupleOfTensorsGeneric) -> Tensor:
if isinstance(inp, Tensor):
return inp.flatten()
return torch.cat([single_inp.flatten() for single_inp in inp])
def _get_module_from_name(model: Module, layer_name: str) -> Any:
r"""
Returns the module (layer) object, given its (string) name
in the model.
Args:
name (str): Module or nested modules name string in self.model
Returns:
The module (layer) in self.model.
"""
return reduce(getattr, layer_name.split("."), model)
def _register_backward_hook(
module: Module, hook: Callable, attr_obj: Any
) -> List[torch.utils.hooks.RemovableHandle]:
grad_out: Dict[device, Tensor] = {}
def forward_hook(
module: Module,
inp: Union[Tensor, Tuple[Tensor, ...]],
out: Union[Tensor, Tuple[Tensor, ...]],
) -> None:
nonlocal grad_out
grad_out = {}
def output_tensor_hook(output_grad: Tensor) -> None:
grad_out[output_grad.device] = output_grad
if isinstance(out, tuple):
assert (
len(out) == 1
), "Backward hooks not supported for module with >1 output"
out[0].register_hook(output_tensor_hook)
else:
out.register_hook(output_tensor_hook)
def pre_hook(module, inp):
def input_tensor_hook(input_grad: Tensor):
if len(grad_out) == 0:
return
hook_out = hook(module, input_grad, grad_out[input_grad.device])
if hook_out is not None:
return hook_out[0] if isinstance(hook_out, tuple) else hook_out
if isinstance(inp, tuple):
assert (
len(inp) == 1
), "Backward hooks not supported for module with >1 input"
inp[0].register_hook(input_tensor_hook)
return inp[0].clone()
else:
inp.register_hook(input_tensor_hook)
return inp.clone()
return [
module.register_forward_pre_hook(pre_hook),
module.register_forward_hook(forward_hook),
]
def _get_max_feature_index(feature_mask: Tuple[Tensor, ...]) -> int:
    """
    Returns the maximum feature mask index.
    The feature mask should first be formatted into a tuple of tensors.
    Note: This util is commonly used to identify the number of features (max_index + 1),
    since users are expected to ensure that feature mask indices are consecutive from 0.
    """
return int(max(torch.max(mask).item() for mask in feature_mask if mask.numel()))
|
#!/usr/bin/env python3
import glob
import os
import re
import warnings
from typing import Any, List, Optional, Tuple, Union
import captum._utils.common as common
import torch
from captum.attr import LayerActivation
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
class AV:
r"""
This class provides functionality to store and load activation vectors
generated for pre-defined neural network layers.
It also provides functionality to check if activation vectors already
exist in the manifold and other auxiliary functions.
This class also defines a torch `Dataset`, representing Activation Vectors,
    which enables lazy access to activation vectors and layers stored in the manifold.
"""
r"""
The name of the subfolder in the manifold where the activation vectors
are stored.
"""
class AVDataset(Dataset):
r"""
This dataset enables access to activation vectors for a given `model` stored
under a pre-defined path.
The iterator of this dataset returns a batch of data tensors.
Additionally, subsets of the model activations can be loaded based on layer
or identifier or num_id (representing batch number in source dataset).
"""
def __init__(
self,
path: str,
model_id: str,
identifier: Optional[str] = None,
layer: Optional[str] = None,
num_id: Optional[str] = None,
) -> None:
r"""
Loads into memory the list of all activation file paths associated
with the input `model_id`.
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
model_id (str): The name/version of the model for which layer
activations are being computed and stored.
identifier (str or None): An optional identifier for the layer
activations. Can be used to distinguish between activations for
different training batches.
layer (str or None): The layer for which the activation vectors
are computed.
num_id (str): An optional string representing the batch number for
which the activation vectors are computed
"""
self.av_filesearch = AV._construct_file_search(
path, model_id, identifier, layer, num_id
)
files = glob.glob(self.av_filesearch)
self.files = AV.sort_files(files)
def __getitem__(self, idx: int) -> Union[Tensor, Tuple[Tensor, ...]]:
            assert idx < len(self.files), "Dataset index is out of bounds!"
fl = self.files[idx]
av = torch.load(fl)
return av
def __len__(self) -> int:
return len(self.files)
AV_DIR_NAME: str = "av"
def __init__(self) -> None:
pass
@staticmethod
def _assemble_model_dir(path: str, model_id: str) -> str:
r"""
Returns a directory path for the given source path `path` and `model_id.`
This path is suffixed with the '/' delimiter.
"""
return "/".join([path, AV.AV_DIR_NAME, model_id, ""])
@staticmethod
def _assemble_file_path(source_dir: str, identifier: str, layer: str) -> str:
r"""
Returns a full filepath given a source directory, layer, and required
identifier. The source dir is not required to end with a "/" delimiter.
"""
if not source_dir.endswith("/"):
source_dir += "/"
filepath = os.path.join(source_dir, identifier)
filepath = os.path.join(filepath, layer)
return filepath
@staticmethod
def _construct_file_search(
source_dir: str,
model_id: str,
identifier: Optional[str] = None,
layer: Optional[str] = None,
num_id: Optional[str] = None,
) -> str:
r"""
Returns a search string that can be used by glob to search `source_dir/model_id`
for the desired layer/identifier pair. Leaving `layer` as None will search ids
        over all layers, and leaving `identifier` as None will search layers over all
ids. Leaving both as none will return a path to glob for every activation.
Assumes identifier is always specified when saving activations, so that
activations live at source_dir/model_id/identifier/layer
(and never source_dir/model_id/layer)
"""
av_filesearch = AV._assemble_model_dir(source_dir, model_id)
av_filesearch = os.path.join(
av_filesearch, "*" if identifier is None else identifier
)
av_filesearch = os.path.join(av_filesearch, "*" if layer is None else layer)
av_filesearch = os.path.join(
av_filesearch, "*.pt" if num_id is None else "%s.pt" % num_id
)
return av_filesearch
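    # Illustrative example (hypothetical paths): the glob pattern produced for a
    # given identifier/layer pair would look like
    #
    #     AV._construct_file_search("/tmp/av_root", "resnet", "train", "layer1")
    #     # -> "/tmp/av_root/av/resnet/train/layer1/*.pt"
    #
    # Leaving `identifier`, `layer`, or `num_id` as None widens the corresponding
    # path component to "*".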
@staticmethod
def exists(
path: str,
model_id: str,
identifier: Optional[str] = None,
layer: Optional[str] = None,
num_id: Optional[str] = None,
) -> bool:
r"""
Verifies whether the model + layer activations exist
under the path.
Args:
path (str): The path where the activation vectors
for the `model_id` are stored.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
identifier (str or None): An optional identifier for the layer activations.
Can be used to distinguish between activations for different
training batches. For example, the id could be a suffix composed of
a train/test label and numerical value, such as "-train-xxxxx".
The numerical id is often a monotonic sequence taken from datetime.
layer (str or None): The layer for which the activation vectors are
computed.
num_id (str): An optional string representing the batch number for which
the activation vectors are computed
Returns:
exists (bool): Indicating whether the activation vectors for the `layer`
and `identifier` (if provided) and num_id (if provided) were stored
in the manifold. If no `identifier` is provided, will return `True`
if any layer activation exists, whether it has an identifier or
not, and vice-versa.
"""
av_dir = AV._assemble_model_dir(path, model_id)
av_filesearch = AV._construct_file_search(
path, model_id, identifier, layer, num_id
)
return os.path.exists(av_dir) and len(glob.glob(av_filesearch)) > 0
@staticmethod
def save(
path: str,
model_id: str,
identifier: str,
layers: Union[str, List[str]],
act_tensors: Union[Tensor, List[Tensor]],
num_id: str,
) -> None:
r"""
Saves the activation vectors `act_tensor` for the
`layer` under the manifold `path`.
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
identifier (str or None): An optional identifier for the layer
activations. Can be used to distinguish between activations for
different training batches. For example, the identifier could be
a suffix composed of a train/test label and numerical value, such
as "-src-abc".
Additionally, (abc) could be a unique identifying number. For
example, it is automatically created in
AV.generate_dataset_activations from batch index.
                It is assumed that the identifier is the same for all layers if a
                list of `layers` is provided.
layers (str or list[str]): The layer(s) for which the activation vectors
are computed.
act_tensors (tensor or list of tensor): A batch of activation vectors.
This must match the dimension of `layers`.
num_id (str): string representing the batch number for which the activation
vectors are computed
"""
if isinstance(layers, str):
layers = [layers]
if isinstance(act_tensors, Tensor):
act_tensors = [act_tensors]
if len(layers) != len(act_tensors):
raise ValueError("The dimension of `layers` and `act_tensors` must match!")
av_dir = AV._assemble_model_dir(path, model_id)
for i, layer in enumerate(layers):
av_save_fl_path = os.path.join(
AV._assemble_file_path(av_dir, identifier, layer), "%s.pt" % num_id
)
layer_dir = os.path.dirname(av_save_fl_path)
if not os.path.exists(layer_dir):
os.makedirs(layer_dir)
torch.save(act_tensors[i], av_save_fl_path)
@staticmethod
def load(
path: str,
model_id: str,
identifier: Optional[str] = None,
layer: Optional[str] = None,
num_id: Optional[str] = None,
) -> AVDataset:
r"""
Loads lazily the activation vectors for given `model_id` and
`layer` saved under the `path`.
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
identifier (str or None): An optional identifier for the layer
activations. Can be used to distinguish between activations for
different training batches.
layer (str or None): The layer for which the activation vectors
are computed.
num_id (str): An optional string representing the batch number for which
the activation vectors are computed
Returns:
dataset (AV.AVDataset): AV.AVDataset that allows to iterate
over the activation vectors for given layer, identifier (if
provided), num_id (if provided). Returning an AV.AVDataset as
opposed to a DataLoader constructed from it offers more
flexibility. Raises RuntimeError if activation vectors are not
found.
"""
av_save_dir = AV._assemble_model_dir(path, model_id)
if os.path.exists(av_save_dir):
avdataset = AV.AVDataset(path, model_id, identifier, layer, num_id)
return avdataset
else:
raise RuntimeError(
f"Activation vectors for model {model_id} was not found at path {path}"
)
@staticmethod
def _manage_loading_layers(
path: str,
model_id: str,
layers: Union[str, List[str]],
load_from_disk: bool = True,
identifier: Optional[str] = None,
num_id: Optional[str] = None,
) -> List[str]:
r"""
Returns unsaved layers, and deletes saved layers if load_from_disk is False.
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
layers (str or list[str]): The layer(s) for which the activation vectors
are computed.
load_from_disk (bool, optional): Whether or not to load from disk.
Default: True
identifier (str or None): An optional identifier for the layer
activations. Can be used to distinguish between activations for
different training batches.
num_id (str, optional): An optional string representing the batch number
for which the activation vectors are computed.
Returns:
List of layer names for which activations should be generated
"""
layers = [layers] if isinstance(layers, str) else layers
unsaved_layers = []
if load_from_disk:
for layer in layers:
if not AV.exists(path, model_id, identifier, layer, num_id):
unsaved_layers.append(layer)
else:
unsaved_layers = layers
warnings.warn(
"Overwriting activations: load_from_disk is set to False. Removing all "
f"activations matching specified parameters {{path: {path}, "
f"model_id: {model_id}, layers: {layers}, identifier: {identifier}}} "
"before generating new activations."
)
for layer in layers:
files = glob.glob(
AV._construct_file_search(path, model_id, identifier, layer)
)
for filename in files:
os.remove(filename)
return unsaved_layers
@staticmethod
def _compute_and_save_activations(
path: str,
model: Module,
model_id: str,
layers: Union[str, List[str]],
inputs: Union[Tensor, Tuple[Tensor, ...]],
identifier: str,
num_id: str,
additional_forward_args: Any = None,
load_from_disk: bool = True,
) -> None:
r"""
Computes layer activations for the given inputs and specified `layers`
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
model (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
layers (str or list[str]): The layer(s) for which the activation vectors
are computed.
inputs (Tensor or tuple[Tensor, ...]): Batch of examples for
                which layer activations are computed. They are passed to the
input `model`. The first dimension in `inputs` tensor or tuple of
tensors corresponds to the batch size.
identifier (str or None): An optional identifier for the layer
activations. Can be used to distinguish between activations for
different training batches.
            num_id (str): A required string representing the batch number for which the
activation vectors are computed
additional_forward_args (Any, optional): Additional arguments that will be
passed to `model` after inputs.
Default: None
load_from_disk (bool): Forces function to regenerate activations if False.
Default: True
"""
unsaved_layers = AV._manage_loading_layers(
path,
model_id,
layers,
load_from_disk,
identifier,
num_id,
)
layer_modules = [
common._get_module_from_name(model, layer) for layer in unsaved_layers
]
if len(unsaved_layers) > 0:
layer_act = LayerActivation(model, layer_modules)
new_activations = layer_act.attribute.__wrapped__( # type: ignore
layer_act, inputs, additional_forward_args
)
AV.save(path, model_id, identifier, unsaved_layers, new_activations, num_id)
@staticmethod
def _unpack_data(data: Union[Any, Tuple[Any, Any]]) -> Any:
r"""
Helper to extract input from labels when getting items from a Dataset. Assumes
that data is either a single value, or a tuple containing two elements.
The input could itself be a Tuple containing multiple values. If your
dataset returns a Tuple with more than 2 elements, please reformat it such that
all inputs are formatted into a tuple stored at the first position.
"""
if isinstance(data, tuple) or isinstance(data, list):
data = data[0]
return data
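    # For example, `_unpack_data((inputs, labels))` returns `inputs`, while a bare
    # tensor (or any other non-tuple, non-list value) is returned unchanged.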
r"""TODO:
1. Can propagate saving labels along with activations.
2. Use of additional_forward_args when sourcing from dataset?
"""
@staticmethod
def generate_dataset_activations(
path: str,
model: Module,
model_id: str,
layers: Union[str, List[str]],
dataloader: DataLoader,
identifier: str = "default",
load_from_disk: bool = True,
return_activations: bool = False,
) -> Optional[Union[AVDataset, List[AVDataset]]]:
r"""
Computes layer activations for a source dataset and specified `layers`. Assumes
that the dataset returns a single value, or a tuple containing two elements
(see AV._unpack_data).
Args:
path (str): The path where the activation vectors
for the `layer` are stored.
            model (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
model_id (str): The name/version of the model for which layer activations
are being computed and stored.
layers (str or list[str]): The layer(s) for which the activation vectors
are computed.
dataloader (torch.utils.data.DataLoader): DataLoader that yields Dataset
                for which layer activations are computed. They are passed to
input `model`.
identifier (str or None): An identifier for the layer
activations. Can be used to distinguish between activations for
different training batches.
Default: "default"
load_from_disk (bool): Forces function to regenerate activations if False.
Default: True
return_activations (bool, optional): Whether to return the activations.
Default: False
Returns: If `return_activations == True`, returns a single `AVDataset` if
`layers` is a str, otherwise, a list of `AVDataset`s of the length
of `layers`, where each element corresponds to a layer. In either
case, `AVDataset`'s represent the activations for a single layer,
over the entire `dataloader`. If `return_activations == False`,
does not return anything.
"""
unsaved_layers = AV._manage_loading_layers(
path,
model_id,
layers,
load_from_disk,
identifier,
)
if len(unsaved_layers) > 0:
for i, data in enumerate(dataloader):
AV._compute_and_save_activations(
path,
model,
model_id,
layers,
AV._unpack_data(data),
identifier,
str(i),
)
if not return_activations:
return None
if isinstance(layers, str):
return AV.load(path, model_id, identifier, layers)
else:
return [AV.load(path, model_id, identifier, layer) for layer in layers]
@staticmethod
def sort_files(files: List[str]) -> List[str]:
r"""
Utility for sorting files based on natural sorting instead of the default
        lexicographical sort.
"""
def split_alphanum(s):
r"""
Splits string into a list of strings and numbers
"z23a" -> ["z", 23, "a"]
"""
return [int(x) if x.isdigit() else x for x in re.split("([0-9]+)", s)]
return sorted(files, key=split_alphanum)
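# Illustrative usage sketch (not part of the library): the path, model id, layer name
# and tensor shape below are hypothetical placeholders. It shows one round trip of
# saving a batch of activations with `AV.save` and lazily reloading it via `AV.load`.
def _example_av_round_trip() -> None:
    import torch

    acts = torch.randn(8, 120)  # one batch of activation vectors for layer "fc1"
    # Stored under <path>/av/<model_id>/<identifier>/<layer>/<num_id>.pt
    AV.save("/tmp/acts", "my_model_v1", "default", "fc1", acts, "0")
    dataset = AV.load("/tmp/acts", "my_model_v1", "default", "fc1")
    first_batch = dataset[0]  # loads 0.pt from disk only when accessed
    assert first_batch.shape == acts.shape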
|
#!/usr/bin/env python3
import sys
import warnings
from time import time
from typing import cast, Iterable, Sized, TextIO
from captum._utils.typing import Literal
try:
from tqdm.auto import tqdm
except ImportError:
tqdm = None
class DisableErrorIOWrapper(object):
def __init__(self, wrapped: TextIO) -> None:
"""
        A wrapper around a TextIO object that ignores write errors, as tqdm does:
https://github.com/tqdm/tqdm/blob/bcce20f771a16cb8e4ac5cc5b2307374a2c0e535/tqdm/utils.py#L131
"""
self._wrapped = wrapped
def __getattr__(self, name):
return getattr(self._wrapped, name)
@staticmethod
def _wrapped_run(func, *args, **kwargs):
try:
return func(*args, **kwargs)
except OSError as e:
if e.errno != 5:
raise
except ValueError as e:
if "closed" not in str(e):
raise
def write(self, *args, **kwargs):
return self._wrapped_run(self._wrapped.write, *args, **kwargs)
def flush(self, *args, **kwargs):
return self._wrapped_run(self._wrapped.flush, *args, **kwargs)
class NullProgress:
"""Passthrough class that implements the progress API.
    This class implements the tqdm and SimpleProgress API but
does nothing. This class can be used as a stand-in for an
optional progressbar, most commonly in the case of nested
progress bars.
"""
def __init__(self, iterable: Iterable = None, *args, **kwargs):
del args, kwargs
self.iterable = iterable
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback) -> Literal[False]:
return False
def __iter__(self):
if not self.iterable:
return
for it in self.iterable:
yield it
def update(self, amount: int = 1):
pass
def close(self):
pass
class SimpleProgress:
def __init__(
self,
iterable: Iterable = None,
desc: str = None,
total: int = None,
file: TextIO = None,
mininterval: float = 0.5,
) -> None:
"""
        Simple progress output used when tqdm is unavailable.
        Like tqdm, it writes output to the stderr channel.
        To nest progress bars with SimpleProgress, use the parent progress
        bar as a context manager (i.e., in a `with` statement) and create
        the nested progress bar inside that context.
"""
self.cur = 0
self.iterable = iterable
self.total = total
if total is None and hasattr(iterable, "__len__"):
self.total = len(cast(Sized, iterable))
self.desc = desc
file = DisableErrorIOWrapper(file if file else sys.stderr)
cast(TextIO, file)
self.file = file
self.mininterval = mininterval
self.last_print_t = 0.0
self.closed = False
self._is_parent = False
def __enter__(self):
self._is_parent = True
self._refresh()
return self
def __exit__(self, exc_type, exc_value, exc_traceback) -> Literal[False]:
self.close()
return False
def __iter__(self):
if self.closed or not self.iterable:
return
self._refresh()
for it in self.iterable:
yield it
self.update()
self.close()
def _refresh(self):
progress_str = self.desc + ": " if self.desc else ""
if self.total:
# e.g., progress: 60% 3/5
progress_str += f"{100 * self.cur // self.total}% {self.cur}/{self.total}"
else:
# e.g., progress: .....
progress_str += "." * self.cur
end = "\n" if self._is_parent else ""
print("\r" + progress_str, end=end, file=self.file)
def update(self, amount: int = 1):
if self.closed:
return
self.cur += amount
cur_t = time()
if cur_t - self.last_print_t >= self.mininterval:
self._refresh()
self.last_print_t = cur_t
def close(self):
if not self.closed and not self._is_parent:
self._refresh()
print(file=self.file) # end with new line
self.closed = True
def progress(
iterable: Iterable = None,
desc: str = None,
total: int = None,
use_tqdm=True,
file: TextIO = None,
mininterval: float = 0.5,
**kwargs,
):
    # Try to use tqdm if possible. Fall back to a simple progress print.
if tqdm and use_tqdm:
return tqdm(
iterable,
desc=desc,
total=total,
file=file,
mininterval=mininterval,
**kwargs,
)
else:
if not tqdm and use_tqdm:
warnings.warn(
"Tried to show progress with tqdm "
"but tqdm is not installed. "
"Fall back to simply print out the progress."
)
return SimpleProgress(
iterable, desc=desc, total=total, file=file, mininterval=mininterval
)
|
#!/usr/bin/env python3
from typing import List, Tuple, TYPE_CHECKING, TypeVar, Union
from torch import Tensor
from torch.nn import Module
if TYPE_CHECKING:
import sys
if sys.version_info >= (3, 8):
from typing import Literal # noqa: F401
else:
from typing_extensions import Literal # noqa: F401
else:
Literal = {True: bool, False: bool, (True, False): bool}
TensorOrTupleOfTensorsGeneric = TypeVar(
"TensorOrTupleOfTensorsGeneric", Tensor, Tuple[Tensor, ...]
)
TupleOrTensorOrBoolGeneric = TypeVar("TupleOrTensorOrBoolGeneric", Tuple, Tensor, bool)
ModuleOrModuleList = TypeVar("ModuleOrModuleList", Module, List[Module])
TargetType = Union[None, int, Tuple[int, ...], Tensor, List[Tuple[int, ...]], List[int]]
BaselineType = Union[None, Tensor, int, float, Tuple[Union[Tensor, int, float], ...]]
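# For example, a valid `TargetType` for a batch of two samples could be a single int
# (the same class applied to every sample), a list such as [1, 4] (one class per
# sample), a tensor of indices, or None for models with a scalar output; similarly,
# `BaselineType` accepts a tensor, a scalar, a tuple of these, or None.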
TensorLikeList1D = List[float]
TensorLikeList2D = List[TensorLikeList1D]
TensorLikeList3D = List[TensorLikeList2D]
TensorLikeList4D = List[TensorLikeList3D]
TensorLikeList5D = List[TensorLikeList4D]
TensorLikeList = Union[
TensorLikeList1D,
TensorLikeList2D,
TensorLikeList3D,
TensorLikeList4D,
TensorLikeList5D,
]
|
from captum._utils.models.linear_model import (
LinearModel,
SGDLasso,
SGDLinearModel,
SGDLinearRegression,
SGDRidge,
SkLearnLasso,
SkLearnLinearModel,
SkLearnLinearRegression,
SkLearnRidge,
)
from captum._utils.models.model import Model
__all__ = [
"Model",
"LinearModel",
"SGDLinearModel",
"SGDLasso",
"SGDRidge",
"SGDLinearRegression",
"SkLearnLinearModel",
"SkLearnLasso",
"SkLearnRidge",
"SkLearnLinearRegression",
]
|
#!/usr/bin/env python3
from abc import ABC, abstractmethod
from typing import Dict, Optional, Union
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from torch import Tensor
from torch.utils.data import DataLoader
class Model(ABC):
r"""
Abstract Class to describe the interface of a trainable model to be used
within the algorithms of captum.
Please note that this is an experimental feature.
"""
@abstractmethod
def fit(
self, train_data: DataLoader, **kwargs
) -> Optional[Dict[str, Union[int, float, Tensor]]]:
r"""
Override this method to actually train your model.
The specification of the dataloader will be supplied by the algorithm
you are using within captum. This will likely be a supervised learning
task, thus you should expect batched (x, y) pairs or (x, y, w) triples.
Args:
train_data (DataLoader):
The data to train on
Returns:
Optional statistics about training, e.g. iterations it took to
train, training loss, etc.
"""
pass
@abstractmethod
def representation(self) -> Tensor:
r"""
Returns the underlying representation of the interpretable model. For a
linear model this is simply a tensor (the concatenation of weights
and bias). For something slightly more complicated, such as a decision
tree, this could be the nodes of a decision tree.
Returns:
A Tensor describing the representation of the model.
"""
pass
@abstractmethod
def __call__(
self, x: TensorOrTupleOfTensorsGeneric
) -> TensorOrTupleOfTensorsGeneric:
r"""
Predicts with the interpretable model.
Args:
x (TensorOrTupleOfTensorsGeneric)
A batched input of tensor(s) to the model to predict
Returns:
The prediction of the input as a TensorOrTupleOfTensorsGeneric.
"""
pass
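# Minimal illustrative subclass (not part of the library): a constant predictor that
# satisfies the `Model` interface above. The shapes and the returned statistics are
# placeholders chosen only to demonstrate the three required methods.
class _ConstantModel(Model):
    def __init__(self, value: float = 0.0) -> None:
        self.value = value

    def fit(
        self, train_data: DataLoader, **kwargs
    ) -> Optional[Dict[str, Union[int, float, Tensor]]]:
        # A real implementation would iterate over `train_data`; a constant
        # predictor has nothing to learn.
        return {"train_iter": 0}

    def representation(self) -> Tensor:
        import torch

        return torch.tensor([self.value])

    def __call__(
        self, x: TensorOrTupleOfTensorsGeneric
    ) -> TensorOrTupleOfTensorsGeneric:
        # Assumes a single tensor input for illustration.
        return x * 0 + self.value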
|
from captum._utils.models.linear_model.model import (
LinearModel,
SGDLasso,
SGDLinearModel,
SGDLinearRegression,
SGDRidge,
SkLearnLasso,
SkLearnLinearModel,
SkLearnLinearRegression,
SkLearnRidge,
)
__all__ = [
"LinearModel",
"SGDLinearModel",
"SGDLasso",
"SGDRidge",
"SGDLinearRegression",
"SkLearnLinearModel",
"SkLearnLasso",
"SkLearnRidge",
"SkLearnLinearRegression",
]
|
from typing import Callable, cast, List, Optional
import torch.nn as nn
from captum._utils.models.model import Model
from torch import Tensor
from torch.utils.data import DataLoader
class LinearModel(nn.Module, Model):
SUPPORTED_NORMS: List[Optional[str]] = [None, "batch_norm", "layer_norm"]
def __init__(self, train_fn: Callable, **kwargs) -> None:
r"""
Constructs a linear model with a training function and additional
construction arguments that will be sent to
`self._construct_model_params` after a `self.fit` is called. Please note
that this assumes the `self.train_fn` will call
`self._construct_model_params`.
Please note that this is an experimental feature.
Args:
train_fn (Callable)
The function to train with. See
`captum._utils.models.linear_model.train.sgd_train_linear_model`
and
`captum._utils.models.linear_model.train.sklearn_train_linear_model`
for examples
kwargs
Any additional keyword arguments to send to
`self._construct_model_params` once a `self.fit` is called.
"""
super().__init__()
self.norm: Optional[nn.Module] = None
self.linear: Optional[nn.Linear] = None
self.train_fn = train_fn
self.construct_kwargs = kwargs
def _construct_model_params(
self,
in_features: Optional[int] = None,
out_features: Optional[int] = None,
norm_type: Optional[str] = None,
affine_norm: bool = False,
bias: bool = True,
weight_values: Optional[Tensor] = None,
bias_value: Optional[Tensor] = None,
classes: Optional[Tensor] = None,
):
r"""
Lazily initializes a linear model. This will be called for you in a
train method.
Args:
in_features (int):
The number of input features
output_features (int):
The number of output features.
norm_type (str, optional):
The type of normalization that can occur. Please assign this
to one of `PyTorchLinearModel.SUPPORTED_NORMS`.
affine_norm (bool):
Whether or not to learn an affine transformation of the
normalization parameters used.
bias (bool):
Whether to add a bias term. Not needed if normalized input.
weight_values (Tensor, optional):
The values to initialize the linear model with. This must be a
1D or 2D tensor, and of the form `(num_outputs, num_features)` or
`(num_features,)`. Additionally, if this is provided you need not
to provide `in_features` or `out_features`.
bias_value (Tensor, optional):
The bias value to initialize the model with.
classes (Tensor, optional):
The list of prediction classes supported by the model in case it
                performs classification. In the case of regression it is set to None.
Default: None
"""
if norm_type not in LinearModel.SUPPORTED_NORMS:
raise ValueError(
f"{norm_type} not supported. Please use {LinearModel.SUPPORTED_NORMS}"
)
if weight_values is not None:
in_features = weight_values.shape[-1]
out_features = (
1 if len(weight_values.shape) == 1 else weight_values.shape[0]
)
if in_features is None or out_features is None:
raise ValueError(
"Please provide `in_features` and `out_features` or `weight_values`"
)
if norm_type == "batch_norm":
self.norm = nn.BatchNorm1d(in_features, eps=1e-8, affine=affine_norm)
elif norm_type == "layer_norm":
self.norm = nn.LayerNorm(
in_features, eps=1e-8, elementwise_affine=affine_norm
)
else:
self.norm = None
self.linear = nn.Linear(in_features, out_features, bias=bias)
if weight_values is not None:
self.linear.weight.data = weight_values
if bias_value is not None:
if not bias:
raise ValueError("`bias_value` is not None and bias is False")
self.linear.bias.data = bias_value
if classes is not None:
self.linear.classes = classes
def fit(self, train_data: DataLoader, **kwargs):
r"""
Calls `self.train_fn`
"""
return self.train_fn(
self,
dataloader=train_data,
construct_kwargs=self.construct_kwargs,
**kwargs,
)
def forward(self, x: Tensor) -> Tensor:
assert self.linear is not None
if self.norm is not None:
x = self.norm(x)
return self.linear(x)
def representation(self) -> Tensor:
r"""
Returns a tensor which describes the hyper-plane input space. This does
not include the bias. For bias/intercept, please use `self.bias`
"""
assert self.linear is not None
return self.linear.weight.detach()
def bias(self) -> Optional[Tensor]:
r"""
Returns the bias of the linear model
"""
if self.linear is None or self.linear.bias is None:
return None
return self.linear.bias.detach()
def classes(self) -> Optional[Tensor]:
if self.linear is None or self.linear.classes is None:
return None
return cast(Tensor, self.linear.classes).detach()
class SGDLinearModel(LinearModel):
def __init__(self, **kwargs) -> None:
r"""
        Factory class. Constructs a `LinearModel` with
`sgd_train_linear_model` as the train method
Args:
kwargs
Arguments send to `self._construct_model_params` after
`self.fit` is called. Please refer to that method for parameter
documentation.
"""
# avoid cycles
from captum._utils.models.linear_model.train import sgd_train_linear_model
super().__init__(train_fn=sgd_train_linear_model, **kwargs)
class SGDLasso(SGDLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class to train a `LinearModel` with SGD
(`sgd_train_linear_model`) whilst setting appropriate parameters to
        optimize for lasso regression loss. This optimizes L2 loss + alpha * L1
regularization.
Please note that with SGD it is not guaranteed that weights will
converge to 0.
"""
super().__init__(**kwargs)
def fit(self, train_data: DataLoader, **kwargs):
# avoid cycles
from captum._utils.models.linear_model.train import l2_loss
return super().fit(train_data=train_data, loss_fn=l2_loss, reg_term=1, **kwargs)
class SGDRidge(SGDLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class to train a `LinearModel` with SGD
(`sgd_train_linear_model`) whilst setting appropriate parameters to
optimize for ridge regression loss. This optimizes L2 loss + alpha *
L2 regularization.
"""
super().__init__(**kwargs)
def fit(self, train_data: DataLoader, **kwargs):
# avoid cycles
from captum._utils.models.linear_model.train import l2_loss
return super().fit(train_data=train_data, loss_fn=l2_loss, reg_term=2, **kwargs)
class SGDLinearRegression(SGDLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class to train a `LinearModel` with SGD
(`sgd_train_linear_model`). For linear regression this assigns the loss
        to L2 and applies no regularization.
"""
super().__init__(**kwargs)
def fit(self, train_data: DataLoader, **kwargs):
# avoid cycles
from captum._utils.models.linear_model.train import l2_loss
return super().fit(
train_data=train_data, loss_fn=l2_loss, reg_term=None, **kwargs
)
class SkLearnLinearModel(LinearModel):
def __init__(self, sklearn_module: str, **kwargs) -> None:
r"""
Factory class to construct a `LinearModel` with sklearn training method.
Please note that this assumes:
0. You have sklearn and numpy installed
1. The dataset can fit into memory
SkLearn support does introduce some slight overhead as we convert the
tensors to numpy and then convert the resulting trained model to a
`LinearModel` object. However, this conversion should be negligible.
Args:
sklearn_module
The module under sklearn to construct and use for training, e.g.
use "svm.LinearSVC" for an SVM or "linear_model.Lasso" for Lasso.
There are factory classes defined for you for common use cases,
such as `SkLearnLasso`.
kwargs
The kwargs to pass to the construction of the sklearn model
"""
# avoid cycles
from captum._utils.models.linear_model.train import sklearn_train_linear_model
super().__init__(train_fn=sklearn_train_linear_model, **kwargs)
self.sklearn_module = sklearn_module
def fit(self, train_data: DataLoader, **kwargs):
r"""
Args:
train_data
Train data to use
kwargs
Arguments to feed to `.fit` method for sklearn
"""
return super().fit(
train_data=train_data, sklearn_trainer=self.sklearn_module, **kwargs
)
class SkLearnLasso(SkLearnLinearModel):
def __init__(self, **kwargs) -> None:
r"""
        Factory class. Trains a `LinearModel` with
`sklearn.linear_model.Lasso`. You will need sklearn version >= 0.23 to
support sample weights.
"""
super().__init__(sklearn_module="linear_model.Lasso", **kwargs)
def fit(self, train_data: DataLoader, **kwargs):
return super().fit(train_data=train_data, **kwargs)
class SkLearnRidge(SkLearnLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class. Trains a model with `sklearn.linear_model.Ridge`.
Any arguments provided to the sklearn constructor can be provided
as kwargs here.
"""
super().__init__(sklearn_module="linear_model.Ridge", **kwargs)
def fit(self, train_data: DataLoader, **kwargs):
return super().fit(train_data=train_data, **kwargs)
class SkLearnLinearRegression(SkLearnLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class. Trains a model with `sklearn.linear_model.LinearRegression`.
Any arguments provided to the sklearn constructor can be provided
as kwargs here.
"""
super().__init__(sklearn_module="linear_model.LinearRegression", **kwargs)
def fit(self, train_data: DataLoader, **kwargs):
return super().fit(train_data=train_data, **kwargs)
class SkLearnLogisticRegression(SkLearnLinearModel):
def __init__(self, **kwargs) -> None:
r"""
Factory class. Trains a model with `sklearn.linear_model.LogisticRegression`.
Any arguments provided to the sklearn constructor can be provided
as kwargs here.
"""
super().__init__(sklearn_module="linear_model.LogisticRegression", **kwargs)
def fit(self, train_data: DataLoader, **kwargs):
return super().fit(train_data=train_data, **kwargs)
class SkLearnSGDClassifier(SkLearnLinearModel):
def __init__(self, **kwargs) -> None:
r"""
        Factory class. Trains a model with `sklearn.linear_model.SGDClassifier`.
Any arguments provided to the sklearn constructor can be provided
as kwargs here.
"""
super().__init__(sklearn_module="linear_model.SGDClassifier", **kwargs)
def fit(self, train_data: DataLoader, **kwargs):
return super().fit(train_data=train_data, **kwargs)
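# Illustrative usage sketch (the data, shapes and `alpha` below are hypothetical):
# trains one of the sklearn-backed factory models above on an in-memory dataset.
# Requires sklearn and numpy, as noted in the `SkLearnLinearModel` docstring.
def _example_sklearn_lasso_fit() -> None:
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    x = torch.randn(64, 10)
    y = x @ torch.randn(10)
    loader = DataLoader(TensorDataset(x, y), batch_size=16)
    model = SkLearnLasso(alpha=0.1)  # constructor kwargs go to sklearn's Lasso
    model.fit(loader)
    weights = model.representation()  # (1, 10) tensor of learned coefficients
    bias = model.bias()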
|
import time
import warnings
from typing import Any, Callable, Dict, List, Optional
import torch
import torch.nn as nn
from captum._utils.models.linear_model.model import LinearModel
from torch.utils.data import DataLoader
def l2_loss(x1, x2, weights=None):
if weights is None:
return torch.mean((x1 - x2) ** 2) / 2.0
else:
return torch.sum((weights / weights.norm(p=1)) * ((x1 - x2) ** 2)) / 2.0
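# For example (hypothetical values): l2_loss(torch.tensor([1.0, 2.0]),
# torch.tensor([1.0, 0.0])) evaluates to mean([0.0, 4.0]) / 2 = 1.0.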
def sgd_train_linear_model(
model: LinearModel,
dataloader: DataLoader,
construct_kwargs: Dict[str, Any],
max_epoch: int = 100,
reduce_lr: bool = True,
initial_lr: float = 0.01,
alpha: float = 1.0,
loss_fn: Callable = l2_loss,
reg_term: Optional[int] = 1,
patience: int = 10,
threshold: float = 1e-4,
running_loss_window: Optional[int] = None,
device: Optional[str] = None,
init_scheme: str = "zeros",
debug: bool = False,
) -> Dict[str, float]:
r"""
    Trains a linear model with SGD. This will continue to iterate over your
    dataloader until we have converged to a solution or until we have
    exhausted `max_epoch`.
Convergence is defined by the loss not changing by `threshold` amount for
`patience` number of iterations.
Args:
model
The model to train
dataloader
The data to train it with. We will assume the dataloader produces
either pairs or triples of the form (x, y) or (x, y, w). Where x and
y are typical pairs for supervised learning and w is a weight
vector.
We will call `model._construct_model_params` with construct_kwargs
and the input features set to `x.shape[1]` (`x.shape[0]` corresponds
to the batch size). We assume that `len(x.shape) == 2`, i.e. the
tensor is flat. The number of output features will be set to
y.shape[1] or 1 (if `len(y.shape) == 1`); we require `len(y.shape)
<= 2`.
max_epoch
The maximum number of epochs to exhaust
reduce_lr
Whether or not to reduce the learning rate as iterations progress.
Halves the learning rate when the training loss does not move. This
uses torch.optim.lr_scheduler.ReduceLROnPlateau and uses the
parameters `patience` and `threshold`
initial_lr
The initial learning rate to use.
alpha
A constant for the regularization term.
loss_fn
The loss to optimise for. This must accept three parameters:
x1 (predicted), x2 (labels) and a weight vector
reg_term
Regularization is defined by the `reg_term` norm of the weights.
Please use `None` if you do not wish to use regularization.
patience
Defines the number of iterations in a row the loss must remain
within `threshold` in order to be classified as converged.
threshold
Threshold for convergence detection.
running_loss_window
Used to report the training loss once we have finished training and
to determine when we have converged (along with reducing the
learning rate).
The reported training loss will take the last `running_loss_window`
iterations and average them.
If `None` we will approximate this to be the number of examples in
an epoch.
init_scheme
Initialization to use prior to training the linear model.
device
The device to send the model and data to. If None then no `.to` call
will be used.
debug
Whether to print the loss, learning rate per iteration
    Returns:
This will return the final training loss (averaged with
`running_loss_window`)
"""
loss_window: List[torch.Tensor] = []
min_avg_loss = None
convergence_counter = 0
converged = False
def get_point(datapoint):
if len(datapoint) == 2:
x, y = datapoint
w = None
else:
x, y, w = datapoint
if device is not None:
x = x.to(device)
y = y.to(device)
if w is not None:
w = w.to(device)
return x, y, w
# get a point and construct the model
data_iter = iter(dataloader)
x, y, w = get_point(next(data_iter))
model._construct_model_params(
in_features=x.shape[1],
out_features=y.shape[1] if len(y.shape) == 2 else 1,
**construct_kwargs,
)
model.train()
assert model.linear is not None
if init_scheme is not None:
assert init_scheme in ["xavier", "zeros"]
with torch.no_grad():
if init_scheme == "xavier":
torch.nn.init.xavier_uniform_(model.linear.weight)
else:
model.linear.weight.zero_()
if model.linear.bias is not None:
model.linear.bias.zero_()
with torch.enable_grad():
optim = torch.optim.SGD(model.parameters(), lr=initial_lr)
if reduce_lr:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optim, factor=0.5, patience=patience, threshold=threshold
)
t1 = time.time()
epoch = 0
i = 0
while epoch < max_epoch:
while True: # for x, y, w in dataloader
if running_loss_window is None:
running_loss_window = x.shape[0] * len(dataloader)
y = y.view(x.shape[0], -1)
if w is not None:
w = w.view(x.shape[0], -1)
i += 1
out = model(x)
loss = loss_fn(y, out, w)
if reg_term is not None:
reg = torch.norm(model.linear.weight, p=reg_term)
loss += reg.sum() * alpha
if len(loss_window) >= running_loss_window:
loss_window = loss_window[1:]
loss_window.append(loss.clone().detach())
assert len(loss_window) <= running_loss_window
average_loss = torch.mean(torch.stack(loss_window))
if min_avg_loss is not None:
# if we haven't improved by at least `threshold`
if average_loss > min_avg_loss or torch.isclose(
min_avg_loss, average_loss, atol=threshold
):
convergence_counter += 1
if convergence_counter >= patience:
converged = True
break
else:
convergence_counter = 0
if min_avg_loss is None or min_avg_loss >= average_loss:
min_avg_loss = average_loss.clone()
if debug:
print(
f"lr={optim.param_groups[0]['lr']}, Loss={loss},"
+ "Aloss={average_loss}, min_avg_loss={min_avg_loss}"
)
loss.backward()
optim.step()
model.zero_grad()
                if reduce_lr:
scheduler.step(average_loss)
temp = next(data_iter, None)
if temp is None:
break
x, y, w = get_point(temp)
if converged:
break
epoch += 1
data_iter = iter(dataloader)
x, y, w = get_point(next(data_iter))
t2 = time.time()
return {
"train_time": t2 - t1,
"train_loss": torch.mean(torch.stack(loss_window)).item(),
"train_iter": i,
"train_epoch": epoch,
}
class NormLayer(nn.Module):
def __init__(self, mean, std, n=None, eps=1e-8) -> None:
super().__init__()
self.mean = mean
self.std = std
self.eps = eps
def forward(self, x):
return (x - self.mean) / (self.std + self.eps)
def sklearn_train_linear_model(
model: LinearModel,
dataloader: DataLoader,
construct_kwargs: Dict[str, Any],
sklearn_trainer: str = "Lasso",
norm_input: bool = False,
**fit_kwargs,
):
r"""
Alternative method to train with sklearn. This does introduce some slight
overhead as we convert the tensors to numpy and then convert the resulting
trained model to a `LinearModel` object. However, this conversion
should be negligible.
Please note that this assumes:
0. You have sklearn and numpy installed
1. The dataset can fit into memory
    Args:
model
The model to train.
dataloader
The data to use. This will be exhausted and converted to numpy
arrays. Therefore please do not feed an infinite dataloader.
norm_input
Whether or not to normalize the input
sklearn_trainer
The sklearn model to use to train the model. Please refer to
sklearn.linear_model for a list of modules to use.
construct_kwargs
Additional arguments provided to the `sklearn_trainer` constructor
fit_kwargs
Other arguments to send to `sklearn_trainer`'s `.fit` method
"""
from functools import reduce
try:
import numpy as np
except ImportError:
raise ValueError("numpy is not available. Please install numpy.")
try:
import sklearn
import sklearn.linear_model
import sklearn.svm
except ImportError:
raise ValueError("sklearn is not available. Please install sklearn >= 0.23")
if not sklearn.__version__ >= "0.23.0":
warnings.warn(
"Must have sklearn version 0.23.0 or higher to use "
"sample_weight in Lasso regression."
)
num_batches = 0
xs, ys, ws = [], [], []
for data in dataloader:
if len(data) == 3:
x, y, w = data
else:
assert len(data) == 2
x, y = data
w = None
xs.append(x.cpu().numpy())
ys.append(y.cpu().numpy())
if w is not None:
ws.append(w.cpu().numpy())
num_batches += 1
x = np.concatenate(xs, axis=0)
y = np.concatenate(ys, axis=0)
if len(ws) > 0:
w = np.concatenate(ws, axis=0)
else:
w = None
if norm_input:
mean, std = x.mean(0), x.std(0)
x -= mean
x /= std
t1 = time.time()
sklearn_model = reduce(
lambda val, el: getattr(val, el), [sklearn] + sklearn_trainer.split(".")
)(**construct_kwargs)
try:
sklearn_model.fit(x, y, sample_weight=w, **fit_kwargs)
except TypeError:
sklearn_model.fit(x, y, **fit_kwargs)
warnings.warn(
"Sample weight is not supported for the provided linear model!"
" Trained model without weighting inputs. For Lasso, please"
" upgrade sklearn to a version >= 0.23.0."
)
t2 = time.time()
# Convert weights to pytorch
classes = (
torch.IntTensor(sklearn_model.classes_)
if hasattr(sklearn_model, "classes_")
else None
)
# extract model device
device = getattr(model, "device", "cpu")
num_outputs = sklearn_model.coef_.shape[0] if sklearn_model.coef_.ndim > 1 else 1
weight_values = torch.FloatTensor(sklearn_model.coef_).to(device) # type: ignore
bias_values = torch.FloatTensor([sklearn_model.intercept_]).to( # type: ignore
device # type: ignore
) # type: ignore
model._construct_model_params(
norm_type=None,
weight_values=weight_values.view(num_outputs, -1),
bias_value=bias_values.squeeze().unsqueeze(0),
classes=classes,
)
if norm_input:
model.norm = NormLayer(mean, std)
return {"train_time": t2 - t1}
|
from captum.insights.attr_vis import AttributionVisualizer, Batch, features # noqa
|
# for legacy purposes
import warnings
from captum.insights.attr_vis.example import * # noqa
warnings.warn(
"Deprecated. Please import from captum.insights.attr_vis.example instead."
)
main() # noqa
|
#!/usr/bin/env python3
import logging
import os
import socket
import threading
from time import sleep
from typing import Optional
from captum.log import log_usage
from flask import Flask, jsonify, render_template, request
from flask_compress import Compress
from torch import Tensor
app = Flask(
__name__, static_folder="frontend/build/static", template_folder="frontend/build"
)
visualizer = None
port = None
Compress(app)
def namedtuple_to_dict(obj):
if isinstance(obj, Tensor):
return obj.item()
if hasattr(obj, "_asdict"): # detect namedtuple
return dict(zip(obj._fields, (namedtuple_to_dict(item) for item in obj)))
elif isinstance(obj, str): # iterables - strings
return obj
elif hasattr(obj, "keys"): # iterables - mapping
return dict(
zip(obj.keys(), (namedtuple_to_dict(item) for item in obj.values()))
)
elif hasattr(obj, "__iter__"): # iterables - sequence
return type(obj)((namedtuple_to_dict(item) for item in obj))
else: # non-iterable cannot contain namedtuples
return obj
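# Illustrative example of the recursive conversion above (the namedtuple is
# hypothetical):
#   Point = namedtuple("Point", "x y")
#   namedtuple_to_dict((Point(1, 2), "label"))  ->  ({"x": 1, "y": 2}, "label")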
@app.route("/attribute", methods=["POST"])
def attribute():
    # force=True needed for Colab notebooks, which don't use the correct
# Content-Type header when forwarding requests through the Colab proxy
r = request.get_json(force=True)
return jsonify(
namedtuple_to_dict(
visualizer._calculate_attribution_from_cache(
r["inputIndex"], r["modelIndex"], r["labelIndex"]
)
)
)
@app.route("/fetch", methods=["POST"])
def fetch():
# force=True needed, see comment for "/attribute" route above
visualizer._update_config(request.get_json(force=True))
visualizer_output = visualizer.visualize()
clean_output = namedtuple_to_dict(visualizer_output)
return jsonify(clean_output)
@app.route("/init")
def init():
return jsonify(visualizer.get_insights_config())
@app.route("/")
def index(id=0):
return render_template("index.html")
def get_free_tcp_port():
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind(("", 0))
addr, port = tcp.getsockname()
tcp.close()
return port
def run_app(debug: bool = True, bind_all: bool = False):
if bind_all:
app.run(port=port, use_reloader=False, debug=debug, host="0.0.0.0")
else:
app.run(port=port, use_reloader=False, debug=debug)
@log_usage()
def start_server(
_viz,
blocking: bool = False,
debug: bool = False,
_port: Optional[int] = None,
bind_all: bool = False,
):
global visualizer
visualizer = _viz
global port
if port is None:
os.environ["WERKZEUG_RUN_MAIN"] = "true" # hides starting message
if not debug:
log = logging.getLogger("werkzeug")
log.disabled = True
app.logger.disabled = True
port = _port or get_free_tcp_port()
# Start in a new thread to not block notebook execution
t = threading.Thread(
target=run_app, kwargs={"debug": debug, "bind_all": bind_all}
)
t.start()
sleep(0.01) # add a short delay to allow server to start up
if blocking:
t.join()
print(f"\nFetch data and view Captum Insights at http://localhost:{port}/\n")
return port
|
#!/usr/bin/env python3
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union
from captum.attr import (
Deconvolution,
DeepLift,
FeatureAblation,
GuidedBackprop,
InputXGradient,
IntegratedGradients,
Occlusion,
Saliency,
)
from captum.attr._utils.approximation_methods import SUPPORTED_METHODS
class NumberConfig(NamedTuple):
value: int = 1
limit: Tuple[Optional[int], Optional[int]] = (None, None)
type: str = "number"
class StrEnumConfig(NamedTuple):
value: str
limit: List[str]
type: str = "enum"
class StrConfig(NamedTuple):
value: str
type: str = "string"
Config = Union[NumberConfig, StrEnumConfig, StrConfig]
SUPPORTED_ATTRIBUTION_METHODS = [
Deconvolution,
DeepLift,
GuidedBackprop,
InputXGradient,
IntegratedGradients,
Saliency,
FeatureAblation,
Occlusion,
]
class ConfigParameters(NamedTuple):
params: Dict[str, Config]
help_info: Optional[str] = None # TODO fill out help for each method
post_process: Optional[Dict[str, Callable[[Any], Any]]] = None
ATTRIBUTION_NAMES_TO_METHODS = {
# mypy bug - treating it as a type instead of a class
cls.get_name(): cls # type: ignore
for cls in SUPPORTED_ATTRIBUTION_METHODS
}
def _str_to_tuple(s):
if isinstance(s, tuple):
return s
return tuple([int(i) for i in s.split()])
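# For example, the string "3 3" entered in the UI becomes the tuple (3, 3), which is
# the form Occlusion expects for its `sliding_window_shapes` and `strides` arguments.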
ATTRIBUTION_METHOD_CONFIG: Dict[str, ConfigParameters] = {
IntegratedGradients.get_name(): ConfigParameters(
params={
"n_steps": NumberConfig(value=25, limit=(2, None)),
"method": StrEnumConfig(limit=SUPPORTED_METHODS, value="gausslegendre"),
},
post_process={"n_steps": int},
),
FeatureAblation.get_name(): ConfigParameters(
params={"perturbations_per_eval": NumberConfig(value=1, limit=(1, 100))},
),
Occlusion.get_name(): ConfigParameters(
params={
"sliding_window_shapes": StrConfig(value=""),
"strides": StrConfig(value=""),
"perturbations_per_eval": NumberConfig(value=1, limit=(1, 100)),
},
post_process={
"sliding_window_shapes": _str_to_tuple,
"strides": _str_to_tuple,
"perturbations_per_eval": int,
},
),
}
|
from captum.insights.attr_vis.app import AttributionVisualizer, Batch # noqa
|
#!/usr/bin/env python3
import base64
import warnings
from collections import namedtuple
from io import BytesIO
from typing import Callable, List, Optional, Union
from captum._utils.common import safe_div
from captum.attr._utils import visualization as viz
from captum.insights.attr_vis._utils.transforms import format_transforms
FeatureOutput = namedtuple("FeatureOutput", "name base modified type contribution")
def _convert_figure_base64(fig):
buff = BytesIO()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fig.tight_layout() # removes padding
fig.savefig(buff, format="png")
base64img = base64.b64encode(buff.getvalue()).decode("utf-8")
return base64img
class BaseFeature:
r"""
All Feature classes extend this class to implement custom visualizations in
Insights.
It enforces child classes to implement ``visualization_type`` and ``visualize``
methods.
"""
def __init__(
self,
name: str,
baseline_transforms: Optional[Union[Callable, List[Callable]]],
input_transforms: Optional[Union[Callable, List[Callable]]],
visualization_transform: Optional[Callable],
) -> None:
r"""
Args:
name (str): The label of the specific feature. For example, an
ImageFeature's name can be "Photo".
baseline_transforms (list, Callable, optional): Optional list of
callables (e.g. functions) to be called on the input tensor
to construct multiple baselines. Currently only one baseline
is supported. See
:py:class:`.IntegratedGradients` for more
information about baselines.
input_transforms (list, Callable, optional): Optional list of callables
(e.g. functions) called on the input tensor sequentially to
convert it into the format expected by the model.
visualization_transform (Callable, optional): Optional callable (e.g.
function) applied as a postprocessing step of the original
input data (before ``input_transforms``) to convert it to a
format to be understood by the frontend visualizer as
specified in ``captum/captum/insights/frontend/App.js``.
"""
self.name = name
self.baseline_transforms = format_transforms(baseline_transforms)
self.input_transforms = format_transforms(input_transforms)
self.visualization_transform = visualization_transform
@staticmethod
def visualization_type() -> str:
raise NotImplementedError
def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
raise NotImplementedError
class ImageFeature(BaseFeature):
r"""
ImageFeature is used to visualize image features in Insights. It expects an image in
    NCHW format. If C has a dimension of 1, it's assumed to be a greyscale image.
    If it has a dimension of 3, it's expected to be in RGB format.
"""
def __init__(
self,
name: str,
baseline_transforms: Union[Callable, List[Callable]],
input_transforms: Union[Callable, List[Callable]],
visualization_transform: Optional[Callable] = None,
) -> None:
r"""
Args:
name (str): The label of the specific feature. For example, an
ImageFeature's name can be "Photo".
baseline_transforms (list, Callable, optional): Optional list of
callables (e.g. functions) to be called on the input tensor
to construct multiple baselines. Currently only one baseline
is supported. See
:py:class:`.IntegratedGradients` for more
information about baselines.
input_transforms (list, Callable, optional): A list of transforms
or transform to be applied to the input. For images,
normalization is often applied here.
visualization_transform (Callable, optional): Optional callable (e.g.
function) applied as a postprocessing step of the original
input data (before input_transforms) to convert it to a
format to be visualized.
"""
super().__init__(
name,
baseline_transforms=baseline_transforms,
input_transforms=input_transforms,
visualization_transform=visualization_transform,
)
@staticmethod
def visualization_type() -> str:
return "image"
def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
if self.visualization_transform:
data = self.visualization_transform(data)
data_t, attribution_t = [
t.detach().squeeze().permute((1, 2, 0)).cpu().numpy()
for t in (data, attribution)
]
orig_fig, _ = viz.visualize_image_attr(
attribution_t, data_t, method="original_image", use_pyplot=False
)
attr_fig, _ = viz.visualize_image_attr(
attribution_t,
data_t,
method="heat_map",
sign="absolute_value",
use_pyplot=False,
)
img_64 = _convert_figure_base64(orig_fig)
attr_img_64 = _convert_figure_base64(attr_fig)
return FeatureOutput(
name=self.name,
base=img_64,
modified=attr_img_64,
type=self.visualization_type(),
contribution=contribution_frac,
)
class TextFeature(BaseFeature):
r"""
TextFeature is used to visualize text (e.g. sentences) in Insights.
It expects the visualization transform to convert the input data (e.g. index to
string) to the raw text.
"""
def __init__(
self,
name: str,
baseline_transforms: Union[Callable, List[Callable]],
input_transforms: Union[Callable, List[Callable]],
visualization_transform: Callable,
) -> None:
r"""
Args:
name (str): The label of the specific feature. For example, an
ImageFeature's name can be "Photo".
baseline_transforms (list, Callable, optional): Optional list of
callables (e.g. functions) to be called on the input tensor
to construct multiple baselines. Currently only one baseline
is supported. See
:py:class:`.IntegratedGradients` for more
information about baselines.
For text features, a common baseline is a tensor of indices
corresponding to PAD with the same size as the input
tensor. See :py:class:`.TokenReferenceBase` for more
information.
input_transforms (list, Callable, optional): A list of transforms
or transform to be applied to the input. For text, a common
transform is to convert the tokenized input tensor into an
interpretable embedding. See
:py:class:`.InterpretableEmbeddingBase`
and
:py:func:`~.configure_interpretable_embedding_layer`
for more information.
visualization_transform (Callable, optional): Optional callable (e.g.
function) applied as a postprocessing step of the original
input data (before ``input_transforms``) to convert it to a
suitable format for visualization. For text features,
a common function is to convert the token indices to their
corresponding (sub)words.
"""
super().__init__(
name,
baseline_transforms=baseline_transforms,
input_transforms=input_transforms,
visualization_transform=visualization_transform,
)
@staticmethod
def visualization_type() -> str:
return "text"
def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
if self.visualization_transform:
text = self.visualization_transform(data)
else:
text = data
attribution = attribution.squeeze(0)
data = data.squeeze(0)
if len(attribution.shape) > 1:
attribution = attribution.sum(dim=1)
# L-Infinity norm, if norm is 0, all attr elements are 0
attr_max = attribution.abs().max()
normalized_attribution = safe_div(attribution, attr_max)
modified = [x * 100 for x in normalized_attribution.tolist()]
return FeatureOutput(
name=self.name,
base=text,
modified=modified,
type=self.visualization_type(),
contribution=contribution_frac,
)
class GeneralFeature(BaseFeature):
r"""
GeneralFeature is used for non-specified feature visualization in Insights.
It can be used for dense or sparse features.
Currently general features are only supported for 2-d tensors, in the format (N, C)
where N is the number of samples and C is the number of categories.
"""
def __init__(self, name: str, categories: List[str]) -> None:
r"""
Args:
name (str): The label of the specific feature. For example, an
ImageFeature's name can be "Photo".
categories (list[str]): Category labels for the general feature. The
order and size should match the second dimension of the
``data`` tensor parameter in ``visualize``.
"""
super().__init__(
name,
baseline_transforms=None,
input_transforms=None,
visualization_transform=None,
)
self.categories = categories
@staticmethod
def visualization_type() -> str:
return "general"
def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
attribution = attribution.squeeze(0)
data = data.squeeze(0)
# L-2 norm, if norm is 0, all attr elements are 0
l2_norm = attribution.norm()
normalized_attribution = safe_div(attribution, l2_norm)
modified = [x * 100 for x in normalized_attribution.tolist()]
base = [f"{c}: {d:.2f}" for c, d in zip(self.categories, data.tolist())]
return FeatureOutput(
name=self.name,
base=base,
modified=modified,
type=self.visualization_type(),
contribution=contribution_frac,
)
class EmptyFeature(BaseFeature):
def __init__(
self,
name: str = "empty",
baseline_transforms: Optional[Union[Callable, List[Callable]]] = None,
input_transforms: Optional[Union[Callable, List[Callable]]] = None,
visualization_transform: Optional[Callable] = None,
) -> None:
super().__init__(
name,
baseline_transforms=baseline_transforms,
input_transforms=input_transforms,
visualization_transform=visualization_transform,
)
@staticmethod
def visualization_type() -> str:
return "empty"
def visualize(self, _attribution, _data, contribution_frac) -> FeatureOutput:
return FeatureOutput(
name=self.name,
base=None,
modified=None,
type=self.visualization_type(),
contribution=contribution_frac,
)
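# Illustrative construction of the feature classes above (the transforms and category
# names are hypothetical placeholders, not library defaults).
def _example_features() -> None:
    image_feature = ImageFeature(
        "Photo",
        baseline_transforms=[lambda x: x * 0],  # all-zero baseline
        input_transforms=[lambda x: (x - 0.5) / 0.5],  # toy normalization
    )
    general_feature = GeneralFeature("Metadata", categories=["age", "height"])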
|
#!/usr/bin/env python3
import inspect
from collections import namedtuple
from typing import (
Callable,
cast,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import torch
from captum._utils.common import _run_forward, safe_div
from captum.insights.attr_vis.config import (
ATTRIBUTION_METHOD_CONFIG,
ATTRIBUTION_NAMES_TO_METHODS,
)
from captum.insights.attr_vis.features import BaseFeature
from torch import Tensor
from torch.nn import Module
OutputScore = namedtuple("OutputScore", "score index label")
class AttributionCalculation:
def __init__(
self,
models: Sequence[Module],
classes: Sequence[str],
features: List[BaseFeature],
score_func: Optional[Callable] = None,
use_label_for_attr: bool = True,
) -> None:
self.models = models
self.classes = classes
self.features = features
self.score_func = score_func
self.use_label_for_attr = use_label_for_attr
self.baseline_cache: dict = {}
self.transformed_input_cache: dict = {}
def calculate_predicted_scores(
self, inputs, additional_forward_args, model
) -> Tuple[
List[OutputScore], Optional[List[Tuple[Tensor, ...]]], Tuple[Tensor, ...]
]:
# Check if inputs have cached baselines and transformed inputs
hashable_inputs = tuple(inputs)
if hashable_inputs in self.baseline_cache:
baselines_group = self.baseline_cache[hashable_inputs]
transformed_inputs = self.transformed_input_cache[hashable_inputs]
else:
# Initialize baselines
baseline_transforms_len = 1 # todo support multiple baselines
baselines: List[List[Optional[Tensor]]] = [
[None] * len(self.features) for _ in range(baseline_transforms_len)
]
transformed_inputs = list(inputs)
for feature_i, feature in enumerate(self.features):
transformed_inputs[feature_i] = self._transform(
feature.input_transforms, transformed_inputs[feature_i], True
)
for baseline_i in range(baseline_transforms_len):
if baseline_i > len(feature.baseline_transforms) - 1:
baselines[baseline_i][feature_i] = torch.zeros_like(
transformed_inputs[feature_i]
)
else:
baselines[baseline_i][feature_i] = self._transform(
[feature.baseline_transforms[baseline_i]],
transformed_inputs[feature_i],
True,
)
baselines = cast(List[List[Optional[Tensor]]], baselines)
baselines_group = [tuple(b) for b in baselines]
self.baseline_cache[hashable_inputs] = baselines_group
self.transformed_input_cache[hashable_inputs] = transformed_inputs
outputs = _run_forward(
model,
tuple(transformed_inputs),
additional_forward_args=additional_forward_args,
)
if self.score_func is not None:
outputs = self.score_func(outputs)
if outputs.nelement() == 1:
scores = outputs
predicted = scores.round().to(torch.int)
else:
scores, predicted = outputs.topk(min(4, outputs.shape[-1]))
scores = scores.cpu().squeeze(0)
predicted = predicted.cpu().squeeze(0)
predicted_scores = self._get_labels_from_scores(scores, predicted)
return predicted_scores, baselines_group, tuple(transformed_inputs)
def calculate_attribution(
self,
baselines: Optional[Sequence[Tuple[Tensor, ...]]],
data: Tuple[Tensor, ...],
additional_forward_args: Optional[Tuple[Tensor, ...]],
        label: Optional[Tensor],
attribution_method_name: str,
attribution_arguments: Dict,
model: Module,
) -> Tuple[Tensor, ...]:
attribution_cls = ATTRIBUTION_NAMES_TO_METHODS[attribution_method_name]
attribution_method = attribution_cls(model)
if attribution_method_name in ATTRIBUTION_METHOD_CONFIG:
param_config = ATTRIBUTION_METHOD_CONFIG[attribution_method_name]
if param_config.post_process:
for k, v in attribution_arguments.items():
if k in param_config.post_process:
attribution_arguments[k] = param_config.post_process[k](v)
# TODO support multiple baselines
baseline = baselines[0] if baselines and len(baselines) > 0 else None
label = (
None
if not self.use_label_for_attr or label is None or label.nelement() == 0
else label
)
if "baselines" in inspect.signature(attribution_method.attribute).parameters:
attribution_arguments["baselines"] = baseline
attr = attribution_method.attribute.__wrapped__(
attribution_method, # self
data,
additional_forward_args=additional_forward_args,
target=label,
**attribution_arguments,
)
return attr
def calculate_net_contrib(
self, attrs_per_input_feature: Tuple[Tensor, ...]
) -> List[float]:
# get the net contribution per feature (input)
net_contrib = torch.stack(
[attrib.flatten().sum() for attrib in attrs_per_input_feature]
)
# normalise the contribution, s.t. sum(abs(x_i)) = 1
norm = torch.norm(net_contrib, p=1)
# if norm is 0, all net_contrib elements are 0
net_contrib = safe_div(net_contrib, norm)
return net_contrib.tolist()
def _transform(
self, transforms: Iterable[Callable], inputs: Tensor, batch: bool = False
) -> Tensor:
transformed_inputs = inputs
# TODO support batch size > 1
if batch:
transformed_inputs = inputs.squeeze(0)
for t in transforms:
transformed_inputs = t(transformed_inputs)
if batch:
transformed_inputs = transformed_inputs.unsqueeze(0)
return transformed_inputs
def _get_labels_from_scores(
self, scores: Tensor, indices: Tensor
) -> List[OutputScore]:
pred_scores: List[OutputScore] = []
if indices.nelement() < 2:
return pred_scores
for i in range(len(indices)):
score = scores[i]
pred_scores.append(
OutputScore(score, indices[i], self.classes[int(indices[i])])
)
return pred_scores
|
#!/usr/bin/env python3
import os
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from captum.insights import AttributionVisualizer, Batch
from captum.insights.attr_vis.features import ImageFeature
def get_classes():
classes = [
"Plane",
"Car",
"Bird",
"Cat",
"Deer",
"Dog",
"Frog",
"Horse",
"Ship",
"Truck",
]
return classes
def get_pretrained_model():
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.pool2 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.relu3 = nn.ReLU()
self.relu4 = nn.ReLU()
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
pt_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "models/cifar_torchvision.pt")
)
net.load_state_dict(torch.load(pt_path))
return net
def baseline_func(input):
return input * 0
def formatted_data_iter():
dataset = torchvision.datasets.CIFAR10(
root="data/test", train=False, download=True, transform=transforms.ToTensor()
)
dataloader = iter(
torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=False, num_workers=2)
)
while True:
images, labels = next(dataloader)
yield Batch(inputs=images, labels=labels)
def main():
normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
model = get_pretrained_model()
visualizer = AttributionVisualizer(
models=[model],
score_func=lambda o: torch.nn.functional.softmax(o, 1),
classes=get_classes(),
features=[
ImageFeature(
"Photo",
baseline_transforms=[baseline_func],
input_transforms=[normalize],
)
],
dataset=formatted_data_iter(),
)
visualizer.serve(debug=True)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
from collections import namedtuple
from itertools import cycle
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Tuple,
Union,
)
import torch
from captum.attr import IntegratedGradients
from captum.attr._utils.batching import _batched_generator
from captum.insights.attr_vis.attribution_calculation import (
AttributionCalculation,
OutputScore,
)
from captum.insights.attr_vis.config import (
ATTRIBUTION_METHOD_CONFIG,
ATTRIBUTION_NAMES_TO_METHODS,
)
from captum.insights.attr_vis.features import BaseFeature
from captum.insights.attr_vis.server import namedtuple_to_dict
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
_CONTEXT_COLAB = "_CONTEXT_COLAB"
_CONTEXT_IPYTHON = "_CONTEXT_IPYTHON"
_CONTEXT_NONE = "_CONTEXT_NONE"
def _get_context():
"""Determine the most specific context that we're in.
Implementation from TensorBoard: https://git.io/JvObD.
Returns:
_CONTEXT_COLAB: If in Colab with an IPython notebook context.
_CONTEXT_IPYTHON: If not in Colab, but we are in an IPython notebook
context (e.g., from running `jupyter notebook` at the command
line).
_CONTEXT_NONE: Otherwise (e.g., by running a Python script at the
command-line or using the `ipython` interactive shell).
"""
# In Colab, the `google.colab` module is available, but the shell
# returned by `IPython.get_ipython` does not have a `get_trait`
# method.
try:
import google.colab # noqa: F401
import IPython
except ImportError:
pass
else:
if IPython.get_ipython() is not None:
# We'll assume that we're in a Colab notebook context.
return _CONTEXT_COLAB
# In an IPython command line shell or Jupyter notebook, we can
# directly query whether we're in a notebook context.
try:
import IPython
except ImportError:
pass
else:
ipython = IPython.get_ipython()
if ipython is not None and ipython.has_trait("kernel"):
return _CONTEXT_IPYTHON
# Otherwise, we're not in a known notebook context.
return _CONTEXT_NONE
VisualizationOutput = namedtuple(
"VisualizationOutput", "feature_outputs actual predicted active_index model_index"
)
Contribution = namedtuple("Contribution", "name percent")
SampleCache = namedtuple("SampleCache", "inputs additional_forward_args label")
class FilterConfig(NamedTuple):
attribution_method: str = IntegratedGradients.get_name()
# issue with mypy github.com/python/mypy/issues/8376
attribution_arguments: Dict[str, Any] = {
arg: config.value # type: ignore
for arg, config in ATTRIBUTION_METHOD_CONFIG[
IntegratedGradients.get_name()
].params.items()
}
prediction: str = "all"
classes: List[str] = []
num_examples: int = 4
class Batch:
def __init__(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
labels: Optional[Tensor],
additional_args=None,
) -> None:
r"""
Constructs batch of inputs to be attributed and visualized.
Args:
inputs (Tensor or tuple[Tensor, ...]): Batch of inputs for a model.
These may be either a Tensor or tuple of tensors. Each tensor
must correspond to a feature for AttributionVisualizer, and
the corresponding input transform function of the feature
is applied to each input tensor prior to passing it to the
model. It is assumed that the first dimension of each
input tensor corresponds to the number of examples
(batch size) and is aligned for all input tensors.
labels (Tensor): Tensor containing correct labels for input examples.
This must be a 1D tensor with length matching the first
dimension of each input tensor.
additional_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to ``forward_func`` in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples.
"""
self.inputs = inputs
self.labels = labels
self.additional_args = additional_args
class AttributionVisualizer:
def __init__(
self,
models: Union[List[Module], Module],
classes: List[str],
features: Union[List[BaseFeature], BaseFeature],
dataset: Iterable[Batch],
score_func: Optional[Callable] = None,
use_label_for_attr: bool = True,
) -> None:
r"""
Args:
models (torch.nn.Module): One or more PyTorch modules (models) for
attribution visualization.
classes (list[str]): List of strings corresponding to the names of
classes for classification.
features (list[BaseFeature]): List of BaseFeatures, which correspond
to input arguments to the model. Each feature object defines
relevant transformations for converting to model input,
constructing baselines, and visualizing. The length of the
features list should exactly match the number of (tensor)
arguments expected by the given model.
For instance, an image classifier should only provide
a single BaseFeature, while a multimodal classifier may
provide a list of features, each corresponding to a different
tensor input and potentially different modalities.
dataset (Iterable of Batch): Defines the dataset to visualize attributions
for. This must be an iterable of batch objects, each of which
may contain multiple input examples.
score_func (Callable, optional): This function is applied to the model
output to obtain the score for each class. For instance,
this function could be the softmax or final non-linearity
of the network, applied to the model output. The indices
of the second dimension of the output should correspond
to the class names provided. If None, the model outputs
are taken directly and assumed to correspond to the
class scores.
Default: None
use_label_for_attr (bool, optional): If true, the class index is passed
to the relevant attribution method. This is necessary in most
cases where there is an output neuron corresponding to each
class. When the model output is a scalar and class index
(e.g. positive, negative) is inferred from the output value,
this argument should be False.
Default: True
"""
if not isinstance(models, List):
models = [models]
if not isinstance(features, List):
features = [features]
self.classes = classes
self.features = features
self.dataset = dataset
self.models = models
self.attribution_calculation = AttributionCalculation(
models, classes, features, score_func, use_label_for_attr
)
self._outputs: List[VisualizationOutput] = []
self._config = FilterConfig(prediction="all", classes=[], num_examples=4)
self._dataset_iter = iter(dataset)
self._dataset_cache: List[Batch] = []
def _calculate_attribution_from_cache(
self, input_index: int, model_index: int, target: Optional[Tensor]
) -> Optional[VisualizationOutput]:
c = self._outputs[input_index][1]
result = self._calculate_vis_output(
c.inputs,
c.additional_forward_args,
c.label,
torch.tensor(target),
model_index,
)
if not result:
return None
return result[0]
def _update_config(self, settings):
self._config = FilterConfig(
attribution_method=settings["attribution_method"],
attribution_arguments=settings["arguments"],
prediction=settings["prediction"],
classes=settings["classes"],
num_examples=4,
)
@log_usage()
def render(self, debug=True):
from captum.insights.attr_vis.widget import CaptumInsights
from IPython.display import display
widget = CaptumInsights(visualizer=self)
display(widget)
if debug:
display(widget.out)
@log_usage()
def serve(self, blocking=False, debug=False, port=None, bind_all=False):
context = _get_context()
if context == _CONTEXT_COLAB:
return self._serve_colab(blocking=blocking, debug=debug, port=port)
else:
return self._serve(
blocking=blocking, debug=debug, port=port, bind_all=bind_all
)
def _serve(self, blocking=False, debug=False, port=None, bind_all=False):
from captum.insights.attr_vis.server import start_server
return start_server(
self, blocking=blocking, debug=debug, _port=port, bind_all=bind_all
)
def _serve_colab(self, blocking=False, debug=False, port=None):
import ipywidgets as widgets
from captum.insights.attr_vis.server import start_server
from IPython.display import display, HTML
# TODO: Output widget only captures beginning of server logs. It seems
# the context manager isn't respected when the web server is run on a
# separate thread. We should fix to display entirety of the logs
out = widgets.Output()
with out:
port = start_server(self, blocking=blocking, debug=debug, _port=port)
shell = """
<div id="root"></div>
<script>
(function() {
document.querySelector("base").href = "http://localhost:%PORT%";
function reloadScriptsAndCSS(root) {
// Referencing TensorBoard's method for reloading scripts,
// we remove and reinsert each script
for (const script of root.querySelectorAll("script")) {
const newScript = document.createElement("script");
newScript.type = script.type;
if (script.src) {
newScript.src = script.src;
}
if (script.textContent) {
newScript.textContent = script.textContent;
}
root.appendChild(newScript);
script.remove();
}
// A similar method is used to reload styles
for (const link of root.querySelectorAll("link")) {
const newLink = document.createElement("link");
newLink.rel = link.rel;
newLink.href = link.href;
document.querySelector("head").appendChild(newLink);
link.remove();
}
}
const root = document.getElementById("root");
fetch(".")
.then(x => x.text())
.then(html => void (root.innerHTML = html))
.then(() => reloadScriptsAndCSS(root));
})();
</script>
""".replace(
"%PORT%", str(port)
)
html = HTML(shell)
display(html)
display(out)
def _predictions_matches_labels(
self, predicted_scores: List[OutputScore], labels: Union[str, List[str]]
) -> bool:
if len(predicted_scores) == 0:
return False
predicted_label = predicted_scores[0].label
if isinstance(labels, List):
return predicted_label in labels
return labels == predicted_label
def _should_keep_prediction(
self, predicted_scores: List[OutputScore], actual_label: Optional[OutputScore]
) -> bool:
# filter by class
if len(self._config.classes) != 0:
if not self._predictions_matches_labels(
predicted_scores, self._config.classes
):
return False
if not actual_label:
return True
# filter by accuracy
label_name = actual_label.label
if self._config.prediction == "all":
pass
elif self._config.prediction == "correct":
if not self._predictions_matches_labels(predicted_scores, label_name):
return False
elif self._config.prediction == "incorrect":
if self._predictions_matches_labels(predicted_scores, label_name):
return False
else:
raise Exception(f"Invalid prediction config: {self._config.prediction}")
return True
def _calculate_vis_output(
self,
inputs,
additional_forward_args,
label,
target=None,
single_model_index=None,
) -> Optional[List[VisualizationOutput]]:
# Use all models, unless the user wants to render data for a particular one
models_used = (
[self.models[single_model_index]]
if single_model_index is not None
else self.models
)
results = []
for model_index, model in enumerate(models_used):
# Get list of model visualizations for each input
actual_label_output = None
if label is not None and len(label) > 0:
label_index = int(label[0])
actual_label_output = OutputScore(
score=100, index=label_index, label=self.classes[label_index]
)
(
predicted_scores,
baselines,
transformed_inputs,
) = self.attribution_calculation.calculate_predicted_scores(
inputs, additional_forward_args, model
)
# Filter based on UI configuration
if actual_label_output is None or not self._should_keep_prediction(
predicted_scores, actual_label_output
):
continue
if target is None:
target = (
predicted_scores[0].index if len(predicted_scores) > 0 else None
)
# attributions are given per input*
# inputs given to the model are described via `self.features`
#
# *an input contains multiple features that represent it
# e.g. all the pixels that describe an image is an input
attrs_per_feature = self.attribution_calculation.calculate_attribution(
baselines,
transformed_inputs,
additional_forward_args,
target,
self._config.attribution_method,
self._config.attribution_arguments,
model,
)
net_contrib = self.attribution_calculation.calculate_net_contrib(
attrs_per_feature
)
# the features per input given
features_per_input = [
feature.visualize(attr, data, contrib)
for feature, attr, data, contrib in zip(
self.features, attrs_per_feature, inputs, net_contrib
)
]
results.append(
VisualizationOutput(
feature_outputs=features_per_input,
actual=actual_label_output,
predicted=predicted_scores,
active_index=target
if target is not None
else actual_label_output.index,
# Even if we only iterated over one model, the index should be fixed
# to show the index the model would have had in the list
model_index=single_model_index
if single_model_index is not None
else model_index,
)
)
return results if results else None
def _get_outputs(self) -> List[Tuple[List[VisualizationOutput], SampleCache]]:
# If we run out of new batches, then we need to
# display data which was already shown before.
# However, since the dataset given to us is a generator,
# we can't reset it to return to the beginning.
# Because of this, we store a small cache of stale
# data, and iterate on it after the main generator
# stops returning new batches.
try:
batch_data = next(self._dataset_iter)
self._dataset_cache.append(batch_data)
if len(self._dataset_cache) > self._config.num_examples:
self._dataset_cache.pop(0)
except StopIteration:
self._dataset_iter = cycle(self._dataset_cache)
batch_data = next(self._dataset_iter)
vis_outputs = []
# Type ignore for issue with passing union to function taking generic
# https://github.com/python/mypy/issues/1533
for (
inputs,
additional_forward_args,
label,
) in _batched_generator( # type: ignore
inputs=batch_data.inputs,
additional_forward_args=batch_data.additional_args,
target_ind=batch_data.labels,
internal_batch_size=1, # should be 1 until we have batch label support
):
output = self._calculate_vis_output(inputs, additional_forward_args, label)
if output is not None:
cache = SampleCache(inputs, additional_forward_args, label)
vis_outputs.append((output, cache))
return vis_outputs
@log_usage()
def visualize(self):
self._outputs = []
while len(self._outputs) < self._config.num_examples:
self._outputs.extend(self._get_outputs())
return [o[0] for o in self._outputs]
def get_insights_config(self):
return {
"classes": self.classes,
"methods": list(ATTRIBUTION_NAMES_TO_METHODS.keys()),
"method_arguments": namedtuple_to_dict(
{k: v.params for (k, v) in ATTRIBUTION_METHOD_CONFIG.items()}
),
"selected_method": self._config.attribution_method,
}
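# Usage sketch (illustrative, not part of the module): in a Jupyter or Colab
# notebook an AttributionVisualizer can be rendered inline instead of served as
# a standalone web app. This assumes `model`, `get_classes`, `baseline_func`,
# `formatted_data_iter` and `normalize` defined as in the CIFAR example earlier
# in this document:
#
#     visualizer = AttributionVisualizer(
#         models=[model],
#         score_func=lambda o: torch.nn.functional.softmax(o, 1),
#         classes=get_classes(),
#         features=[
#             ImageFeature(
#                 "Photo",
#                 baseline_transforms=[baseline_func],
#                 input_transforms=[normalize],
#             )
#         ],
#         dataset=formatted_data_iter(),
#     )
#     visualizer.render()          # displays the CaptumInsights widget inline
#     # or: visualizer.serve(debug=True)  # starts the web server instead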
|
#!/usr/bin/env python3
from typing import Callable, List, Optional, Union
def format_transforms(
transforms: Optional[Union[Callable, List[Callable]]]
) -> List[Callable]:
if transforms is None:
return []
if callable(transforms):
return [transforms]
return transforms
|
#!/usr/bin/env python3
import ipywidgets as widgets
from captum.insights import AttributionVisualizer
from captum.insights.attr_vis.server import namedtuple_to_dict
from traitlets import Dict, Instance, List, observe, Unicode
@widgets.register
class CaptumInsights(widgets.DOMWidget):
"""A widget for interacting with Captum Insights."""
_view_name = Unicode("CaptumInsightsView").tag(sync=True)
_model_name = Unicode("CaptumInsightsModel").tag(sync=True)
_view_module = Unicode("jupyter-captum-insights").tag(sync=True)
_model_module = Unicode("jupyter-captum-insights").tag(sync=True)
_view_module_version = Unicode("^0.1.0").tag(sync=True)
_model_module_version = Unicode("^0.1.0").tag(sync=True)
visualizer = Instance(klass=AttributionVisualizer)
insights_config = Dict().tag(sync=True)
label_details = Dict().tag(sync=True)
attribution = Dict().tag(sync=True)
config = Dict().tag(sync=True)
output = List().tag(sync=True)
def __init__(self, **kwargs) -> None:
super(CaptumInsights, self).__init__(**kwargs)
self.insights_config = self.visualizer.get_insights_config()
self.out = widgets.Output()
with self.out:
print("Captum Insights widget created.")
@observe("config")
def _fetch_data(self, change):
if not self.config:
return
with self.out:
self.visualizer._update_config(self.config)
self.output = namedtuple_to_dict(self.visualizer.visualize())
self.config = dict()
@observe("label_details")
def _fetch_attribution(self, change):
if not self.label_details:
return
with self.out:
self.attribution = namedtuple_to_dict(
self.visualizer._calculate_attribution_from_cache(
self.label_details["inputIndex"],
self.label_details["modelIndex"],
self.label_details["labelIndex"],
)
)
self.label_details = dict()
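# Usage sketch (illustrative, not part of the module): inside a notebook the
# widget is typically created from an existing AttributionVisualizer (here
# assumed to be bound to the name `visualizer`) and shown with IPython's
# display(); AttributionVisualizer.render() does exactly this.
#
#     from IPython.display import display
#     widget = CaptumInsights(visualizer=visualizer)
#     display(widget)
#     display(widget.out)  # optional: show captured log output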
|
version_info = (0, 1, 0, "alpha", 0)
_specifier_ = {"alpha": "a", "beta": "b", "candidate": "rc", "final": ""}
__version__ = "%s.%s.%s%s" % (
version_info[0],
version_info[1],
version_info[2],
""
if version_info[3] == "final"
else _specifier_[version_info[3]] + str(version_info[4]),
)
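# For example, with version_info = (0, 1, 0, "alpha", 0) as above, __version__
# evaluates to "0.1.0a0"; a (0, 1, 0, "final", 0) release would yield "0.1.0".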
|
from captum.insights.attr_vis.widget._version import __version__, version_info # noqa
from captum.insights.attr_vis.widget.widget import * # noqa
def _jupyter_nbextension_paths():
return [
{
"section": "notebook",
"src": "static",
"dest": "jupyter-captum-insights",
"require": "jupyter-captum-insights/extension",
}
]
|
#!/usr/bin/env python3
from captum.robust._core.fgsm import FGSM # noqa
from captum.robust._core.metrics.attack_comparator import AttackComparator # noqa
from captum.robust._core.metrics.min_param_perturbation import ( # noqa
MinParamPerturbation,
)
from captum.robust._core.perturbation import Perturbation # noqa
from captum.robust._core.pgd import PGD # noqa
|
#!/usr/bin/env python3
from typing import Any, Callable, Optional, Tuple, Union
import torch
from captum._utils.common import (
_format_additional_forward_args,
_format_output,
_format_tensor_into_tuples,
_is_tuple,
_select_targets,
)
from captum._utils.gradient import (
apply_gradient_requirements,
compute_gradients,
undo_gradient_requirements,
)
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.log import log_usage
from captum.robust._core.perturbation import Perturbation
from torch import Tensor
class FGSM(Perturbation):
r"""
Fast Gradient Sign Method is a one-step method that can generate
adversarial examples.
For non-targeted attack, the formulation is::
x' = x + epsilon * sign(gradient of L(theta, x, y))
For targeted attack on t, the formulation is::
x' = x - epsilon * sign(gradient of L(theta, x, t))
``L(theta, x, y)`` is the model's loss function with respect to model
parameters, inputs and labels.
More details on Fast Gradient Sign Method can be found in the original
paper: https://arxiv.org/abs/1412.6572
"""
def __init__(
self,
forward_func: Callable,
loss_func: Optional[Callable] = None,
lower_bound: float = float("-inf"),
upper_bound: float = float("inf"),
) -> None:
r"""
Args:
forward_func (Callable): The pytorch model for which the attack is
computed.
            loss_func (Callable, optional): Loss function for which the gradient
                    is computed. It should take the model outputs and labels,
                    and return a loss tensor.
                    The default loss function is the negative log of the model
                    output for the given target.
lower_bound (float, optional): Lower bound of input values.
Default: ``float("-inf")``
upper_bound (float, optional): Upper bound of input values.
e.g. image pixels must be in the range 0-255
Default: ``float("inf")``
Attributes:
bound (Callable): A function that bounds the input values based on
given lower_bound and upper_bound. Can be overwritten for
custom use cases if necessary.
zero_thresh (float): The threshold below which gradient will be treated
as zero. Can be modified for custom use cases if necessary.
"""
super().__init__()
self.forward_func = forward_func
self.loss_func = loss_func
self.bound = lambda x: torch.clamp(x, min=lower_bound, max=upper_bound)
self.zero_thresh = 10**-6
@log_usage()
def perturb(
self,
inputs: TensorOrTupleOfTensorsGeneric,
epsilon: float,
target: Any,
additional_forward_args: Any = None,
targeted: bool = False,
mask: Optional[TensorOrTupleOfTensorsGeneric] = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
This method computes and returns the perturbed input for each input tensor.
It supports both targeted and non-targeted attacks.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which adversarial
attack is computed. It can be provided as a single
tensor or a tuple of multiple tensors. If multiple
input tensors are provided, the batch sizes must be
aligned across all tensors.
epsilon (float): Step size of perturbation.
target (Any): True labels of inputs if non-targeted attack is
desired. Target class of inputs if targeted attack
is desired. Target will be passed to the loss function
to compute loss, so the type needs to match the
argument type of the loss function.
If using the default negative log as loss function,
labels should be of type int, tuple, tensor or list.
For general 2D outputs, labels can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the label for the corresponding example.
For outputs with > 2 dimensions, labels can be either:
- A single tuple, which contains #output_dims - 1
elements. This label index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
label for the corresponding example.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. These arguments are provided to
forward_func in order following the arguments in inputs.
Default: None.
targeted (bool, optional): If attack should be targeted.
Default: False.
mask (Tensor or tuple[Tensor, ...], optional): mask of zeroes and ones
that defines which elements within the input tensor(s) are
perturbed. This mask must have the same shape and
dimensionality as the inputs. If this argument is not
provided, all elements will be perturbed.
Default: None.
Returns:
- **perturbed inputs** (*Tensor* or *tuple[Tensor, ...]*):
Perturbed input for each
input tensor. The perturbed inputs have the same shape and
dimensionality as the inputs.
If a single tensor is provided as inputs, a single tensor
is returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
"""
is_inputs_tuple = _is_tuple(inputs)
inputs: Tuple[Tensor, ...] = _format_tensor_into_tuples(inputs)
masks: Union[Tuple[int, ...], Tuple[Tensor, ...]] = (
_format_tensor_into_tuples(mask)
if (mask is not None)
else (1,) * len(inputs)
)
gradient_mask = apply_gradient_requirements(inputs)
def _forward_with_loss() -> Tensor:
additional_inputs = _format_additional_forward_args(additional_forward_args)
outputs = self.forward_func( # type: ignore
*(*inputs, *additional_inputs) # type: ignore
if additional_inputs is not None
else inputs
)
if self.loss_func is not None:
return self.loss_func(outputs, target)
else:
loss = -torch.log(outputs)
return _select_targets(loss, target)
grads = compute_gradients(_forward_with_loss, inputs)
undo_gradient_requirements(inputs, gradient_mask)
perturbed_inputs = self._perturb(inputs, grads, epsilon, targeted, masks)
perturbed_inputs = tuple(
self.bound(perturbed_inputs[i]) for i in range(len(perturbed_inputs))
)
return _format_output(is_inputs_tuple, perturbed_inputs)
def _perturb(
self,
inputs: Tuple,
grads: Tuple,
epsilon: float,
targeted: bool,
masks: Tuple,
) -> Tuple:
r"""
A helper function to calculate the perturbed inputs given original
inputs, gradient of loss function and epsilon. The calculation is
different for targeted v.s. non-targeted as described above.
"""
multiplier = -1 if targeted else 1
inputs = tuple(
torch.where(
torch.abs(grad) > self.zero_thresh,
inp + multiplier * epsilon * torch.sign(grad) * mask,
inp,
)
for grad, inp, mask in zip(grads, inputs, masks)
)
return inputs
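if __name__ == "__main__":
    # Minimal self-contained sketch (illustrative, not part of the library):
    # an untargeted FGSM attack on a tiny, randomly initialized classifier.
    # The toy model ends in softmax because the default loss takes the
    # negative log of the selected model output.
    import torch.nn as nn

    toy_model = nn.Sequential(nn.Linear(4, 3), nn.Softmax(dim=1))
    fgsm = FGSM(toy_model, lower_bound=-1.0, upper_bound=1.0)
    x = torch.rand(2, 4) * 2 - 1
    labels = torch.tensor([0, 2])
    x_adv = fgsm.perturb(x, epsilon=0.1, target=labels)
    # Each element moves by at most epsilon (and stays within the bounds).
    print((x_adv - x).abs().max().item())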
|
#!/usr/bin/env python3
from typing import Any, Callable, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from captum._utils.common import _format_output, _format_tensor_into_tuples, _is_tuple
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.log import log_usage
from captum.robust._core.fgsm import FGSM
from captum.robust._core.perturbation import Perturbation
from torch import Tensor
class PGD(Perturbation):
r"""
Projected Gradient Descent is an iterative version of the one-step attack
FGSM that can generate adversarial examples. It takes multiple gradient
steps to search for an adversarial perturbation within the desired
neighbor ball around the original inputs. In a non-targeted attack, the
formulation is::
x_0 = x
        x_(t+1) = Clip_r(x_t + alpha * sign(gradient of L(theta, x_t, y)))
    where Clip denotes the function that projects its argument to the r-neighbor
    ball around x so that the perturbation will be bounded. Alpha is the step
    size. L(theta, x, y) is the model's loss function with respect to model
    parameters, inputs and labels.
    In a targeted attack on target class t, the formulation is similar::
        x_0 = x
        x_(t+1) = Clip_r(x_t - alpha * sign(gradient of L(theta, x_t, t)))
More details on Projected Gradient Descent can be found in the original
paper: https://arxiv.org/abs/1706.06083
"""
def __init__(
self,
forward_func: Callable,
        loss_func: Optional[Callable] = None,
lower_bound: float = float("-inf"),
upper_bound: float = float("inf"),
) -> None:
r"""
Args:
forward_func (Callable): The pytorch model for which the attack is
computed.
            loss_func (Callable, optional): Loss function for which the gradient
                    is computed. It should take the model outputs and labels,
                    and return the loss for each input tensor.
                    The default loss function is the negative log of the model
                    output for the given target.
lower_bound (float, optional): Lower bound of input values.
Default: ``float("-inf")``
upper_bound (float, optional): Upper bound of input values.
e.g. image pixels must be in the range 0-255
Default: ``float("inf")``
Attributes:
bound (Callable): A function that bounds the input values based on
given lower_bound and upper_bound. Can be overwritten for
custom use cases if necessary.
"""
super().__init__()
self.forward_func = forward_func
self.fgsm = FGSM(forward_func, loss_func)
self.bound = lambda x: torch.clamp(x, min=lower_bound, max=upper_bound)
@log_usage()
def perturb(
self,
inputs: TensorOrTupleOfTensorsGeneric,
radius: float,
step_size: float,
step_num: int,
target: Any,
additional_forward_args: Any = None,
targeted: bool = False,
random_start: bool = False,
norm: str = "Linf",
mask: Optional[TensorOrTupleOfTensorsGeneric] = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
This method computes and returns the perturbed input for each input tensor.
It supports both targeted and non-targeted attacks.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which adversarial
attack is computed. It can be provided as a single
tensor or a tuple of multiple tensors. If multiple
input tensors are provided, the batch sizes must be
aligned across all tensors.
radius (float): Radius of the neighbor ball centered around inputs.
The perturbation should be within this range.
step_size (float): Step size of each gradient step.
            step_num (int): Number of gradient steps to take. It should usually
                    be large enough for the perturbation to reach the border of
                    the ball.
target (Any): True labels of inputs if non-targeted attack is
desired. Target class of inputs if targeted attack
is desired. Target will be passed to the loss function
to compute loss, so the type needs to match the
argument type of the loss function.
If using the default negative log as loss function,
labels should be of type int, tuple, tensor or list.
For general 2D outputs, labels can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the label for the corresponding example.
For outputs with > 2 dimensions, labels can be either:
- A single tuple, which contains #output_dims - 1
elements. This label index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
label for the corresponding example.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. These arguments are provided to
forward_func in order following the arguments in inputs.
Default: ``None``
targeted (bool, optional): If attack should be targeted.
Default: ``False``
random_start (bool, optional): If a random initialization is added to
inputs. Default: ``False``
norm (str, optional): Specifies the norm to calculate distance from
original inputs: ``Linf`` | ``L2``.
Default: ``Linf``
mask (Tensor or tuple[Tensor, ...], optional): mask of zeroes and ones
that defines which elements within the input tensor(s) are
perturbed. This mask must have the same shape and
dimensionality as the inputs. If this argument is not
provided, all elements are perturbed.
Default: None.
Returns:
- **perturbed inputs** (*Tensor* or *tuple[Tensor, ...]*):
Perturbed input for each
input tensor. The perturbed inputs have the same shape and
dimensionality as the inputs.
If a single tensor is provided as inputs, a single tensor
is returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
"""
def _clip(inputs: Tensor, outputs: Tensor) -> Tensor:
diff = outputs - inputs
if norm == "Linf":
return inputs + torch.clamp(diff, -radius, radius)
elif norm == "L2":
return inputs + torch.renorm(diff, 2, 0, radius)
else:
raise AssertionError("Norm constraint must be L2 or Linf.")
is_inputs_tuple = _is_tuple(inputs)
formatted_inputs = _format_tensor_into_tuples(inputs)
formatted_masks: Union[Tuple[int, ...], Tuple[Tensor, ...]] = (
_format_tensor_into_tuples(mask)
if (mask is not None)
else (1,) * len(formatted_inputs)
)
perturbed_inputs = formatted_inputs
if random_start:
perturbed_inputs = tuple(
self.bound(
self._random_point(
formatted_inputs[i], radius, norm, formatted_masks[i]
)
)
for i in range(len(formatted_inputs))
)
for _i in range(step_num):
perturbed_inputs = self.fgsm.perturb(
perturbed_inputs,
step_size,
target,
additional_forward_args,
targeted,
formatted_masks,
)
perturbed_inputs = tuple(
_clip(formatted_inputs[j], perturbed_inputs[j])
for j in range(len(perturbed_inputs))
)
# Detaching inputs to avoid dependency of gradient between steps
perturbed_inputs = tuple(
self.bound(perturbed_inputs[j]).detach()
for j in range(len(perturbed_inputs))
)
return _format_output(is_inputs_tuple, perturbed_inputs)
def _random_point(
self, center: Tensor, radius: float, norm: str, mask: Union[Tensor, int]
) -> Tensor:
r"""
A helper function that returns a uniform random point within the ball
with the given center and radius. Norm should be either L2 or Linf.
"""
if norm == "L2":
u = torch.randn_like(center)
unit_u = F.normalize(u.view(u.size(0), -1)).view(u.size())
d = torch.numel(center[0])
r = (torch.rand(u.size(0)) ** (1.0 / d)) * radius
r = r[(...,) + (None,) * (r.dim() - 1)]
x = r * unit_u
return center + (x * mask)
elif norm == "Linf":
x = torch.rand_like(center) * radius * 2 - radius
return center + (x * mask)
else:
raise AssertionError("Norm constraint must be L2 or Linf.")
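if __name__ == "__main__":
    # Minimal self-contained sketch (illustrative, not part of the library):
    # an untargeted L-inf PGD attack on a tiny, randomly initialized
    # classifier. As with FGSM, the default loss expects the model to output
    # probabilities, so the toy model ends in softmax.
    import torch.nn as nn

    toy_model = nn.Sequential(nn.Linear(4, 3), nn.Softmax(dim=1))
    pgd = PGD(toy_model, lower_bound=0.0, upper_bound=1.0)
    x = torch.rand(2, 4)
    labels = torch.tensor([0, 2])
    x_adv = pgd.perturb(
        x,
        radius=0.1,
        step_size=0.02,
        step_num=10,
        target=labels,
        random_start=True,
    )
    # The projection (clip) step keeps the perturbation within the L-inf radius.
    print((x_adv - x).abs().max().item())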
|
#!/usr/bin/env python3
from typing import Callable
class Perturbation:
r"""
    All perturbation and attack algorithms extend this class. It requires
    child classes to implement and override the core `perturb` method.
"""
perturb: Callable
r"""
This method computes and returns the perturbed input for each input tensor.
Deriving classes are responsible for implementing its logic accordingly.
Specific adversarial attack algorithms that extend this class take relevant
arguments.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which adversarial attack
is computed. It can be provided as a single tensor or
a tuple of multiple tensors. If multiple input tensors
are provided, the batch sizes must be aligned across all
tensors.
Returns:
- **perturbed inputs** (*Tensor* or *tuple[Tensor, ...]*):
Perturbed input for each
input tensor. The perturbed inputs have the same shape and
dimensionality as the inputs.
If a single tensor is provided as inputs, a single tensor
is returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
"""
def __call__(self, *args, **kwargs):
return self.perturb(*args, **kwargs)
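if __name__ == "__main__":
    # Minimal sketch (illustrative, not part of the library): a custom
    # perturbation only needs to implement `perturb`; __call__ then
    # delegates to it.
    import torch

    class GaussianNoise(Perturbation):
        def __init__(self, stdev: float) -> None:
            self.stdev = stdev

        def perturb(self, inputs: torch.Tensor) -> torch.Tensor:
            # Add i.i.d. Gaussian noise with the configured standard deviation.
            return inputs + self.stdev * torch.randn_like(inputs)

    noise = GaussianNoise(stdev=0.1)
    print(noise(torch.zeros(2, 3)).shape)  # torch.Size([2, 3])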
|
#!/usr/bin/env python3
import warnings
from collections import namedtuple
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
List,
NamedTuple,
Optional,
Tuple,
TypeVar,
Union,
)
from captum._utils.common import (
_expand_additional_forward_args,
_format_additional_forward_args,
_reduce_list,
)
from captum.attr import Max, Mean, Min, Summarizer
from captum.log import log_usage
from captum.robust._core.perturbation import Perturbation
from torch import Tensor
ORIGINAL_KEY = "Original"
MetricResultType = TypeVar(
"MetricResultType", float, Tensor, Tuple[Union[float, Tensor], ...]
)
class AttackInfo(NamedTuple):
attack_fn: Union[Perturbation, Callable]
name: str
num_attempts: int
apply_before_preproc: bool
attack_kwargs: Dict[str, Any]
additional_args: List[str]
def agg_metric(inp):
if isinstance(inp, Tensor):
return inp.mean(dim=0)
elif isinstance(inp, tuple):
return tuple(agg_metric(elem) for elem in inp)
return inp
class AttackComparator(Generic[MetricResultType]):
r"""
Allows measuring model robustness for a given attack or set of attacks. This class
can be used with any metric(s) as well as any set of attacks, either based on
attacks / perturbations from captum.robust such as FGSM or PGD or external
augmentation methods or perturbations such as torchvision transforms.
"""
def __init__(
self,
forward_func: Callable,
metric: Callable[..., MetricResultType],
preproc_fn: Optional[Callable] = None,
) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of a model's forward
function.
metric (Callable): This function is applied to the model output in
order to compute the desired performance metric or metrics.
This function should have the following signature::
                >>> def model_metric(model_out: Tensor, **kwargs: Any)
                >>>     -> Union[float, Tensor, Tuple[Union[float, Tensor], ...]]:
All kwargs provided to evaluate are provided to the metric function,
following the model output. A single metric can be returned as
a float or tensor, and multiple metrics should be returned as either
a tuple or named tuple of floats or tensors. For a tensor metric,
the first dimension should match the batch size, corresponding to
metrics for each example. Tensor metrics are averaged over the first
dimension when aggregating multiple batch results.
If tensor metrics represent results for the full batch, the size of the
first dimension should be 1.
preproc_fn (Callable, optional): Optional method applied to inputs. Output
of preproc_fn is then provided as input to model, in addition to
additional_forward_args provided to evaluate.
Default: ``None``
"""
self.forward_func = forward_func
self.metric: Callable = metric
self.preproc_fn = preproc_fn
self.attacks: Dict[str, AttackInfo] = {}
self.summary_results: Dict[str, Summarizer] = {}
self.metric_aggregator = agg_metric
self.batch_stats = [Mean, Min, Max]
self.aggregate_stats = [Mean]
self.summary_results = {}
self.out_format = None
def add_attack(
self,
attack: Union[Perturbation, Callable],
name: Optional[str] = None,
num_attempts: int = 1,
apply_before_preproc: bool = True,
attack_kwargs: Optional[Dict[str, Any]] = None,
additional_attack_arg_names: Optional[List[str]] = None,
) -> None:
r"""
Adds attack to be evaluated when calling evaluate.
Args:
attack (Perturbation or Callable): This can either be an instance
of a Captum Perturbation / Attack
or any other perturbation or attack function such
as a torchvision transform.
name (str, optional): Name or identifier for attack, used as key for
attack results. This defaults to attack.__class__.__name__
if not provided and must be unique for all added attacks.
Default: ``None``
num_attempts (int, optional): Number of attempts that attack should be
repeated. This should only be set to > 1 for non-deterministic
attacks. The minimum, maximum, and average (best, worst, and
average case) are tracked for attack attempts.
Default: ``1``
apply_before_preproc (bool, optional): Defines whether attack should be
applied before or after preproc function.
Default: ``True``
attack_kwargs (dict, optional): Additional arguments to be provided to
given attack. This should be provided as a dictionary of keyword
arguments.
Default: ``None``
additional_attack_arg_names (list[str], optional): Any additional
arguments for the attack which are specific to the particular input
example or batch. An example of this is target, which is necessary
for some attacks such as FGSM or PGD. These arguments are included
if provided as a kwarg to evaluate.
Default: ``None``
"""
if name is None:
name = attack.__class__.__name__
if attack_kwargs is None:
attack_kwargs = {}
if additional_attack_arg_names is None:
additional_attack_arg_names = []
if name in self.attacks:
raise RuntimeError(
"Cannot add attack with same name as existing attack {}".format(name)
)
self.attacks[name] = AttackInfo(
attack_fn=attack,
name=name,
num_attempts=num_attempts,
apply_before_preproc=apply_before_preproc,
attack_kwargs=attack_kwargs,
additional_args=additional_attack_arg_names,
)
def _format_summary(
self, summary: Union[Dict, List[Dict]]
) -> Dict[str, MetricResultType]:
r"""
This method reformats a given summary; particularly for tuples,
the Summarizer's summary format is a list of dictionaries,
each containing the summary for the corresponding elements.
We reformat this to return a dictionary with tuples containing
the summary results.
"""
if isinstance(summary, dict):
return summary
else:
summary_dict: Dict[str, Tuple] = {}
for key in summary[0]:
summary_dict[key] = tuple(s[key] for s in summary)
if self.out_format:
summary_dict[key] = self.out_format(*summary_dict[key])
return summary_dict # type: ignore
def _update_out_format(
self, out_metric: Union[float, Tensor, Tuple[Union[float, Tensor], ...]]
) -> None:
if (
not self.out_format
and isinstance(out_metric, tuple)
and hasattr(out_metric, "_fields")
):
self.out_format = namedtuple( # type: ignore
type(out_metric).__name__, cast(NamedTuple, out_metric)._fields
)
def _evaluate_batch(
self,
input_list: List[Any],
additional_forward_args: Optional[Tuple],
key_list: List[str],
batch_summarizers: Dict[str, Summarizer],
metric_kwargs: Dict[str, Any],
) -> None:
if additional_forward_args is None:
additional_forward_args = ()
if len(input_list) == 1:
model_out = self.forward_func(input_list[0], *additional_forward_args)
out_metric = self.metric(model_out, **metric_kwargs)
self._update_out_format(out_metric)
batch_summarizers[key_list[0]].update(out_metric)
else:
batched_inps = _reduce_list(input_list)
model_out = self.forward_func(batched_inps, *additional_forward_args)
current_count = 0
for i in range(len(input_list)):
batch_size = (
input_list[i].shape[0]
if isinstance(input_list[i], Tensor)
else input_list[i][0].shape[0]
)
out_metric = self.metric(
model_out[current_count : current_count + batch_size],
**metric_kwargs,
)
self._update_out_format(out_metric)
batch_summarizers[key_list[i]].update(out_metric)
current_count += batch_size
@log_usage()
def evaluate(
self,
inputs: Any,
additional_forward_args: Any = None,
perturbations_per_eval: int = 1,
**kwargs,
) -> Dict[str, Union[MetricResultType, Dict[str, MetricResultType]]]:
r"""
Evaluate model and attack performance on provided inputs
Args:
inputs (Any): Input for which attack metrics
are computed. It can be provided as a tensor, tuple of tensors,
or any raw input type (e.g. PIL image or text string).
This input is provided directly as input to preproc function as well
as any attack applied before preprocessing. If no pre-processing
function is provided, this input is provided directly to the main
model and all attacks.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the preprocessing
outputs (or inputs if preproc_fn is None), this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Default: ``None``
perturbations_per_eval (int, optional): Allows perturbations of multiple
                    attacks to be grouped and evaluated in one call of forward_func.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
In order to apply this functionality, the output of preproc_fn
(or inputs itself if no preproc_fn is provided) must be a tensor
or tuple of tensors.
Default: ``1``
kwargs (Any, optional): Additional keyword arguments provided to metric
function as well as selected attacks based on chosen additional_args.
Default: ``None``
Returns:
- **attack results** Dict: str -> Dict[str, Union[Tensor, Tuple[Tensor, ...]]]:
Dictionary containing attack results for provided batch.
Maps attack name to dictionary,
containing best-case, worst-case and average-case results for attack.
Dictionary contains keys "mean", "max" and "min" when num_attempts > 1
and only "mean" for num_attempts = 1, which contains the (single) metric
result for the attack attempt.
An additional key of 'Original' is included with metric results
without any perturbations.
Examples::
            >>> def accuracy_metric(model_out: Tensor, target: Tensor):
            >>>     return (torch.argmax(model_out, dim=1) == target).float()
            >>> attack_metric = AttackComparator(forward_func=resnet18,
            >>>                                  metric=accuracy_metric,
            >>>                                  preproc_fn=normalize)
            >>> random_rotation = transforms.RandomRotation()
            >>> jitter = transforms.ColorJitter()
            >>> attack_metric.add_attack(random_rotation, "Random Rotation",
            >>>                          num_attempts=5)
            >>> attack_metric.add_attack(jitter, "Jitter", num_attempts=1)
            >>> attack_metric.add_attack(FGSM(resnet18), "FGSM 0.1", num_attempts=1,
            >>>                          apply_before_preproc=False,
            >>>                          attack_kwargs={"epsilon": 0.1},
            >>>                          additional_attack_arg_names=["target"])
            >>> for images, labels in dataloader:
            >>>     batch_results = attack_metric.evaluate(inputs=images, target=labels)
"""
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
expanded_additional_args = (
_expand_additional_forward_args(
additional_forward_args, perturbations_per_eval
)
if perturbations_per_eval > 1
else additional_forward_args
)
preproc_input = None
if self.preproc_fn is not None:
preproc_input = self.preproc_fn(inputs)
else:
preproc_input = inputs
input_list = [preproc_input]
key_list = [ORIGINAL_KEY]
batch_summarizers = {ORIGINAL_KEY: Summarizer([Mean()])}
if ORIGINAL_KEY not in self.summary_results:
self.summary_results[ORIGINAL_KEY] = Summarizer(
[stat() for stat in self.aggregate_stats]
)
def _check_and_evaluate(input_list, key_list):
if len(input_list) == perturbations_per_eval:
self._evaluate_batch(
input_list,
expanded_additional_args,
key_list,
batch_summarizers,
kwargs,
)
return [], []
return input_list, key_list
input_list, key_list = _check_and_evaluate(input_list, key_list)
for attack_key in self.attacks:
attack = self.attacks[attack_key]
if attack.num_attempts > 1:
stats = [stat() for stat in self.batch_stats]
else:
stats = [Mean()]
batch_summarizers[attack.name] = Summarizer(stats)
additional_attack_args = {}
for key in attack.additional_args:
if key not in kwargs:
warnings.warn(
f"Additional sample arg {key} not provided for {attack_key}"
)
else:
additional_attack_args[key] = kwargs[key]
for _ in range(attack.num_attempts):
if attack.apply_before_preproc:
attacked_inp = attack.attack_fn(
inputs, **additional_attack_args, **attack.attack_kwargs
)
preproc_attacked_inp = (
self.preproc_fn(attacked_inp)
if self.preproc_fn
else attacked_inp
)
else:
preproc_attacked_inp = attack.attack_fn(
preproc_input, **additional_attack_args, **attack.attack_kwargs
)
input_list.append(preproc_attacked_inp)
key_list.append(attack.name)
input_list, key_list = _check_and_evaluate(input_list, key_list)
if len(input_list) > 0:
final_add_args = _expand_additional_forward_args(
additional_forward_args, len(input_list)
)
self._evaluate_batch(
input_list, final_add_args, key_list, batch_summarizers, kwargs
)
return self._parse_and_update_results(batch_summarizers)
def _parse_and_update_results(
self, batch_summarizers: Dict[str, Summarizer]
) -> Dict[str, Union[MetricResultType, Dict[str, MetricResultType]]]:
results: Dict[str, Union[MetricResultType, Dict[str, MetricResultType]]] = {
ORIGINAL_KEY: self._format_summary(
cast(Union[Dict, List], batch_summarizers[ORIGINAL_KEY].summary)
)["mean"]
}
self.summary_results[ORIGINAL_KEY].update(
self.metric_aggregator(results[ORIGINAL_KEY])
)
for attack_key in self.attacks:
attack = self.attacks[attack_key]
attack_results = self._format_summary(
cast(Union[Dict, List], batch_summarizers[attack.name].summary)
)
results[attack.name] = attack_results
if len(attack_results) == 1:
key = next(iter(attack_results))
if attack.name not in self.summary_results:
self.summary_results[attack.name] = Summarizer(
[stat() for stat in self.aggregate_stats]
)
self.summary_results[attack.name].update(
self.metric_aggregator(attack_results[key])
)
else:
for key in attack_results:
summary_key = f"{attack.name} {key.title()} Attempt"
if summary_key not in self.summary_results:
self.summary_results[summary_key] = Summarizer(
[stat() for stat in self.aggregate_stats]
)
self.summary_results[summary_key].update(
self.metric_aggregator(attack_results[key])
)
return results
def summary(self) -> Dict[str, Dict[str, MetricResultType]]:
r"""
Returns average results over all previous batches evaluated.
Returns:
- **summary** Dict: str -> Dict[str, Union[Tensor, Tuple[Tensor, ...]]]:
Dictionary containing summarized average attack results.
Maps attack name (with "Mean Attempt", "Max Attempt" and "Min Attempt"
suffixes if num_attempts > 1) to dictionary containing a key of "mean"
maintaining summarized results,
which is the running mean of results over all batches
since construction or previous reset call. Tensor metrics are averaged
                over dimension 0 for each batch, in order to aggregate metrics collected
per batch.
"""
return {
key: self._format_summary(
cast(Union[Dict, List], self.summary_results[key].summary)
)
for key in self.summary_results
}
def reset(self) -> None:
r"""
Reset stored average summary results for previous batches
"""
self.summary_results = {}
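if __name__ == "__main__":
    # Minimal self-contained sketch (illustrative, not part of the library):
    # compare a Captum FGSM attack and a plain noise perturbation on a tiny
    # randomly initialized classifier, using per-example accuracy as the metric.
    import torch
    from captum.robust import FGSM

    toy_model = torch.nn.Sequential(torch.nn.Linear(4, 3), torch.nn.Softmax(dim=1))

    def accuracy_metric(model_out: Tensor, target: Tensor, **kwargs: Any) -> Tensor:
        # Per-example accuracy; the first dimension matches the batch size.
        return (torch.argmax(model_out, dim=1) == target).float()

    comparator = AttackComparator(forward_func=toy_model, metric=accuracy_metric)
    comparator.add_attack(
        FGSM(toy_model),
        "FGSM 0.1",
        attack_kwargs={"epsilon": 0.1},
        additional_attack_arg_names=["target"],
    )
    comparator.add_attack(
        lambda x: x + 0.05 * torch.randn_like(x), "Noise", num_attempts=3
    )

    inputs = torch.rand(8, 4)
    targets = torch.randint(0, 3, (8,))
    # The `target` kwarg is forwarded both to the metric and to the FGSM attack.
    batch_results = comparator.evaluate(inputs, target=targets)
    print(batch_results["Original"])
    print(comparator.summary())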
|
#!/usr/bin/env python3
import math
from enum import Enum
from typing import Any, Callable, cast, Dict, Generator, List, Optional, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_format_additional_forward_args,
_reduce_list,
)
from captum._utils.typing import TargetType
from captum.log import log_usage
from captum.robust._core.perturbation import Perturbation
from torch import Tensor
def drange(
min_val: Union[int, float], max_val: Union[int, float], step_val: Union[int, float]
) -> Generator[Union[int, float], None, None]:
curr = min_val
while curr < max_val:
yield curr
curr += step_val
def default_correct_fn(model_out: Tensor, target: TargetType) -> bool:
    assert isinstance(model_out, Tensor) and model_out.ndim == 2, (
        "Model output must be a 2D tensor to use default correct function; "
        "otherwise a custom correct function must be provided"
    )
target_tensor = torch.tensor(target) if not isinstance(target, Tensor) else target
return all(torch.argmax(model_out, dim=1) == target_tensor)
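# Example (illustrative): for model_out = [[0.1, 0.9], [0.8, 0.2]] and
# target = [1, 0], default_correct_fn returns True; with target = [1, 1] it
# returns False, since not every argmax prediction matches its target.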
class MinParamPerturbationMode(Enum):
LINEAR = 0
BINARY = 1
class MinParamPerturbation:
def __init__(
self,
forward_func: Callable,
attack: Union[Callable, Perturbation],
arg_name: str,
arg_min: Union[int, float],
arg_max: Union[int, float],
arg_step: Union[int, float],
mode: str = "linear",
num_attempts: int = 1,
preproc_fn: Optional[Callable] = None,
apply_before_preproc: bool = False,
correct_fn: Optional[Callable] = None,
) -> None:
r"""
Identifies minimal perturbation based on target variable which causes
misclassification (or other incorrect prediction) of target input.
More specifically, given a perturbation parametrized by a single value
(e.g. rotation by angle or mask percentage of top features based on
attribution results), MinParamPerturbation helps identify the minimum value
which leads to misclassification (or other model output change) with the
corresponding perturbed input.
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of a model's forward
function.
attack (Perturbation or Callable): This can either be an instance
of a Captum Perturbation / Attack
or any other perturbation or attack function such
as a torchvision transform.
                The perturb function must accept an additional keyword argument
                (named by ``arg_name``) that is used for the minimal perturbation
                search.
        arg_name (str): Name of the argument / variable parametrizing the attack;
                it must be a kwarg of the attack. Examples are num_dropout or stdevs.
arg_min (int, float): Minimum value of target variable
arg_max (int, float): Maximum value of target variable
(not included in range)
arg_step (int, float): Minimum interval for increase of target variable.
mode (str, optional): Mode for search of minimum attack value;
either ``linear`` for linear search on variable, or ``binary`` for
binary search of variable
Default: ``linear``
num_attempts (int, optional): Number of attempts or trials with
given variable. This should only be set to > 1 for non-deterministic
perturbation / attack functions
Default: ``1``
preproc_fn (Callable, optional): Optional method applied to inputs. Output
of preproc_fn is then provided as input to model, in addition to
additional_forward_args provided to evaluate.
Default: ``None``
apply_before_preproc (bool, optional): Defines whether attack should be
applied before or after preproc function.
Default: ``False``
correct_fn (Callable, optional): This determines whether the perturbed input
leads to a correct or incorrect prediction. By default, this function
is set to the standard classification test for correctness
(comparing argmax of output with target), which requires model output to
be a 2D tensor, returning True if all batch examples are correct and
                False otherwise. Providing this function allows
any custom behavior defining whether the perturbation is successful
at fooling the model. For non-classification use cases, a custom
function must be provided which determines correctness.
The first argument to this function must be the model out;
any additional arguments should be provided through
``correct_fn_kwargs``.
This function should have the following signature::
def correct_fn(model_out: Tensor, **kwargs: Any) -> bool
Method should return a boolean if correct (True) and incorrect (False).
Default: ``None`` (applies standard correct_fn for classification)
"""
self.forward_func = forward_func
self.attack = attack
self.arg_name = arg_name
self.arg_min = arg_min
self.arg_max = arg_max
self.arg_step = arg_step
assert self.arg_max > (
self.arg_min + self.arg_step
), "Step size cannot be smaller than range between min and max"
self.num_attempts = num_attempts
self.preproc_fn = preproc_fn
self.apply_before_preproc = apply_before_preproc
self.correct_fn = cast(
Callable, correct_fn if correct_fn is not None else default_correct_fn
)
assert (
mode.upper() in MinParamPerturbationMode.__members__
), f"Provided perturb mode {mode} is not valid - must be linear or binary"
self.mode = MinParamPerturbationMode[mode.upper()]
def _evaluate_batch(
self,
input_list: List,
additional_forward_args: Any,
correct_fn_kwargs: Optional[Dict[str, Any]],
target: TargetType,
) -> Optional[int]:
if additional_forward_args is None:
additional_forward_args = ()
all_kwargs = {}
if target is not None:
all_kwargs["target"] = target
if correct_fn_kwargs is not None:
all_kwargs.update(correct_fn_kwargs)
if len(input_list) == 1:
model_out = self.forward_func(input_list[0], *additional_forward_args)
out_metric = self.correct_fn(model_out, **all_kwargs)
return 0 if not out_metric else None
else:
batched_inps = _reduce_list(input_list)
model_out = self.forward_func(batched_inps, *additional_forward_args)
current_count = 0
for i in range(len(input_list)):
batch_size = (
input_list[i].shape[0]
if isinstance(input_list[i], Tensor)
else input_list[i][0].shape[0]
)
out_metric = self.correct_fn(
model_out[current_count : current_count + batch_size], **all_kwargs
)
if not out_metric:
return i
current_count += batch_size
return None
def _apply_attack(
self,
inputs: Any,
preproc_input: Any,
attack_kwargs: Optional[Dict[str, Any]],
param: Union[int, float],
) -> Tuple[Any, Any]:
if attack_kwargs is None:
attack_kwargs = {}
if self.apply_before_preproc:
attacked_inp = self.attack(
inputs, **attack_kwargs, **{self.arg_name: param}
)
preproc_attacked_inp = (
self.preproc_fn(attacked_inp) if self.preproc_fn else attacked_inp
)
else:
attacked_inp = self.attack(
preproc_input, **attack_kwargs, **{self.arg_name: param}
)
preproc_attacked_inp = attacked_inp
return preproc_attacked_inp, attacked_inp
def _linear_search(
self,
inputs: Any,
preproc_input: Any,
attack_kwargs: Optional[Dict[str, Any]],
additional_forward_args: Any,
expanded_additional_args: Any,
correct_fn_kwargs: Optional[Dict[str, Any]],
target: TargetType,
perturbations_per_eval: int,
) -> Tuple[Any, Optional[Union[int, float]]]:
input_list = []
attack_inp_list = []
param_list = []
for param in drange(self.arg_min, self.arg_max, self.arg_step):
for _ in range(self.num_attempts):
preproc_attacked_inp, attacked_inp = self._apply_attack(
inputs, preproc_input, attack_kwargs, param
)
input_list.append(preproc_attacked_inp)
param_list.append(param)
attack_inp_list.append(attacked_inp)
if len(input_list) == perturbations_per_eval:
successful_ind = self._evaluate_batch(
input_list,
expanded_additional_args,
correct_fn_kwargs,
target,
)
if successful_ind is not None:
return (
attack_inp_list[successful_ind],
param_list[successful_ind],
)
input_list = []
param_list = []
attack_inp_list = []
if len(input_list) > 0:
final_add_args = _expand_additional_forward_args(
additional_forward_args, len(input_list)
)
successful_ind = self._evaluate_batch(
input_list,
final_add_args,
correct_fn_kwargs,
target,
)
if successful_ind is not None:
return (
attack_inp_list[successful_ind],
param_list[successful_ind],
)
return None, None
def _binary_search(
self,
inputs: Any,
preproc_input: Any,
attack_kwargs: Optional[Dict[str, Any]],
additional_forward_args: Any,
expanded_additional_args: Any,
correct_fn_kwargs: Optional[Dict[str, Any]],
target: TargetType,
perturbations_per_eval: int,
) -> Tuple[Any, Optional[Union[int, float]]]:
min_range = self.arg_min
max_range = self.arg_max
min_so_far = None
min_input = None
while max_range > min_range:
mid_step = ((max_range - min_range) // self.arg_step) // 2
if mid_step == 0 and min_range + self.arg_step < max_range:
mid_step = 1
mid = min_range + (mid_step * self.arg_step)
input_list = []
param_list = []
attack_inp_list = []
attack_success = False
for i in range(self.num_attempts):
preproc_attacked_inp, attacked_inp = self._apply_attack(
inputs, preproc_input, attack_kwargs, mid
)
input_list.append(preproc_attacked_inp)
param_list.append(mid)
attack_inp_list.append(attacked_inp)
if len(input_list) == perturbations_per_eval or i == (
self.num_attempts - 1
):
additional_args = expanded_additional_args
if len(input_list) != perturbations_per_eval:
additional_args = _expand_additional_forward_args(
additional_forward_args, len(input_list)
)
successful_ind = self._evaluate_batch(
input_list,
additional_args,
correct_fn_kwargs,
target,
)
if successful_ind is not None:
attack_success = True
max_range = mid
if min_so_far is None or min_so_far > mid:
min_so_far = mid
min_input = attack_inp_list[successful_ind]
break
input_list = []
param_list = []
attack_inp_list = []
if math.isclose(min_range, mid):
break
if not attack_success:
min_range = mid
return min_input, min_so_far
@log_usage()
def evaluate(
self,
inputs: Any,
additional_forward_args: Optional[Tuple] = None,
target: TargetType = None,
perturbations_per_eval: int = 1,
attack_kwargs: Optional[Dict[str, Any]] = None,
correct_fn_kwargs: Optional[Dict[str, Any]] = None,
) -> Tuple[Any, Optional[Union[int, float]]]:
r"""
This method evaluates the model at each perturbed input and identifies
the minimum perturbation that leads to an incorrect model prediction.
It is recommended to provide a single input (batch size = 1) when using
this to identify a minimal perturbation for the chosen example. If a
batch of examples is provided, the default correct function identifies
the minimal perturbation for at least 1 example in the batch to be
misclassified. A custom correct_fn can be provided to customize
this behavior and define correctness for the batch.
Args:
inputs (Any): Input for which minimal perturbation
is computed. It can be provided as a tensor, tuple of tensors,
or any raw input type (e.g. PIL image or text string).
This input is provided directly as input to preproc function
as well as any attack applied before preprocessing. If no
pre-processing function is provided,
this input is provided directly to the main model and all attacks.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the preprocessing
outputs (or inputs if preproc_fn is None), this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Default: ``None``
target (TargetType): Target class for classification. This is required if
using the default ``correct_fn``.
perturbations_per_eval (int, optional): Allows perturbations of multiple
attacks to be grouped and evaluated in one call of `forward_func`.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
In order to apply this functionality, the output of preproc_fn
(or inputs itself if no preproc_fn is provided) must be a tensor
or tuple of tensors.
Default: ``1``
attack_kwargs (dict, optional): Optional dictionary of keyword
arguments provided to attack function
correct_fn_kwargs (dict, optional): Optional dictionary of keyword
arguments provided to correct function
Returns:
Tuple of (perturbed_inputs, param_val) if successful
else Tuple of (None, None)
- **perturbed inputs** (Any):
Perturbed input (output of attack) which results in incorrect
prediction.
- **param_val** (int or float):
Param value leading to perturbed inputs causing misclassification.
Examples::
>>> def gaussian_noise(inp: Tensor, std: float) -> Tensor:
>>> return inp + std*torch.randn_like(inp)
>>> min_pert = MinParamPerturbation(forward_func=resnet18,
>>>                                 attack=gaussian_noise,
>>>                                 arg_name="std",
>>>                                 arg_min=0.0,
>>>                                 arg_max=2.0,
>>>                                 arg_step=0.01,
>>> )
>>> for images, labels in dataloader:
>>> noised_image, min_std = min_pert.evaluate(inputs=images, target=labels)
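>>> # A sketch of the same search using binary mode (an assumption here is that
>>> # correctness is monotonic in the perturbation size, so fewer evaluations
>>> # are typically needed):
>>> min_pert_bin = MinParamPerturbation(forward_func=resnet18,
>>>                                     attack=gaussian_noise,
>>>                                     arg_name="std",
>>>                                     arg_min=0.0,
>>>                                     arg_max=2.0,
>>>                                     arg_step=0.01,
>>>                                     mode="binary")
>>> noised_image, min_std = min_pert_bin.evaluate(inputs=images, target=labels)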
"""
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
expanded_additional_args = (
_expand_additional_forward_args(
additional_forward_args, perturbations_per_eval
)
if perturbations_per_eval > 1
else additional_forward_args
)
preproc_input = inputs if not self.preproc_fn else self.preproc_fn(inputs)
if self.mode is MinParamPerturbationMode.LINEAR:
search_fn = self._linear_search
elif self.mode is MinParamPerturbationMode.BINARY:
search_fn = self._binary_search
else:
raise NotImplementedError(
"Chosen MinParamPerturbationMode is not supported!"
)
return search_fn(
inputs,
preproc_input,
attack_kwargs,
additional_forward_args,
expanded_additional_args,
correct_fn_kwargs,
target,
perturbations_per_eval,
)
|
#!/usr/bin/env python3
from captum.influence._core.influence import DataInfluence # noqa
from captum.influence._core.similarity_influence import SimilarityInfluence # noqa
from captum.influence._core.tracincp import TracInCP, TracInCPBase # noqa
from captum.influence._core.tracincp_fast_rand_proj import (
TracInCPFast,
TracInCPFastRandProj,
) # noqa
__all__ = [
"DataInfluence",
"SimilarityInfluence",
"TracInCPBase",
"TracInCP",
"TracInCPFast",
"TracInCPFastRandProj",
]
|
from abc import ABC, abstractmethod
from typing import Tuple
import torch
from torch import Tensor
class NearestNeighbors(ABC):
r"""
An abstract class to define a nearest neighbors data structure. Classes
implementing this interface are intended for computing proponents / opponents in
certain implementations of `TracInCPBase`. In particular, it is for use in
implementations which compute proponents / opponents of a test instance by
1) storing representations of training instances within a nearest neighbors data
structure, and 2) finding within that structure the nearest neighbor of the
representation of a test instance. The assumption is that the data structure
stores the tensors passed to the `setup` method, which we refer to as the "stored
tensors". If this class is used to find proponents / opponents, the nearest
neighbors of a tensor should be the stored tensors that have the largest
dot-product with the query.
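
A minimal brute-force sketch of a class satisfying this interface (hypothetical and
for illustration only; it scores stored tensors by dot product and uses
`torch.topk` to pick the `k` largest)::

    >>> class BruteForceNearestNeighbors(NearestNeighbors):
    >>>     def setup(self, data: torch.Tensor) -> None:
    >>>         # flatten each stored tensor to 1D
    >>>         self.data = data.view(len(data), -1)
    >>>     def get_nearest_neighbors(self, query, k):
    >>>         # dot product of every query with every stored tensor
    >>>         scores = torch.matmul(query.view(len(query), -1), self.data.T)
    >>>         distances, indices = torch.topk(scores, k, dim=1)
    >>>         return indices.long(), distances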
"""
@abstractmethod
def get_nearest_neighbors(
self, query: torch.Tensor, k: int
) -> Tuple[Tensor, Tensor]:
r"""
Given a `query`, a tensor of shape (N, *), returns the nearest neighbors in the
"stored tensors" (see above). `query` represents a batch of N tensors, each
of common but arbitrary shape *. We always assume the 0-th dimension indexes
the batch. In use cases of this class for computing proponents / opponents,
the nearest neighbors of a tensor should be the stored tensors with the largest
dot-product with the tensor, and the tensors in `query` will all be 1D,
so that `query` is 2D.
Args:
query (Tensor): tensor representing the batch of tensors for which k-nearest
neighbors are desired. `query` is of shape (N, *), where N is the
size of the batch, i.e. the 0-th dimension of `query` indexes the
batch. * denotes an arbitrary shape, so that each tensor in the
batch can be of a common, but arbitrary shape.
k (int): The number of nearest neighbors to return.
Returns:
results (tuple): A tuple of `(indices, distances)` is returned. `indices`
is a 2D tensor where `indices[i,j]` is the index (within the
"stored tensors" passed to the `setup` method) of the `j`-th
nearest neighbor of the `i`-th instance in query, and
`distances[i,j]` is the corresponding distance. `indices` should
be of dtype `torch.long` so that it can be used to index torch
tensors.
"""
pass
@abstractmethod
def setup(self, data: torch.Tensor) -> None:
r"""
`data` denotes the "stored tensors". These are the tensors within which we
want to find the nearest neighbors to each tensor in a batch of tensors, via a
call to the `get_nearest_neighbors` method. Before we can call it, however,
we need to first store the stored tensors, by doing processing that indexes
the stored tensors in a form that enables nearest-neighbors computation.
This method does that preprocessing, and is assumed to be called before any
call to `get_nearest_neighbors`. For example, this method might put the
stored tensors in a K-d tree. The tensors in the "stored tensors" can be of a
common, but arbitrary shape, denoted *, so that `data` is of shape (N, *),
where N is the number of tensors in the stored tensors. Therefore, the 0-th
dimension indexes the tensors in the stored tensors.
Args:
data (Tensor): A tensor of shape (N, *) representing the stored tensors.
The 0-th dimension indexes the tensors in the stored tensors,
so that `data[i]` is the tensor with index `i`. The nearest
neighbors of a query will be referred to by their index.
"""
pass
class AnnoyNearestNeighbors(NearestNeighbors):
"""
This is an implementation of `NearestNeighbors` that uses the Annoy module. At a
high level, Annoy finds nearest neighbors by constructing binary trees in which
vectors reside at leaf nodes. Vectors near each other will tend to be in the same
leaf node. See https://tinyurl.com/2p89sb2h and https://github.com/spotify/annoy
for more details. Annoy has 1 key parameter: the number of trees to construct.
Increasing the number of trees leads to more accurate results, but increases the time
to create the trees as well as memory usage. As mentioned in the `NearestNeighbors`
documentation, for the use case of computing proponents / opponents, the nearest
neighbors returned should be those with the largest dot product with the query
vector. The term "vector" is used here because Annoy stores 1D vectors. However
in our wrapper around Annoy, we will allow the stored tensors to be of a common
but arbitrary shape *, and flatten them before storing in the Annoy data structure.
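
A minimal usage sketch (assumes the `annoy` package is installed; shapes and values
are illustrative)::

    >>> nn_index = AnnoyNearestNeighbors(num_trees=20)
    >>> nn_index.setup(torch.randn(1000, 128))  # store 1000 flattened vectors
    >>> indices, distances = nn_index.get_nearest_neighbors(torch.randn(5, 128), k=3)
    >>> indices.shape, distances.shape
    (torch.Size([5, 3]), torch.Size([5, 3]))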
"""
def __init__(self, num_trees: int = 10) -> None:
"""
Args:
num_trees (int): The number of trees to use. Increasing this number gives
more accurate computation of nearest neighbors, but requires longer
setup time to create the trees, as well as more memory.
"""
try:
import annoy # noqa
except ImportError:
raise ValueError(
(
"Using `AnnoyNearestNeighbors` requires installing the annoy "
"module. If pip is installed, this can be done with "
"`pip install --user annoy`."
)
)
self.num_trees = num_trees
def setup(self, data: torch.Tensor) -> None:
"""
`data` denotes the "stored tensors". These are the tensors within which we
want to find the nearest neighbors to a query tensor, via a call to the
`get_nearest_neighbors` method. Before we can call `get_nearest_neighbors`,
we need to first store the stored tensors, by doing processing that indexes
the stored tensors in a form that enables nearest-neighbors computation.
This method does that preprocessing, and is assumed to be called before any
call to `get_nearest_neighbors`. In particular, it creates the trees used to
index the stored tensors. This index is built to enable computation of
vectors that have the largest dot-product with the query tensors. The tensors
in the "stored tensors" can be of a common, but arbitrary shape, denoted *, so
that `data` is of shape (N, *), where N is the number of tensors in the stored
tensors. Therefore, the 0-th dimension indexes the tensors in the stored
tensors.
Args:
data (Tensor): A tensor of shape (N, *) representing the stored tensors.
The 0-th dimension indexes the tensors in the stored tensors,
so that `data[i]` is the tensor with index `i`. The nearest
neighbors of a query will be referred to by their index.
"""
import annoy
data = data.view((len(data), -1))
projection_dim = data.shape[1]
self.knn_index = annoy.AnnoyIndex(projection_dim, "dot")
for (i, projection) in enumerate(data):
self.knn_index.add_item(i, projection)
self.knn_index.build(self.num_trees)
def get_nearest_neighbors(
self, query: torch.Tensor, k: int
) -> Tuple[Tensor, Tensor]:
r"""
Given a `query`, a tensor of shape (N, *), returns the nearest neighbors in the
"stored tensors" (see above). `query` represents a batch of N tensors, each
of common but arbitrary shape *. We always assume the 0-th dimension indexes
the batch. In use cases of this class for computing proponents / opponents,
the nearest neighbors of a tensor should be the stored tensors with the largest
dot-product with the tensor, and the tensors in `query` will all be 1D,
so that `query` is 2D. This implementation returns the stored tensors
that have the largest dot-product with the query tensor, and does not constrain
the tensors in `query` or in the stored tensors to be 1D. If tensors are of
dimension greater than 1D, their dot-product will be defined to be the
dot-product of the flattened version of tensors.
Args:
query (Tensor): tensor representing the batch of tensors for which k-nearest
neighbors are desired. `query` is of shape (N, *), where N is the
size of the batch, i.e. the 0-th dimension of `query` indexes the
batch. * denotes an arbitrary shape, so that each tensor in the
batch can be of a common, but arbitrary shape.
k (int): The number of nearest neighbors to return.
Returns:
results (tuple): A tuple of `(indices, distances)` is returned. `indices`
is a 2D tensor where `indices[i,j]` is the index (within the
"stored tensors" passed to the `setup` method) of the `j`-th
nearest neighbor of the `i`-th instance in query, and
`distances[i,j]` is the corresponding distance. `indices` should
be of dtype `torch.long` so that it can be used to index torch
tensors.
"""
query = query.view((len(query), -1))
indices_and_distances = [
self.knn_index.get_nns_by_vector(instance, k, include_distances=True)
for instance in query
]
indices, distances = zip(*indices_and_distances)
indices = torch.Tensor(indices).type(torch.long)
distances = torch.Tensor(distances)
return indices, distances
|
#!/usr/bin/env python3
import warnings
from typing import Any, Callable, List, Optional, Tuple, TYPE_CHECKING, Union
import torch
import torch.nn as nn
from captum._utils.common import _parse_version
from captum._utils.progress import progress
if TYPE_CHECKING:
from captum.influence._core.tracincp import TracInCPBase
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
def _tensor_batch_dot(t1: Tensor, t2: Tensor) -> Tensor:
r"""
Computes pairwise dot products between two tensors.
Args:
    t1 (Tensor): Feature vectors of shape (batch_size_1, *).
    t2 (Tensor): Feature vectors of shape (batch_size_2, *). The * dimensions of
        t1 and t2 must match in total number of elements.
Returns:
Tensor with shape (batch_size_1, batch_size_2) containing the pairwise dot
products. For example, Tensor[i][j] would be the dot product between
t1[i] and t2[j].
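
Example (illustrative shapes; each batch member flattens to 20 features)::

    >>> t1 = torch.randn(3, 4, 5)  # batch of 3, 20 features each
    >>> t2 = torch.randn(7, 20)    # batch of 7, 20 features each
    >>> _tensor_batch_dot(t1, t2).shape
    torch.Size([3, 7])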
"""
msg = (
"Please ensure each batch member has the same feature dimension. "
f"First input has {torch.numel(t1) / t1.shape[0]} features, and "
f"second input has {torch.numel(t2) / t2.shape[0]} features."
)
assert torch.numel(t1) / t1.shape[0] == torch.numel(t2) / t2.shape[0], msg
return torch.mm(
t1.view(t1.shape[0], -1),
t2.view(t2.shape[0], -1).T,
)
def _gradient_dot_product(
input_grads: Tuple[Tensor], src_grads: Tuple[Tensor]
) -> Tensor:
r"""
Computes the dot product between the gradient vector for a model on an input batch
and src batch, for each pairwise batch member. Gradients are passed in as a tuple
corresponding to the trainable parameters returned by model.parameters(). Output
corresponds to a tensor of size (inputs_batch_size, src_batch_size) with all
pairwise dot products.
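
Example (illustrative; every pairwise dot product here equals 3 + 4 = 7)::

    >>> input_grads = (torch.ones(2, 3), torch.ones(2, 4))
    >>> src_grads = (torch.ones(5, 3), torch.ones(5, 4))
    >>> _gradient_dot_product(input_grads, src_grads).shape
    torch.Size([2, 5])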
"""
assert len(input_grads) == len(src_grads), "Mismatching gradient parameters."
iterator = zip(input_grads, src_grads)
total = _tensor_batch_dot(*next(iterator))
for input_grad, src_grad in iterator:
total += _tensor_batch_dot(input_grad, src_grad)
return total
def _jacobian_loss_wrt_inputs(
loss_fn: Union[Module, Callable],
out: Tensor,
targets: Tensor,
vectorize: bool,
reduction_type: str,
) -> Tensor:
r"""
Often, we have a loss function that computes a per-sample loss given a 1D tensor
input, and we want to calculate the jacobian of the loss w.r.t. that input. For
example, the input could be a length K tensor specifying the probability a given
sample belongs to each of K possible classes, and the loss function could be
cross-entropy loss. This function performs that calculation, but does so for a
*batch* of inputs. We create this helper function for two reasons: 1) to handle
differences between PyTorch versions for vectorized jacobian calculations, and
2) because this function does not accept the aforementioned per-sample loss function.
Instead, it accepts a "reduction" loss function that *reduces* the per-sample losses
for a batch into a single loss; using a "reduction" loss improves speed.
The reduction may be either the mean or the sum of the per-sample losses, and this
function provides a uniform way to handle the possible reductions, and also checks
that the reduction used is valid. Regardless of the reduction used,
this function returns the jacobian for the per-sample loss (for each sample in the
batch).
Args:
loss_fn (torch.nn.Module, Callable, or None): The loss function. If a library
defined loss function is provided, it would be expected to be a
torch.nn.Module. If a custom loss is provided, it can be either type,
but must behave as a library loss function would if `reduction='sum'`
or `reduction='mean'`.
out (Tensor): This is a tensor that represents the batch of inputs to
`loss_fn`. In practice, this will be the output of a model; this is
why this argument is named `out`. `out` is a 2D tensor of shape
(batch size, model output dimensionality). We will call `loss_fn` via
`loss_fn(out, targets)`.
targets (Tensor): The labels for the batch of inputs.
vectorize (bool): Flag to use experimental vectorize functionality for
`torch.autograd.functional.jacobian`.
reduction_type (str): The type of reduction used by `loss_fn`. If `loss_fn`
has the "reduction" attribute, we will check that they match. Can
only be "mean" or "sum".
Returns:
jacobians (Tensor): Returns the jacobian of the per-sample loss (implicitly
defined by `loss_fn` and `reduction_type`) w.r.t each sample
in the batch represented by `out`. This is a 2D tensor, where the
first dimension is the batch dimension.
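
Example (a sketch assuming a "sum"-reduction cross-entropy loss; the returned
jacobian has the same shape as `out`)::

    >>> loss_fn = torch.nn.CrossEntropyLoss(reduction="sum")
    >>> out = torch.randn(4, 10)              # batch of 4, 10 classes
    >>> targets = torch.tensor([1, 0, 3, 9])
    >>> jac = _jacobian_loss_wrt_inputs(loss_fn, out, targets, False, "sum")
    >>> jac.shape
    torch.Size([4, 10])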
"""
# TODO: allow loss_fn to be Callable
if isinstance(loss_fn, Module) and hasattr(loss_fn, "reduction"):
msg0 = "Please ensure that loss_fn.reduction is set to `sum` or `mean`"
assert loss_fn.reduction != "none", msg0
msg1 = (
f"loss_fn.reduction ({loss_fn.reduction}) does not match"
f"reduction type ({reduction_type}). Please ensure they are"
" matching."
)
assert loss_fn.reduction == reduction_type, msg1
if reduction_type != "sum" and reduction_type != "mean":
raise ValueError(
f"{reduction_type} is not a valid value for reduction_type. "
"Must be either 'sum' or 'mean'."
)
if _parse_version(torch.__version__) >= (1, 8, 0):
input_jacobians = torch.autograd.functional.jacobian(
lambda out: loss_fn(out, targets), out, vectorize=vectorize
)
else:
input_jacobians = torch.autograd.functional.jacobian(
lambda out: loss_fn(out, targets), out
)
if reduction_type == "mean":
input_jacobians = input_jacobians * len(input_jacobians)
return input_jacobians
def _load_flexible_state_dict(model: Module, path: str) -> float:
r"""
Helper to load pytorch models. This function attempts to find compatibility for
loading models that were trained on different devices / with DataParallel but are
being loaded in a different environment.
Assumes that the model has been saved as a state_dict in some capacity. This can
either be a single state dict, or a nested dictionary which contains the model
state_dict and other information.
Args:
model (torch.nn.Module): The model for which to load a checkpoint
path (str): The filepath to the checkpoint
Returns:
    learning_rate (float): The learning rate stored in the checkpoint if present,
        otherwise 1.0. The module's state_dict is modified in-place.
"""
checkpoint = torch.load(path)
learning_rate = checkpoint.get("learning_rate", 1.0)
# can get learning rate from optimizer state_dict?
if "module." in next(iter(checkpoint)):
if isinstance(model, nn.DataParallel):
model.load_state_dict(checkpoint)
else:
model = nn.DataParallel(model)
model.load_state_dict(checkpoint)
model = model.module
else:
if isinstance(model, nn.DataParallel):
model = model.module
model.load_state_dict(checkpoint)
model = nn.DataParallel(model)
else:
model.load_state_dict(checkpoint)
return learning_rate
def _get_k_most_influential_helper(
influence_src_dataloader: DataLoader,
influence_batch_fn: Callable,
inputs: Tuple[Any, ...],
k: int = 5,
proponents: bool = True,
show_progress: bool = False,
desc: Optional[str] = None,
) -> Tuple[Tensor, Tensor]:
r"""
Helper function that computes the quantities returned by
`TracInCPBase._get_k_most_influential`, using a specific implementation that is
constant memory.
Args:
influence_src_dataloader (DataLoader): The DataLoader, representing training
data, for which we want to compute proponents / opponents.
influence_batch_fn (Callable): A callable that will be called via
`influence_batch_fn(inputs, batch)`, where `batch` is a batch
in the `influence_src_dataloader` argument.
inputs (tuple[Any, ...]): This argument represents the test batch, and is a
single tuple of any, where the last element is assumed to be the labels
for the batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any.
k (int, optional): The number of proponents or opponents to return per test
instance.
Default: 5
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`)
Default: True
show_progress (bool, optional): To compute the proponents (or opponents)
for the batch of examples, we perform computation for each batch in
training dataset `influence_src_dataloader`. If `show_progress` is
true, the progress of this computation will be displayed. In
particular, the number of batches for which the computation has
been performed will be displayed. It will try to use tqdm if
available for advanced features (e.g. time estimation). Otherwise,
it will fall back to a simple output of progress.
Default: False
desc (str, optional): If `show_progress` is true, this is the description to
show when displaying progress. If `desc` is none, no description is
shown.
Default: None
Returns:
(indices, influence_scores): `indices` is a torch.long Tensor that contains the
indices of the proponents (or opponents) for each test example. Its
dimension is `(inputs_batch_size, k)`, where `inputs_batch_size` is the
number of examples in `inputs`. For example, if `proponents==True`,
`indices[i][j]` is the index of the example in training dataset
`influence_src_dataloader` with the `j`-th highest influence score for
the `i`-th example in `inputs`. `indices` is a `torch.long` tensor so that
it can directly be used to index other tensors. Each row of
`influence_scores` contains the influence scores for a different test
example, in sorted order. In particular, `influence_scores[i][j]` is
the influence score of example `indices[i][j]` in training dataset
`influence_src_dataloader` on example `i` in the test batch represented
by `inputs`.
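
A toy sketch of the calling convention (the `influence_batch_fn` below is
hypothetical and simply scores each training example by its value, so the
highest-scoring training examples are the ones with the largest values)::

    >>> from torch.utils.data import DataLoader, TensorDataset
    >>> train_loader = DataLoader(TensorDataset(torch.arange(10.0)), batch_size=4)
    >>> def influence_batch_fn(inputs, batch):
    >>>     # return a (test batch size, train batch size) score matrix
    >>>     return batch[0].repeat(len(inputs[0]), 1)
    >>> test_batch = (torch.zeros(2, 3), torch.zeros(2))
    >>> indices, scores = _get_k_most_influential_helper(
    >>>     train_loader, influence_batch_fn, test_batch, k=2
    >>> )
    >>> indices
    tensor([[9, 8],
            [9, 8]])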
"""
# For each test instance, maintain the best indices and corresponding distances
# initially, these will be empty
topk_indices = torch.Tensor().long()
topk_tracin_scores = torch.Tensor()
multiplier = 1.0 if proponents else -1.0
# needed to map from relative index in a batch to index within entire `dataloader`
num_instances_processed = 0
# if show_progress, create progress bar
total: Optional[int] = None
if show_progress:
try:
total = len(influence_src_dataloader)
except AttributeError:
pass
influence_src_dataloader = progress(
influence_src_dataloader,
desc=desc,
total=total,
)
for batch in influence_src_dataloader:
# calculate tracin_scores for the batch
batch_tracin_scores = influence_batch_fn(inputs, batch)
batch_tracin_scores *= multiplier
# get the top-k indices and tracin_scores for the batch
batch_size = batch_tracin_scores.shape[1]
batch_topk_tracin_scores, batch_topk_indices = torch.topk(
batch_tracin_scores, min(batch_size, k), dim=1
)
batch_topk_indices = batch_topk_indices + num_instances_processed
num_instances_processed += batch_size
# combine the top-k for the batch with those for previously seen batches
topk_indices = torch.cat(
[topk_indices.to(batch_topk_indices.device), batch_topk_indices], dim=1
)
topk_tracin_scores = torch.cat(
[
topk_tracin_scores.to(batch_topk_tracin_scores.device),
batch_topk_tracin_scores,
],
dim=1,
)
# retain only the top-k in terms of tracin_scores
topk_tracin_scores, topk_argsort = torch.topk(
topk_tracin_scores, min(k, topk_indices.shape[1]), dim=1
)
topk_indices = torch.gather(topk_indices, dim=1, index=topk_argsort)
# if seeking opponents, we were actually keeping track of negative tracin_scores
topk_tracin_scores *= multiplier
return topk_indices, topk_tracin_scores
class _DatasetFromList(Dataset):
def __init__(self, _l: List[Any]) -> None:
self._l = _l
def __getitem__(self, i: int) -> Any:
return self._l[i]
def __len__(self) -> int:
return len(self._l)
def _format_inputs_dataset(inputs_dataset: Union[Tuple[Any, ...], DataLoader]):
# if `inputs_dataset` is not a `DataLoader`, turn it into one.
# `_DatasetFromList` turns a list into a `Dataset` where `__getitem__`
# returns an element in the list, and using it to construct a `DataLoader`
# with `batch_size=None` gives a `DataLoader` that yields a single batch.
if not isinstance(inputs_dataset, DataLoader):
inputs_dataset = DataLoader(
_DatasetFromList([inputs_dataset]), shuffle=False, batch_size=None
)
return inputs_dataset
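# Illustrative usage (a sketch): a single batch such as `(features, labels)` becomes a
# one-batch DataLoader, so downstream code can iterate uniformly, e.g.
#     loader = _format_inputs_dataset((torch.randn(8, 3), torch.zeros(8)))
#     next(iter(loader))  # yields the original (features, labels) batch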
def _self_influence_by_batches_helper(
self_influence_batch_fn: Callable,
instance_name: str,
inputs_dataset: Union[Tuple[Any, ...], DataLoader],
show_progress: bool = False,
) -> Tensor:
"""
Computes self influence scores for the examples in `inputs_dataset`, which is
either a single batch or a Pytorch `DataLoader` that yields batches. The self
influence scores for a single batch are computed using the
`self_influence_batch_fn` input. Note that if `inputs_dataset` is a single batch,
this will call `model` on that single batch, where `model` is the model used to
compute self influence scores by `self_influence_batch_fn`, and if `inputs_dataset`
yields batches, this will call `model` on each batch that is yielded. Therefore,
please ensure that for both cases, the batch(es) that `model` is called
with are not too large, so that there will not be an out-of-memory error. This
implementation performs an outer iteration over all batches that
`inputs_dataset` represents, and an inner iteration over checkpoints. The advantage
of this implementation is that showing the progress of the computation is
straightforward.
Args:
self_influence_batch_fn (Callable): This is the function that computes self
influence scores for a single batch.
instance_name (str): This is the name of the implementation class that
`self_influence_batch_fn` is a method of. This is used for displaying
warning messages.
inputs_dataset (tuple or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`,
and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset`. Please see documentation for the
`train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs_dataset` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In particular, the number of batches for which self
influence scores have been computed will be displayed. It will try
to use tqdm if available for advanced features (e.g. time
estimation). Otherwise, it will fall back to a simple output of
progress.
Default: False
Returns:
self_influence_scores (Tensor): This is a 1D tensor containing the self
influence scores of all examples in `inputs_dataset`, regardless of
whether it represents a single batch or a `DataLoader` that yields
batches.
"""
# If `inputs_dataset` is not a `DataLoader`, turn it into one.
inputs_dataset = _format_inputs_dataset(inputs_dataset)
# If `show_progress` is true, create a progress bar that keeps track of how
# many batches have been processed
if show_progress:
# First, try to determine length of progress bar if possible, with a
# default of `None`
inputs_dataset_len = None
try:
inputs_dataset_len = len(inputs_dataset)
except TypeError:
warnings.warn(
"Unable to determine the number of batches in `inputs_dataset`. "
"Therefore, if showing the progress of the computation of self "
"influence scores, only the number of batches processed can be "
"displayed, and not the percentage completion of the computation, "
"nor any time estimates."
)
# then create the progress bar
inputs_dataset = progress(
inputs_dataset,
desc=f"Using {instance_name} to compute self influence. Processing batch",
total=inputs_dataset_len,
)
# To compute self influence scores for each batch, we use
# `_self_influence_by_checkpoints`, which can accept a tuple representing a
# single batch as the `inputs_dataset` argument (as well as a DataLoader).
# Because we are already displaying progress in terms of number of batches
# processed in this method, we will not show progress for the call to
# `_self_influence_by_checkpoints`.
return torch.cat(
[
self_influence_batch_fn(batch, show_progress=False)
for batch in inputs_dataset
]
)
def _check_loss_fn(
influence_instance: "TracInCPBase",
loss_fn: Optional[Union[Module, Callable]],
loss_fn_name: str,
sample_wise_grads_per_batch: Optional[bool] = None,
) -> str:
"""
This checks whether `loss_fn` satisfies the requirements assumed of all
implementations of `TracInCPBase`. It works regardless of whether the
implementation has the `sample_wise_grads_per_batch` attribute.
It returns the reduction type of the loss_fn. If `sample_wise_grads_per_batch`
is not provided, we assume the implementation does not have that attribute.
"""
# if `loss_fn` is `None`, there is nothing to check. then, the reduction type is
# only used by `_compute_jacobian_wrt_params_with_sample_wise_trick`, where
# reduction type should be "sum" if `loss_fn` is `None`.
if loss_fn is None:
return "sum"
# perhaps since `Module` is an implementation of `Callable`, this has redundancy
assert isinstance(loss_fn, Module) or callable(loss_fn)
reduction_type = "none"
# If we are able to access the reduction used by `loss_fn`, we check whether
# the reduction is compatible with `sample_wise_grads_per_batch`, if it has the
# attribute.
if hasattr(loss_fn, "reduction"):
reduction = loss_fn.reduction # type: ignore
if sample_wise_grads_per_batch is None:
assert reduction in [
"sum",
"mean",
], 'reduction for `loss_fn` must be "sum" or "mean"'
reduction_type = str(reduction)
elif sample_wise_grads_per_batch:
assert reduction in ["sum", "mean"], (
'reduction for `loss_fn` must be "sum" or "mean" when '
"`sample_wise_grads_per_batch` is True"
)
reduction_type = str(reduction)
else:
assert reduction == "none", (
'reduction for `loss_fn` must be "none" when '
"`sample_wise_grads_per_batch` is False"
)
else:
# if we are unable to access the reduction used by `loss_fn`, we warn
# the user about the assumptions we are making regarding the reduction
# used by `loss_fn`
if sample_wise_grads_per_batch is None:
warnings.warn(
f'Since `{loss_fn_name}` has no "reduction" attribute, the '
f'implementation assumes that `{loss_fn_name}` is a "reduction" loss '
"function that reduces the per-example losses by taking their *sum*. "
f"If `{loss_fn_name}` instead reduces the per-example losses by "
f"taking their mean, please set the reduction attribute of "
f'`{loss_fn_name}` to "mean", i.e. '
f'`{loss_fn_name}.reduction = "mean"`.'
)
reduction_type = "sum"
elif sample_wise_grads_per_batch:
warnings.warn(
f"Since `{loss_fn_name}`` has no 'reduction' attribute, and "
"`sample_wise_grads_per_batch` is True, the implementation assumes "
f"that `{loss_fn_name}` is a 'reduction' loss function that reduces "
f"the per-example losses by taking their *sum*. If `{loss_fn_name}` "
"instead reduces the per-example losses by taking their mean, "
f'please set the reduction attribute of `{loss_fn_name}` to "mean", '
f'i.e. `{loss_fn_name}.reduction = "mean"`. Note that if '
"`sample_wise_grads_per_batch` is True, the implementation "
"assumes the reduction is either a sum or mean reduction."
)
reduction_type = "sum"
else:
warnings.warn(
f'Since `{loss_fn_name}` has no "reduction" attribute, and '
"`sample_wise_grads_per_batch` is False, the implementation "
f'assumes that `{loss_fn_name}` is a "per-example" loss function (see '
f"documentation for `{loss_fn_name}` for details). Please ensure "
"that this is the case."
)
return reduction_type
|
#!/usr/bin/env python3
import glob
import warnings
from abc import abstractmethod
from os.path import join
from typing import (
Any,
Callable,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
Type,
Union,
)
import torch
from captum._utils.av import AV
from captum._utils.common import _get_module_from_name, _parse_version
from captum._utils.gradient import (
_compute_jacobian_wrt_params,
_compute_jacobian_wrt_params_with_sample_wise_trick,
)
from captum._utils.progress import NullProgress, progress
from captum.influence._core.influence import DataInfluence
from captum.influence._utils.common import (
_check_loss_fn,
_format_inputs_dataset,
_get_k_most_influential_helper,
_gradient_dot_product,
_load_flexible_state_dict,
_self_influence_by_batches_helper,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
r"""
Note: methods starting with "_" are protected, not private, and can be overridden in
child classes. They are not part of the API.
Implements abstract DataInfluence class and provides implementation details for
influence computation based on the logic provided in TracIn paper
(https://arxiv.org/abs/2002.08484).
The TracIn paper proposes an idealized notion of influence which can be represented by
the total amount a training example reduces loss for a test example via a training
process such as stochastic gradient descent. As this idealized notion of influence is
impractical to compute, the TracIn paper proposes instead to compute an influence
score, which uses a first-order approximation for the change in loss for a test example
by a training example, which is accumulated across saved model checkpoints. This
influence score is accumulated via a summed dot-product of gradient vectors for the
scores/loss of a test and training example.
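Schematically (a sketch of the checkpoint-based approximation described in the paper,
where `w_t` denotes the model parameters saved at checkpoint `t` and `eta_t` the
learning rate in use at that checkpoint):

    TracInCP(z_train, z_test) = sum_t eta_t * dot(grad_w loss(w_t, z_train),
                                                  grad_w loss(w_t, z_test))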
"""
"""
TODO: Support for checkpoint type. Currently only supports model parameters as saved
checkpoints. Can use enum or string.
Potential implementation from design doc:
checkpoint_type (Enum = [Parameters | Loss_Grad]): For performance,
saved / loaded checkpoints can be either model parameters, or
gradient of the loss function on an input w.r.t parameters.
"""
class KMostInfluentialResults(NamedTuple):
"""
This namedtuple stores the results of using the `influence` method. This method
is implemented by all subclasses of `TracInCPBase` to calculate
proponents / opponents. The `indices` field stores the indices of the
proponents / opponents for each example in the test dataset. For example, if
finding opponents, `indices[i][j]` stores the index in the training data of the
example with the `j`-th highest influence score on the `i`-th example in the test
dataset. Similarly, the `influence_scores` field stores the actual influence scores,
so that `influence_scores[i][j]` is the influence score of example `indices[i][j]`
in the training data on example `i` of the test dataset. Please see
`TracInCPBase.influence` for more details.
"""
indices: Tensor
influence_scores: Tensor
class TracInCPBase(DataInfluence):
"""
To implement the `influence` method, classes inheriting from `TracInCPBase` will
separately implement the private `_self_influence`, `_get_k_most_influential`,
and `_influence` methods. The public `influence` method is a wrapper for these
private methods.
"""
def __init__(
self,
model: Module,
train_dataset: Union[Dataset, DataLoader],
checkpoints: Union[str, List[str], Iterator],
checkpoints_load_func: Callable = _load_flexible_state_dict,
loss_fn: Optional[Union[Module, Callable]] = None,
batch_size: Union[int, None] = 1,
test_loss_fn: Optional[Union[Module, Callable]] = None,
) -> None:
r"""
Args:
model (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
train_dataset (torch.utils.data.Dataset or torch.utils.data.DataLoader):
In the `influence` method, we compute the influence score of
training examples on examples in a test batch.
This argument represents the training dataset containing those
training examples. In order to compute those influence scores, we
will create a Pytorch DataLoader yielding batches of training
examples that is then used for processing. If this argument is
already a Pytorch Dataloader, that DataLoader can be directly
used for processing. If it is instead a Pytorch Dataset, we will
create a DataLoader using it, with batch size specified by
`batch_size`. For efficiency purposes, the batch size of the
DataLoader used for processing should be as large as possible, but
not too large, so that certain intermediate quantities created
from a batch still fit in memory. Therefore, if
`train_dataset` is a Dataset, `batch_size` should be large.
If `train_dataset` was already a DataLoader to begin with,
it should have been constructed to have a large batch size. It is
assumed that the Dataloader (regardless of whether it is created
from a Pytorch Dataset or not) yields tuples. For a `batch` that is
yielded, of length `L`, it is assumed that the forward function of
`model` accepts `L-1` arguments, and the last element of `batch` is
the label. In other words, `model(*batch[:-1])` gives the output of
`model`, and `batch[-1]` are the labels for the batch.
checkpoints (str, list[str], or Iterator): Either the path to the
directory used to store and retrieve model checkpoints, a list of
filepaths with checkpoints from which to load, or an iterator which
returns objects from which to load checkpoints.
checkpoints_load_func (Callable, optional): The function to load a saved
checkpoint into a model to update its parameters, and get the
learning rate if it is saved. By default uses a utility to load a
model saved as a state dict.
Default: _load_flexible_state_dict
loss_fn (Callable, optional): The loss function applied to model.
Default: None
batch_size (int or None, optional): Batch size of the DataLoader created to
iterate through `train_dataset`, if it is a Dataset.
`batch_size` should be chosen as large as possible so that certain
intermediate quantities created from a batch still fit in memory.
Specific implementations of `TracInCPBase` will detail the size of
the intermediate quantities. `batch_size` must be an int if
`train_dataset` is a Dataset. If `train_dataset`
is a DataLoader, then `batch_size` is ignored as an argument.
Default: 1
test_loss_fn (Callable, optional): In some cases, one may want to use a
separate loss functions for training examples, i.e. those in
`train_dataset`, and for test examples, i.e. those
represented by the `inputs` and `targets` arguments to the
`influence` method. For example, if one wants to calculate the
influence score of a training example on a test example's
prediction for a fixed class, `test_loss_fn` could map from the
logits for all classes to the logits for a fixed class.
`test_loss_fn` needs to satisfy the same constraints as `loss_fn`.
If not provided, the loss function for test examples is assumed to
be the same as the loss function for training examples, i.e.
`loss_fn`.
Default: None
"""
self.model = model
if isinstance(checkpoints, str):
self.checkpoints = AV.sort_files(glob.glob(join(checkpoints, "*")))
elif isinstance(checkpoints, List) and isinstance(checkpoints[0], str):
self.checkpoints = AV.sort_files(checkpoints)
else:
self.checkpoints = list(checkpoints) # cast to avoid mypy error
if isinstance(self.checkpoints, List):
assert len(self.checkpoints) > 0, "No checkpoints saved!"
self.checkpoints_load_func = checkpoints_load_func
self.loss_fn = loss_fn
# If test_loss_fn not provided, it's assumed to be same as loss_fn
self.test_loss_fn = loss_fn if test_loss_fn is None else test_loss_fn
self.batch_size = batch_size
if not isinstance(train_dataset, DataLoader):
assert isinstance(batch_size, int), (
"since the `train_dataset` argument was a `Dataset`, "
"`batch_size` must be an int."
)
self.train_dataloader = DataLoader(train_dataset, batch_size, shuffle=False)
else:
self.train_dataloader = train_dataset
self.train_dataloader_len: Optional[int] = None
try:
# since we will calculate the number of batches in
# `self.train_dataloader` whenever we use progress bar, calculate
# it once in initialization, for re-use.
self.train_dataloader_len = len(self.train_dataloader)
except TypeError:
warnings.warn(
"Unable to determine the number of batches in training dataset "
"`train_dataset`. Therefore, if showing the progress of computations, "
"only the number of batches processed can be displayed, and not the "
"percentage completion of the computation, nor any time estimates."
)
@abstractmethod
def self_influence(
self,
inputs: Optional[Union[Tuple[Any, ...], DataLoader]] = None,
show_progress: bool = False,
) -> Tensor:
"""
If `inputs` is not specified, calculates the self influence
scores for the training dataset `train_dataset`. Otherwise, computes
self influence scores for the examples in `inputs`,
which is either a single batch or a Pytorch `DataLoader` that yields
batches. Therefore, in this case, the computed self influence scores
are *not* for the examples in training dataset `train_dataset`.
Note that if `inputs` is a single batch, this
will call `model` on that single batch, and if `inputs` yields
batches, this will call `model` on each batch that is yielded. Therefore,
please ensure that for both cases, the batch(es) that `model` is called
with are not too large, so that there will not be an out-of-memory error.
Args:
inputs (tuple or DataLoader, optional): This specifies the
dataset for which self influence scores will be computed.
Either a single tuple of any, or a `DataLoader`, where each
batch yielded is a tuple of type any. In either case, the tuple
represents a single batch, where the last element is assumed to
be the labels for the batch. That is, `model(*batch[0:-1])`
produces the output for `model`, and `batch[-1]` are the labels,
if any. This is the same assumption made for each batch yielded
by training dataset `train_dataset`. Please see documentation for
the `train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch. If not provided
or `None`, self influence scores will be computed for training
dataset `train_dataset`, which yields batches satisfying the
above assumptions.
Default: None.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In more detail, this computation will iterate over all
checkpoints (provided as the `checkpoints` initialization argument)
in an outer loop, and iterate over all batches that
`inputs` represents in an inner loop. Therefore, the
total number of (checkpoint, batch) combinations that need to be
iterated over is
(# of checkpoints x # of batches that `inputs` represents).
If `show_progress` is True, the total progress of both the outer
iteration over checkpoints and the inner iteration over batches is
displayed. It will try to use tqdm if available for advanced
features (e.g. time estimation). Otherwise, it will fall back to a
simple output of progress.
Default: False
Returns:
self_influence_scores (Tensor): This is a 1D tensor containing the self
influence scores of all examples in `inputs`, regardless of
whether it represents a single batch or a `DataLoader` that yields
batches.
"""
pass
@abstractmethod
def _get_k_most_influential(
self,
inputs: Tuple[Any, ...],
k: int = 5,
proponents: bool = True,
show_progress: bool = False,
) -> KMostInfluentialResults:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): The number of proponents or opponents to return per test
example.
Default: 5
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`)
Default: True
show_progress (bool, optional): To compute the proponents (or opponents)
for the batch of examples, we perform computation for each batch in
training dataset `train_dataset`. If `show_progress` is
true, the progress of this computation will be displayed. In
particular, the number of batches for which the computation has
been performed will be displayed. It will try to use tqdm if
available for advanced features (e.g. time estimation). Otherwise,
it will fall back to a simple output of progress.
Default: False
Returns:
(indices, influence_scores) (namedtuple): `indices` is a torch.long Tensor
that contains the indices of the proponents (or opponents) for each
test example. Its dimension is `(inputs_batch_size, k)`, where
`inputs_batch_size` is the number of examples in `inputs`. For
example, if `proponents==True`, `indices[i][j]` is the index of the
example in training dataset `train_dataset` with the
`j`-th highest influence score for the `i`-th example in `inputs`.
`indices` is a `torch.long` tensor so that it can directly be used
to index other tensors. Each row of `influence_scores` contains the
influence scores for a different test example, in sorted order. In
particular, `influence_scores[i][j]` is the influence score of
example `indices[i][j]` in training dataset `train_dataset`
on example `i` in the test batch represented by `inputs`.
"""
pass
@abstractmethod
def _influence(
self,
inputs: Tuple[Any, ...],
show_progress: bool = False,
) -> Tensor:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): To compute the influence of examples in
training dataset `train_dataset`, we compute the influence
of each batch. If `show_progress` is true, the progress of this
computation will be displayed. In particular, the number of batches
for which influence has been computed will be displayed. It will
try to use tqdm if available for advanced features (e.g. time
estimation). Otherwise, it will fall back to a simple output of
progress.
Default: False
Returns:
influence_scores (Tensor): Influence scores over the entire
training dataset `train_dataset`. Dimensionality is
(inputs_batch_size, src_dataset_size). For example:
influence_scores[i][j] = the influence score for the j-th training
example to the i-th example in the test batch.
"""
pass
@abstractmethod
def influence( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
k: Optional[int] = None,
proponents: bool = True,
unpack_inputs: bool = True,
show_progress: bool = False,
) -> Union[Tensor, KMostInfluentialResults]:
r"""
This is the key method of this class, and can be run in 2 different modes,
where the mode that is run depends on the arguments passed to this method:
- influence score mode: This mode is used if `k` is None. This mode computes
the influence score of every example in training dataset `train_dataset`
on every example in the test batch represented by `inputs`.
- k-most influential mode: This mode is used if `k` is not None, and an int.
This mode computes the proponents or opponents of every example in the
test batch represented by `inputs`. In particular, for each test example in
the test batch, this mode computes its proponents (resp. opponents),
which are the indices in the training dataset `train_dataset` of the
training examples with the `k` highest (resp. lowest) influence scores on the
test example. Proponents are computed if `proponents` is True. Otherwise,
opponents are computed. For each test example, this method also returns the
actual influence score of each proponent (resp. opponent) on the test
example.
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): If not provided or `None`, the influence score mode will
be run. Otherwise, the k-most influential mode will be run,
and `k` is the number of proponents / opponents to return per
example in the test batch.
Default: None
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`), if running in k-most influential
mode.
Default: True
show_progress (bool, optional): For all modes, computation of results
requires "training dataset computations": computations for each
batch in the training dataset `train_dataset`, which may
take a long time. If `show_progress` is true, the progress of
"training dataset computations" will be displayed. In particular,
the number of batches for which computations have been performed
will be displayed. It will try to use tqdm if available for
advanced features (e.g. time estimation). Otherwise, it will
fall back to a simple output of progress.
Default: False
Returns:
The return value of this method depends on which mode is run.
- influence score mode: if this mode is run (`k` is None), returns a 2D
tensor `influence_scores` of shape `(input_size, train_dataset_size)`,
where `input_size` is the number of examples in the test dataset, and
`train_dataset_size` is the number of examples in training dataset
`train_dataset`. In other words, `influence_scores[i][j]` is the
influence score of the `j`-th example in `train_dataset` on the `i`-th
example in the test batch.
- k-most influential mode: if this mode is run (`k` is an int), returns
a namedtuple `(indices, influence_scores)`. `indices` is a 2D tensor of
shape `(input_size, k)`, where `input_size` is the number of examples in
the test batch. If computing proponents (resp. opponents),
`indices[i][j]` is the index in training dataset `train_dataset` of the
example with the `j`-th highest (resp. lowest) influence score (out of
the examples in `train_dataset`) on the `i`-th example in the test
dataset. `influence_scores` contains the corresponding influence scores.
In particular, `influence_scores[i][j]` is the influence score of example
`indices[i][j]` in `train_dataset` on example `i` in the test batch
represented by `inputs`.
"""
pass
@classmethod
def get_name(cls: Type["TracInCPBase"]) -> str:
r"""
Create readable class name. Due to the nature of the names of `TracInCPBase`
subclasses, this simply returns the class name. For example, for a class called
TracInCP, we return the string TracInCP.
Returns:
name (str): a readable class name
"""
return cls.__name__
def _influence_route_to_helpers(
influence_instance: TracInCPBase,
inputs: Tuple[Any, ...],
k: Optional[int] = None,
proponents: bool = True,
**kwargs,
) -> Union[Tensor, KMostInfluentialResults]:
"""
This is a helper function called by `TracInCP.influence` and
`TracInCPFast.influence`. Those methods share a common logic in that they assume
an instance of their respective classes implements 2 private methods
(`_influence`, `_get_k_most_influential`), and the logic of
which private method to call is common, as described in the documentation of the
`influence` method. The arguments and return values of this function are the exact
same as the `influence` method. Note that `influence_instance` refers to the
instance for which the `influence` method was called.
"""
if k is None:
return influence_instance._influence(inputs, **kwargs)
else:
return influence_instance._get_k_most_influential(
inputs,
k,
proponents,
**kwargs,
)
class TracInCP(TracInCPBase):
def __init__(
self,
model: Module,
train_dataset: Union[Dataset, DataLoader],
checkpoints: Union[str, List[str], Iterator],
checkpoints_load_func: Callable = _load_flexible_state_dict,
layers: Optional[List[str]] = None,
loss_fn: Optional[Union[Module, Callable]] = None,
batch_size: Union[int, None] = 1,
test_loss_fn: Optional[Union[Module, Callable]] = None,
sample_wise_grads_per_batch: bool = False,
) -> None:
r"""
Args:
model (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
train_dataset (torch.utils.data.Dataset or torch.utils.data.DataLoader):
In the `influence` method, we compute the influence score of
training examples on examples in a test batch.
This argument represents the training dataset containing those
training examples. In order to compute those influence scores, we
will create a Pytorch DataLoader yielding batches of training
examples that is then used for processing. If this argument is
already a Pytorch Dataloader, that DataLoader can be directly
used for processing. If it is instead a Pytorch Dataset, we will
create a DataLoader using it, with batch size specified by
`batch_size`. For efficiency purposes, the batch size of the
DataLoader used for processing should be as large as possible, but
not too large, so that certain intermediate quantities created
from a batch still fit in memory. Therefore, if
`train_dataset` is a Dataset, `batch_size` should be large.
If `train_dataset` was already a DataLoader to begin with,
it should have been constructed to have a large batch size. It is
assumed that the Dataloader (regardless of whether it is created
from a Pytorch Dataset or not) yields tuples. For a `batch` that is
yielded, of length `L`, it is assumed that the forward function of
`model` accepts `L-1` arguments, and the last element of `batch` is
the label. In other words, `model(*batch[:-1])` gives the output of
`model`, and `batch[-1]` are the labels for the batch.
checkpoints (str, list[str], or Iterator): Either the path to the
directory used to store and retrieve model checkpoints, a list of
filepaths with checkpoints from which to load, or an iterator which
returns objects from which to load checkpoints.
checkpoints_load_func (Callable, optional): The function to load a saved
checkpoint into a model to update its parameters, and get the
learning rate if it is saved. By default uses a utility to load a
model saved as a state dict.
Default: _load_flexible_state_dict
layers (list[str] or None, optional): A list of layer names for which
gradients should be computed. If `layers` is None, gradients will
be computed for all layers. Otherwise, they will only be computed
for the layers specified in `layers`.
Default: None
loss_fn (Callable, optional): The loss function applied to model. There
are two options for the return type of `loss_fn`. First, `loss_fn`
can be a "per-example" loss function - returns a 1D Tensor of
losses for each example in a batch. `nn.BCELoss(reduction="none")`
would be an "per-example" loss function. Second, `loss_fn` can be
a "reduction" loss function that reduces the per-example losses,
in a batch, and returns a single scalar Tensor. For this option,
the reduction must be the *sum* or the *mean* of the per-example
losses. For instance, `nn.BCELoss(reduction="sum")` is acceptable.
Note for the first option, the `sample_wise_grads_per_batch`
argument must be False, and for the second option,
`sample_wise_grads_per_batch` must be True. Also note that for
the second option, if `loss_fn` has no "reduction" attribute,
the implementation assumes that the reduction is the *sum* of the
per-example losses. If this is not the case, i.e. the reduction
is the *mean*, please set the "reduction" attribute of `loss_fn`
to "mean", i.e. `loss_fn.reduction = "mean"`.
Default: None
batch_size (int or None, optional): Batch size of the DataLoader created to
iterate through `train_dataset`, if it is a Dataset.
`batch_size` should be chosen as large as possible so that certain
intermediate quantities created from a batch still fit in memory.
Specific implementations of `TracInCPBase` will detail the size of
the intermediate quantities. `batch_size` must be an int if
`train_dataset` is a Dataset. If `train_dataset`
is a DataLoader, then `batch_size` is ignored as an argument.
Default: 1
test_loss_fn (Callable, optional): In some cases, one may want to use a
separate loss function for training examples, i.e. those in
`train_dataset`, and for test examples, i.e. those
represented by the `inputs` and `targets` arguments to the
`influence` method. For example, if one wants to calculate the
influence score of a training example on a test example's
prediction for a fixed class, `test_loss_fn` could map from the
logits for all classes to the logits for a fixed class.
`test_loss_fn` needs to satisfy the same constraints as `loss_fn`.
Thus, the same checks that we apply to `loss_fn` are also applied
to `test_loss_fn`, if the latter is provided. Note that the
constraints on `loss_fn` and `test_loss_fn` both depend on
`sample_wise_grads_per_batch`. This means `loss_fn` and
`test_loss_fn` must either both be "per-example" loss functions,
or both be "reduction" loss functions. If not provided, the loss
function for test examples is assumed to be the same as the loss
function for training examples, i.e. `loss_fn`.
Default: None
sample_wise_grads_per_batch (bool, optional): PyTorch's native gradient
computations w.r.t. model parameters aggregate the results over a
batch and do not allow access to sample-wise gradients w.r.t.
model parameters. This forces us to iterate over each sample in
the batch if we want sample-wise gradients, which is computationally
inefficient. We offer an implementation of batch-wise gradient
computations w.r.t. model parameters which is computationally
more efficient. This implementation can be enabled by setting the
`sample_wise_grads_per_batch` argument to `True`, and should be
enabled if and only if the `loss_fn` argument is a "reduction" loss
function. For example, `nn.BCELoss(reduction="sum")` would be a
valid `loss_fn` if this implementation is enabled (see
documentation for `loss_fn` for more details). Note that our
current implementation enables batch-wise gradient computations
only for a limited number of PyTorch nn.Modules: Conv2D and Linear.
This list will be expanded in the near future. Therefore, please
do not enable this implementation if gradients will be computed
for other kinds of layers.
Default: False
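A minimal construction sketch follows. The model, dataset, and checkpoint
directory below are hypothetical and are shown only to illustrate how the
arguments fit together (not the only valid configuration)::
>>> import torch
>>> import torch.nn as nn
>>> from torch.utils.data import TensorDataset
>>> from captum.influence import TracInCP
>>> net = nn.Sequential(nn.Linear(10, 5), nn.ReLU(), nn.Linear(5, 2))
>>> train_dataset = TensorDataset(
>>>     torch.randn(100, 10), torch.randint(0, 2, (100,))
>>> )
>>> tracin = TracInCP(
>>>     net,
>>>     train_dataset,
>>>     checkpoints="checkpoints/",  # hypothetical directory of saved state dicts
>>>     loss_fn=nn.CrossEntropyLoss(reduction="sum"),  # a "reduction" loss
>>>     batch_size=32,
>>>     sample_wise_grads_per_batch=True,  # required when using a "reduction" loss
>>> )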
"""
TracInCPBase.__init__(
self,
model,
train_dataset,
checkpoints,
checkpoints_load_func,
loss_fn,
batch_size,
test_loss_fn,
)
self.sample_wise_grads_per_batch = sample_wise_grads_per_batch
# check `loss_fn`
self.reduction_type = _check_loss_fn(
self, loss_fn, "loss_fn", sample_wise_grads_per_batch
)
# check `test_loss_fn` if it was provided
self.test_reduction_type = (
self.reduction_type
if test_loss_fn is None
else _check_loss_fn(
self, test_loss_fn, "test_loss_fn", sample_wise_grads_per_batch
)
)
r"""
TODO: Either restore model state after done (would have to place functionality
within influence to restore after every influence call)? or make a copy so that
changes to grad_requires aren't persistent after using TracIn.
"""
self.layer_modules = None
if layers is not None:
assert isinstance(layers, list), "`layers` should be a list!"
assert len(layers) > 0, "`layers` cannot be empty!"
assert isinstance(
layers[0], str
), "`layers` should contain str layer names."
self.layer_modules = [
_get_module_from_name(self.model, layer) for layer in layers
]
for layer, layer_module in zip(layers, self.layer_modules):
for name, param in layer_module.named_parameters():
if not param.requires_grad:
warnings.warn(
"Setting requires_grad=True for layer: {}, name: {}".format(
layer, name
)
)
param.requires_grad = True
@log_usage()
def influence( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
k: Optional[int] = None,
proponents: bool = True,
show_progress: bool = False,
) -> Union[Tensor, KMostInfluentialResults]:
r"""
This is the key method of this class, and can be run in 2 different modes,
where the mode that is run depends on the arguments passed to this method:
- influence score mode: This mode is used if `k` is None. This mode computes
the influence score of every example in training dataset `train_dataset`
on every example in the test batch represented by `inputs`.
- k-most influential mode: This mode is used if `k` is not None, and an int.
This mode computes the proponents or opponents of every example in the
test batch represented by `inputs`. In particular, for each test example in
the test batch, this mode computes its proponents (resp. opponents),
which are the indices in the training dataset `train_dataset` of the
training examples with the `k` highest (resp. lowest) influence scores on the
test example. Proponents are computed if `proponents` is True. Otherwise,
opponents are computed. For each test example, this method also returns the
actual influence score of each proponent (resp. opponent) on the test
example.
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): If not provided or `None`, the influence score mode will
be run. Otherwise, the k-most influential mode will be run,
and `k` is the number of proponents / opponents to return per
example in the test batch.
Default: None
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`), if running in k-most influential
mode.
Default: True
show_progress (bool, optional): For all modes, computation of results
requires "training dataset computations": computations for each
batch in the training dataset `train_dataset`, which may
take a long time. If `show_progress` is true, the progress of
"training dataset computations" will be displayed. In particular,
the number of batches for which computations have been performed
will be displayed. It will try to use tqdm if available for
advanced features (e.g. time estimation). Otherwise, it will
fallback to a simple output of progress.
Default: False
Returns:
The return value of this method depends on which mode is run.
- influence score mode: if this mode is run (`k` is None), returns a 2D
tensor `influence_scores` of shape `(input_size, train_dataset_size)`,
where `input_size` is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in training dataset
`train_dataset`. In other words, `influence_scores[i][j]` is the
influence score of the `j`-th example in `train_dataset` on the `i`-th
example in the test batch.
- k-most influential mode: if this mode is run (`k` is an int), returns
a namedtuple `(indices, influence_scores)`. `indices` is a 2D tensor of
shape `(input_size, k)`, where `input_size` is the number of examples in
the test batch. If computing proponents (resp. opponents),
`indices[i][j]` is the index in training dataset `train_dataset` of the
example with the `j`-th highest (resp. lowest) influence score (out of
the examples in `train_dataset`) on the `i`-th example in the test
batch. `influence_scores` contains the corresponding influence scores.
In particular, `influence_scores[i][j]` is the influence score of example
`indices[i][j]` in `train_dataset` on example `i` in the test batch
represented by `inputs`.
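A brief usage sketch of the two modes, continuing the hypothetical `tracin`
object and data from the `__init__` example::
>>> test_batch = (torch.randn(8, 10), torch.randint(0, 2, (8,)))
>>> scores = tracin.influence(test_batch)  # influence score mode;
>>> # shape (test batch size, number of training examples)
>>> top = tracin.influence(test_batch, k=3, proponents=True)  # k-most influential mode
>>> top.indices.shape, top.influence_scores.shape  # both are (8, 3)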
"""
assert inputs is not None, (
"`inputs` argument is required."
"If you wish to calculate self influence scores,"
" please use the `self_influence` method instead."
)
return _influence_route_to_helpers(
self,
inputs,
k,
proponents,
show_progress=show_progress,
)
def _sum_jacobians(
self,
inputs: DataLoader,
loss_fn: Optional[Union[Module, Callable]] = None,
reduction_type: Optional[str] = None,
):
"""
Sums the jacobians of all examples in `inputs`. The result is of the
same format as `layer_jacobians`, but the batch dimension has size 1.
"""
inputs_iter = iter(inputs)
inputs_batch = next(inputs_iter)
def get_batch_contribution(inputs_batch):
_input_jacobians = self._basic_computation_tracincp(
inputs_batch[0:-1],
inputs_batch[-1],
loss_fn,
reduction_type,
)
return tuple(
torch.sum(jacobian, dim=0).unsqueeze(0) for jacobian in _input_jacobians
)
inputs_jacobians = get_batch_contribution(inputs_batch)
for inputs_batch in inputs_iter:
inputs_batch_jacobians = get_batch_contribution(inputs_batch)
inputs_jacobians = tuple(
[
inputs_jacobian + inputs_batch_jacobian
for (inputs_jacobian, inputs_batch_jacobian) in zip(
inputs_jacobians, inputs_batch_jacobians
)
]
)
return inputs_jacobians
def _concat_jacobians(
self,
inputs: DataLoader,
loss_fn: Optional[Union[Module, Callable]] = None,
reduction_type: Optional[str] = None,
):
all_inputs_batch_jacobians = [
self._basic_computation_tracincp(
inputs_batch[0:-1],
inputs_batch[-1],
loss_fn,
reduction_type,
)
for inputs_batch in inputs
]
return tuple(
torch.cat(all_inputs_batch_jacobian, dim=0)
for all_inputs_batch_jacobian in zip(*all_inputs_batch_jacobians)
)
@log_usage()
def compute_intermediate_quantities(
self,
inputs: Union[Tuple[Any, ...], DataLoader],
aggregate: bool = False,
) -> Tensor:
"""
Computes "embedding" vectors for all examples in a single batch, or a
`Dataloader` that yields batches. These embedding vectors are constructed so
that the influence score of a training example on a test example is simply the
dot-product of their corresponding vectors. Allowing a `DataLoader`
yielding batches to be passed in (as opposed to a single batch) gives the
potential to improve efficiency, because we load each checkpoint only once in
this method call. Thus if a `DataLoader` yielding batches is passed in, this
reduces the total number of times each checkpoint is loaded for a dataset,
compared to if a single batch is passed in. The reason we do not just increase
the batch size is that for large models, large batches do not fit in memory.
If `aggregate` is True, the *sum* of the vectors for all examples is returned,
instead of the vectors for each example. This can be useful for computing the
influence of a given training example on the total loss over a validation
dataset, because due to properties of the dot-product, this influence is the
dot-product of the training example's vector with the sum of the vectors in the
validation dataset. Also, doing the sum aggregation within this method, as
opposed to outside of it (by computing all vectors for the validation dataset,
then taking the sum), allows memory usage to be reduced.
Args:
inputs (tuple or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`, and
`batch[-1]` are the labels, if any. Here, `model` is the model
provided in initialization. This is the same assumption made for
each batch yielded by training dataset `train_dataset`.
aggregate (bool): Whether to return the sum of the vectors for all
examples, as opposed to vectors for each example.
Returns:
intermediate_quantities (Tensor): A tensor of dimension
(N, D * C). Here, N is the total number of examples in
`inputs` if `aggregate` is False, and 1, otherwise (so that
a 2D tensor is always returned). C is the number of checkpoints
passed as the `checkpoints` argument of `TracInCP.__init__`, and
each row represents the vector for an example. Regarding D: Let I
be the dimension of the output of the last fully-connected layer
times the dimension of the input of the last fully-connected layer.
If `self.projection_dim` is specified in initialization,
D = min(I * C, `self.projection_dim` * C). Otherwise, D = I * C.
In summary, if `self.projection_dim` is None, the dimension of each
vector will be determined by the size of the input and output of
the last fully-connected layer of `model`. Otherwise,
`self.projection_dim` must be an int, and random projection will be
performed to ensure that the vector is of dimension no more than
`self.projection_dim` * C. `self.projection_dim` corresponds to
the variable d in the top of page 15 of the TracIn paper:
https://arxiv.org/pdf/2002.08484.pdf.
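The dot-product property described above can be sketched as follows, assuming
the test loss function is the same as the training loss function (hypothetical
`tracin` object and batches, following the usual batch structure)::
>>> train_emb = tracin.compute_intermediate_quantities(train_batch)
>>> test_emb = tracin.compute_intermediate_quantities(test_batch)
>>> approx_scores = test_emb @ train_emb.T  # influence of `train_batch` examples
>>> # on `test_batch` examples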
"""
# If `inputs` is not a `DataLoader`, turn it into one.
inputs = _format_inputs_dataset(inputs)
def get_checkpoint_contribution(checkpoint):
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
# get jacobians as tuple of tensors
if aggregate:
inputs_jacobians = self._sum_jacobians(
inputs, self.loss_fn, self.reduction_type
)
else:
inputs_jacobians = self._concat_jacobians(
inputs, self.loss_fn, self.reduction_type
)
# flatten into single tensor
return learning_rate * torch.cat(
[
input_jacobian.flatten(start_dim=1)
for input_jacobian in inputs_jacobians
],
dim=1,
)
return torch.cat(
[
get_checkpoint_contribution(checkpoint)
for checkpoint in self.checkpoints
],
dim=1,
)
def _influence_batch_tracincp(
self,
test_batch: Tuple[Any, ...],
train_batch: Tuple[Any, ...],
):
"""
computes influence scores for a single training batch
"""
def get_checkpoint_contribution(checkpoint):
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
input_jacobians = self._basic_computation_tracincp(
test_batch[0:-1],
test_batch[-1],
self.test_loss_fn,
self.test_reduction_type,
)
return (
_gradient_dot_product(
input_jacobians,
self._basic_computation_tracincp(
train_batch[0:-1],
train_batch[-1],
self.loss_fn,
self.reduction_type,
),
)
* learning_rate
)
batch_tracin_scores = get_checkpoint_contribution(self.checkpoints[0])
for checkpoint in self.checkpoints[1:]:
batch_tracin_scores += get_checkpoint_contribution(checkpoint)
return batch_tracin_scores
def _influence(
self,
inputs: Tuple[Any, ...],
show_progress: bool = False,
) -> Tensor:
r"""
Computes the influence of examples in training dataset `train_dataset`
on the examples in the test batch represented by `inputs`.
This implementation does not require knowing the number of training examples
in advance. Instead, the number of training examples is inferred from the
output of `self._basic_computation_tracincp`.
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): To compute the influence of examples in
training dataset `train_dataset`, we compute the influence
of each batch. If `show_progress` is true, the progress of this
computation will be displayed. In particular, the number of batches
for which influence has been computed will be displayed. It will
try to use tqdm if available for advanced features (e.g. time
estimation). Otherwise, it will fallback to a simple output of
progress.
Default: False
Returns:
influence_scores (Tensor): Influence scores from the TracInCP method.
Its shape is `(input_size, train_dataset_size)`, where `input_size`
is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in
training dataset `train_dataset`. For example:
`influence_scores[i][j]` is the influence score for the j-th training
example to the i-th example in the test batch.
"""
train_dataloader = self.train_dataloader
if show_progress:
train_dataloader = progress(
train_dataloader,
desc=(
f"Using {self.get_name()} to compute "
"influence for training batches"
),
total=self.train_dataloader_len,
)
return torch.cat(
[
self._influence_batch_tracincp(inputs, batch)
for batch in train_dataloader
],
dim=1,
)
def _get_k_most_influential(
self,
inputs: Tuple[Any, ...],
k: int = 5,
proponents: bool = True,
show_progress: bool = False,
) -> KMostInfluentialResults:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): The number of proponents or opponents to return per test
example.
Default: 5
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`)
Default: True
show_progress (bool, optional): To compute the proponents (or opponents)
for the batch of examples, we perform computation for each batch in
training dataset `train_dataset`. If `show_progress` is
true, the progress of this computation will be displayed. In
particular, the number of batches for which the computation has
been performed will be displayed. It will try to use tqdm if
available for advanced features (e.g. time estimation). Otherwise,
it will fallback to a simple output of progress.
Default: False
Returns:
(indices, influence_scores) (namedtuple): `indices` is a torch.long Tensor
that contains the indices of the proponents (or opponents) for each
test example. Its dimension is `(inputs_batch_size, k)`, where
`inputs_batch_size` is the number of examples in `inputs`. For
example, if `proponents==True`, `indices[i][j]` is the index of the
example in training dataset `train_dataset` with the
j-th highest influence score for the i-th example in `inputs`.
`indices` is a `torch.long` tensor so that it can directly be used
to index other tensors. Each row of `influence_scores` contains the
influence scores for a different test example, in sorted order. In
particular, `influence_scores[i][j]` is the influence score of
example `indices[i][j]` in training dataset `train_dataset`
on example `i` in the test batch represented by `inputs`.
"""
desc = (
None
if not show_progress
else (
(
f"Using {self.get_name()} to perform computation for "
f'getting {"proponents" if proponents else "opponents"}. '
"Processing training batches"
)
)
)
return KMostInfluentialResults(
*_get_k_most_influential_helper(
self.train_dataloader,
self._influence_batch_tracincp,
inputs,
k,
proponents,
show_progress,
desc,
)
)
def _self_influence_by_checkpoints(
self,
inputs: Union[Tuple[Any, ...], DataLoader],
show_progress: bool = False,
) -> Tensor:
"""
Computes self influence scores for the examples in `inputs`, which is
either a single batch or a Pytorch `DataLoader` that yields batches. Therefore,
the computed self influence scores are *not* for the examples in training
dataset `train_dataset` (unlike when computing self influence scores using the
`influence` method). Note that if `inputs` is a single batch, this
will call `model` on that single batch, and if `inputs` yields
batches, this will call `model` on each batch that is yielded. Therefore,
please ensure that for both cases, the batch(es) that `model` is called
with are not too large, so that there will not be an out-of-memory error. This
implementation performs an outer iteration over checkpoints, and an inner
iteration over all batches that `inputs` represents. The pros of this
implementation are that the checkpoints do not need to be loaded too many
times.
Args:
inputs (tuple or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`,
and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset`. Please see documentation for the
`train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In more detail, this computation will iterate over all
checkpoints (provided as the `checkpoints` initialization argument)
in an outer loop, and iterate over all batches that
`inputs` represents in an inner loop. Thus if
`show_progress` is True, the progress of both the outer
iteration and the inner iterations will be displayed. To show
progress, it will try to use tqdm if available for advanced
features (e.g. time estimation). Otherwise, it will fallback to a
simple output of progress.
Default: False
Returns:
self_influence_scores (Tensor): This is a 1D tensor containing the self
influence scores of all examples in `inputs`, regardless of
whether it represents a single batch or a `DataLoader` that yields
batches.
"""
# If `inputs` is not a `DataLoader`, turn it into one.
inputs = _format_inputs_dataset(inputs)
# If `show_progress` is true, create an outer progress bar that keeps track of
# how many checkpoints have been processed
if show_progress:
# Try to determine length of inner progress bar if possible, with a default
# of `None`.
inputs_len = None
try:
inputs_len = len(inputs)
except TypeError:
warnings.warn(
"Unable to determine the number of batches in `inputs`. "
"Therefore, if showing the progress of the computation of self "
"influence scores, only the number of batches processed can be "
"displayed, and not the percentage completion of the computation, "
"nor any time estimates."
)
def calculate_via_vector_norm(layer_jacobian):
# Helper to efficiently calculate vector norm if pytorch version permits.
return (
torch.linalg.vector_norm(
layer_jacobian,
dim=list(range(1, len(layer_jacobian.shape))),
)
** 2
)
def calculate_via_flatten(layer_jacobian):
return torch.sum(layer_jacobian.flatten(start_dim=1) ** 2, dim=1)
def get_checkpoint_contribution(checkpoint):
# This function returns a 1D tensor representing the contribution to the
# self influence score for the given checkpoint, for all batches in
# `inputs`. The length of the 1D tensor is the total number of
# examples in `inputs`.
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
# This will store a list of the contribution of the self influence score
# from each batch. Each element is a 1D tensor of length batch_size - the
# batch size of each batch in `inputs` (they do not need to be all
# the same)
checkpoint_contribution = []
_inputs = inputs
# If `show_progress` is true, create an inner progress bar that keeps track
# of how many batches have been processed for the current checkpoint
if show_progress:
_inputs = progress(
inputs,
desc=(
f"Using {self.get_name()} to compute self "
"influence. Processing batch"
),
total=inputs_len,
)
for batch in _inputs:
layer_jacobians = self._basic_computation_tracincp(
batch[0:-1],
batch[-1],
self.loss_fn,
self.reduction_type,
)
# Note that all variables in this function are for an entire batch.
# Each `layer_jacobian` in `layer_jacobians` corresponds to a different
# layer. `layer_jacobian` is the jacobian w.r.t. a given layer's
# parameters. If the given layer's parameters are of shape *, then
# `layer_jacobian` is of shape (batch_size, *). For each layer, we need
# the squared jacobian for each example. So we square the jacobian and
# sum over all dimensions except the 0-th (the batch dimension). We then
# sum the contribution over all layers. For Pytorch > 1.10 we use the
# optimized torch.linalg.vector_norm as opposed to the explicit flatten.
calculate_fn = calculate_via_flatten
if _parse_version(torch.__version__) >= (1, 10, 0):
calculate_fn = calculate_via_vector_norm
checkpoint_contribution.append(
torch.sum(
torch.stack(
[
calculate_fn(layer_jacobian)
for layer_jacobian in layer_jacobians
],
dim=0,
),
dim=0,
)
* learning_rate
)
# We concatenate the contributions from each batch into a single 1D tensor,
# which represents the contributions for all batches in `inputs`
return torch.cat(checkpoint_contribution, dim=0)
if show_progress:
checkpoints_progress = progress(
desc=(
f"Using {self.get_name()} to compute self "
"influence. Processing checkpoint"
),
total=len(self.checkpoints),
mininterval=0.0,
)
else:
checkpoints_progress = NullProgress()
with checkpoints_progress:
batches_self_tracin_scores = get_checkpoint_contribution(
self.checkpoints[0]
)
checkpoints_progress.update()
# The self influence score for all examples is the sum of contributions from
# each checkpoint
for checkpoint in self.checkpoints[1:]:
batches_self_tracin_scores += get_checkpoint_contribution(checkpoint)
checkpoints_progress.update()
return batches_self_tracin_scores
@log_usage()
def self_influence(
self,
inputs: Optional[Union[Tuple[Any, ...], DataLoader]] = None,
show_progress: bool = False,
outer_loop_by_checkpoints: bool = False,
) -> Tensor:
"""
Computes self influence scores for the examples in `inputs`, which is
either a single batch or a Pytorch `DataLoader` that yields batches.
If `inputs` is not specified or is `None`, self influence scores are
calculated for the training dataset `train_dataset`. Note that if `inputs`
is a single batch, this will call `model` on that single batch, and if
`inputs` yields batches, this will call `model` on each batch that is
yielded. Therefore, please ensure that for both cases, the batch(es) that
`model` is called with are not too large, so that there will not be an
out-of-memory error.
Internally, this computation requires iterating both over the batches in
`inputs`, as well as different model checkpoints. There are two ways
this iteration can be done. If `outer_loop_by_checkpoints` is False, the outer
iteration will be over batches, and the inner iteration will be over
checkpoints. This has the pro that displaying the progress of the computation
is more intuitive, involving displaying the number of batches for which self
influence scores have been computed. If `outer_loop_by_checkpoints` is True,
the outer iteration will be over checkpoints, and the inner iteration will be
over batches. This has the pro that the checkpoints do not need to be loaded
for each batch. For large models, loading checkpoints can be time-intensive.
Args:
inputs (tuple or DataLoader, optional): This specifies the
dataset for which self influence scores will be computed.
Either a single tuple of any, or a `DataLoader`, where each
batch yielded is a tuple of type any. In either case, the tuple
represents a single batch, where the last element is assumed to
be the labels for the batch. That is, `model(*batch[0:-1])`
produces the output for `model`, and `batch[-1]` are the labels,
if any. This is the same assumption made for each batch yielded
by training dataset `train_dataset`. Please see documentation for
the `train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch. If not provided
or `None`, self influence scores will be computed for training
dataset `train_dataset`, which yields batches satisfying the
above assumptions.
Default: None.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In more detail, if `outer_loop_by_checkpoints` is False,
this computation will iterate over all batches in an outer loop.
Thus if `show_progress` is True, the number of batches for which
self influence scores have been computed will be displayed. If
`outer_loop_by_checkpoints` is True, this computation will iterate
over all checkpoints (provided as the `checkpoints` initialization
argument) in an outer loop, and iterate over all batches that
`inputs` represents in an inner loop. Thus if
`show_progress` is True, the progress of both the outer
iteration and the inner iterations will be displayed. To show
progress, it will try to use tqdm if available for advanced
features (e.g. time estimation). Otherwise, it will fallback to a
simple output of progress.
Default: False
outer_loop_by_checkpoints (bool, optional): Whether to perform the outer
iteration over checkpoints; see the method description for more
details.
Default: False
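A brief usage sketch (hypothetical `tracin` object; `batch` follows the usual
structure where the last element is the labels)::
>>> self_scores = tracin.self_influence(batch)  # 1D tensor, one score per example
>>> train_self_scores = tracin.self_influence()  # scores for all of `train_dataset`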
"""
inputs = inputs if inputs is not None else self.train_dataloader
if outer_loop_by_checkpoints:
return self._self_influence_by_checkpoints(inputs, show_progress)
return _self_influence_by_batches_helper(
self._self_influence_by_checkpoints,
self.get_name(),
inputs,
show_progress,
)
def _basic_computation_tracincp(
self,
inputs: Tuple[Any, ...],
targets: Optional[Tensor] = None,
loss_fn: Optional[Union[Module, Callable]] = None,
reduction_type: Optional[str] = None,
) -> Tuple[Tensor, ...]:
"""
For instances of TracInCP, computation of influence scores or self influence
scores repeatedly calls this function for different checkpoints
and batches. In particular, this function computes the jacobian of a loss
function w.r.t. parameters in the `layers` initialization argument.
Args:
inputs (tuple[Any, ...]): A batch of examples, which could be a training
batch or test batch, depending on which method is the caller. Does not
represent labels, which are passed as `targets`. The assumption is
that `model(*inputs)` produces the predictions for the batch.
targets (tensor or None): If computing influence scores on a loss function,
these are the labels corresponding to the batch `inputs`.
Default: None
loss_fn (Callable, optional): The loss function to use when computing the
jacobian.
reduction_type (str, optional): The reduction type of `loss_fn`. This
argument is only used if `sample_wise_grads_per_batch` was true in
initialization.
"""
if self.sample_wise_grads_per_batch:
return _compute_jacobian_wrt_params_with_sample_wise_trick(
self.model,
inputs,
targets,
loss_fn,
reduction_type,
self.layer_modules,
)
return _compute_jacobian_wrt_params(
self.model,
inputs,
targets,
loss_fn,
self.layer_modules,
)
|
#!/usr/bin/env python3
import warnings
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import captum._utils.common as common
import torch
from captum._utils.av import AV
from captum.attr import LayerActivation
from captum.influence._core.influence import DataInfluence
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
r"""
Additional helper functions to calculate similarity metrics.
"""
def euclidean_distance(test, train) -> Tensor:
r"""
Calculates the pairwise euclidean distance for batches of feature vectors.
Tensors test and train have shape (batch_size_1, *), and (batch_size_2, *).
Returns pairwise euclidean distance Tensor of shape (batch_size_1, batch_size_2).
"""
similarity = torch.cdist(
test.view(test.shape[0], -1).unsqueeze(0),
train.view(train.shape[0], -1).unsqueeze(0),
).squeeze(0)
return similarity
def cosine_similarity(test, train, replace_nan=0) -> Tensor:
r"""
Calculates the pairwise cosine similarity for batches of feature vectors.
Tensors test and train have shape (batch_size_1, *), and (batch_size_2, *).
Returns pairwise cosine similarity Tensor of shape (batch_size_1, batch_size_2).
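For example, the expected shapes are (a sketch; values depend on the inputs)::
>>> test = torch.randn(3, 4, 5)
>>> train = torch.randn(7, 4, 5)
>>> cosine_similarity(test, train).shape
torch.Size([3, 7])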
"""
test = test.view(test.shape[0], -1)
train = train.view(train.shape[0], -1)
if common._parse_version(torch.__version__) <= (1, 6, 0):
test_norm = torch.norm(test, p=None, dim=1, keepdim=True)
train_norm = torch.norm(train, p=None, dim=1, keepdim=True)
else:
test_norm = torch.linalg.norm(test, ord=2, dim=1, keepdim=True)
train_norm = torch.linalg.norm(train, ord=2, dim=1, keepdim=True)
test = torch.where(test_norm != 0.0, test / test_norm, Tensor([replace_nan]))
train = torch.where(train_norm != 0.0, train / train_norm, Tensor([replace_nan])).T
similarity = torch.mm(test, train)
return similarity
r"""
Implements abstract DataInfluence class and provides implementation details for
similarity metric-based influence computation. Similarity metrics can be used to compare
intermediate or final activation vectors of a model for different sets of input. Then,
these can be used to draw conclusions about influential instances.
Some standard similarity metrics such as dot product similarity or euclidean distance
are provided, but the user can provide any custom similarity metric as well.
"""
class SimilarityInfluence(DataInfluence):
def __init__(
self,
module: Module,
layers: Union[str, List[str]],
influence_src_dataset: Dataset,
activation_dir: str,
model_id: str = "",
similarity_metric: Callable = cosine_similarity,
similarity_direction: str = "max",
batch_size: int = 1,
**kwargs: Any,
) -> None:
r"""
Args:
module (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
layers (str or list[str]): The fully qualified layer(s) for which the
activation vectors are computed.
influence_src_dataset (torch.utils.data.Dataset): PyTorch Dataset that is
used to create a PyTorch Dataloader to iterate over the dataset and
its labels. This is the dataset for which we will be seeking
influential instances. In most cases this is the training dataset.
activation_dir (str): The directory of the path to store
and retrieve activation computations. Best practice would be to use
an absolute path.
model_id (str): The name/version of the model for which layer
activations are being computed. Activations will be stored and
loaded under the subdirectory with this name if provided.
similarity_metric (Callable): This is a callable function that computes a
similarity metric between two representations. For example, the
representations pair could be from the training and test sets.
This function must adhere to certain standards. The inputs should be
torch Tensors with shape (batch_size_i/j, feature dimensions). The
output Tensor should have shape (batch_size_i, batch_size_j) with
scalar values corresponding to the similarity metric used for each
pairwise combination from the two batches.
For example, suppose we use `batch_size_1 = 16` for iterating
through `influence_src_dataset`, and for the `inputs` argument
we pass in a Tensor with 3 examples, i.e. batch_size_2 = 3. Also,
suppose that our inputs and intermediate activations throughout the
model will have dimension (N, C, H, W). Then, the feature dimensions
should be flattened within this function. For example::
>>> av_test.shape
torch.Size([3, N, C, H, W])
>>> av_src.shape
torch.Size([16, N, C, H, W])
>>> av_test = av_test.view(av_test.shape[0], -1)
>>> av_test.shape
torch.Size([3, N x C x H x W])
and similarly for av_src. The similarity_metric should then use
these flattened tensors to return the pairwise similarity matrix.
For example, `similarity_metric(av_test, av_src)` should return a
tensor of shape (3, 16).
batch_size (int): Batch size for iterating through `influence_src_dataset`.
**kwargs: Additional key-value arguments that are necessary for specific
implementation of `DataInfluence` abstract class.
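A minimal construction sketch follows; the model, layer name, dataset, and
activation directory are hypothetical and only illustrate the argument layout::
>>> from captum.influence import SimilarityInfluence
>>> sim = SimilarityInfluence(
>>>     net,  # hypothetical model that defines a layer attribute named "fc1"
>>>     layers="fc1",
>>>     influence_src_dataset=train_dataset,  # hypothetical training Dataset
>>>     activation_dir="activations/",  # hypothetical directory for cached activations
>>>     model_id="my_model_v1",
>>>     similarity_direction="max",
>>>     batch_size=16,
>>> )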
"""
self.module = module
self.layers = [layers] if isinstance(layers, str) else layers
self.influence_src_dataset = influence_src_dataset
self.activation_dir = activation_dir
self.model_id = model_id
self.batch_size = batch_size
if similarity_direction == "max" or similarity_direction == "min":
self.similarity_direction = similarity_direction
else:
raise ValueError(
f"{similarity_direction} is not a valid value. "
"Must be either 'max' or 'min'"
)
if similarity_metric is cosine_similarity:
if "replace_nan" in kwargs:
self.replace_nan = kwargs["replace_nan"]
else:
self.replace_nan = -2 if self.similarity_direction == "max" else 2
similarity_metric = partial(cosine_similarity, replace_nan=self.replace_nan)
self.similarity_metric = similarity_metric
self.influence_src_dataloader = DataLoader(
influence_src_dataset, batch_size, shuffle=False
)
def influence( # type: ignore[override]
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
top_k: int = 1,
additional_forward_args: Optional[Any] = None,
load_src_from_disk: bool = True,
**kwargs: Any,
) -> Dict:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Batch of examples for which
influential instances are computed. They are passed to the
forward_func. The first dimension in `inputs` tensor or tuple
of tensors corresponds to the batch size. A tuple of tensors
is only passed in if this is the input form that `module` accepts.
top_k (int): The number of top-matching activations to return
additional_forward_args (Any, optional): Additional arguments that will be
passed to forward_func after inputs.
load_src_from_disk (bool): Loads activations for `influence_src_dataset`
where possible. Setting to False would force regeneration of
activations.
load_input_from_disk (bool): Regenerates activations for inputs by default
and removes previous `inputs` activations that are flagged with
`inputs_id`. Setting to True will load prior matching inputs
activations. Note that this could lead to unexpected behavior if
`inputs_id` is not configured properly and activations are loaded
for a different, prior `inputs`.
inputs_id (str): Used to identify inputs for loading activations.
**kwargs: Additional key-value arguments that are necessary for specific
implementation of `DataInfluence` abstract class.
Returns:
influences (dict): Returns the influential instances retrieved from
`influence_src_dataset` for each test example represented through a
tensor or a tuple of tensor in `inputs`. Returned influential
examples are represented as dict, with keys corresponding to
the layer names passed in `layers`. Each value in the dict is a
tuple containing the indices and values for the top k similarities
from `influence_src_dataset` by the chosen metric. The first value
in the tuple corresponds to the indices corresponding to the top k
most similar examples, and the second value is the similarity score.
The batch dimension corresponds to the batch dimension of `inputs`.
If inputs.shape[0] == 5, then dict[`layer_name`][0].shape[0] == 5.
These tensors will be of shape (inputs.shape[0], top_k).
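A brief usage sketch (hypothetical `sim` object from the `__init__` example,
with `test_inputs` shaped for the hypothetical model)::
>>> test_inputs = torch.randn(5, 10)
>>> result = sim.influence(test_inputs, top_k=3)
>>> indices, scores = result["fc1"]  # each of shape (5, 3)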
"""
inputs_batch_size = (
inputs[0].shape[0] if isinstance(inputs, tuple) else inputs.shape[0]
)
influences: Dict[str, Any] = {}
layer_AVDatasets = AV.generate_dataset_activations(
self.activation_dir,
self.module,
self.model_id,
self.layers,
DataLoader(self.influence_src_dataset, self.batch_size, shuffle=False),
identifier="src",
load_from_disk=load_src_from_disk,
return_activations=True,
)
assert layer_AVDatasets is not None and not isinstance(
layer_AVDatasets, AV.AVDataset
)
layer_modules = [
common._get_module_from_name(self.module, layer) for layer in self.layers
]
test_activations = LayerActivation(self.module, layer_modules).attribute(
inputs, additional_forward_args
)
minmax = self.similarity_direction == "max"
# av_inputs shape: (inputs_batch_size, *) e.g. (inputs_batch_size, N, C, H, W)
# av_src shape: (self.batch_size, *) e.g. (self.batch_size, N, C, H, W)
test_activations = (
test_activations if len(self.layers) > 1 else [test_activations]
)
for i, (layer, layer_AVDataset) in enumerate(
zip(self.layers, layer_AVDatasets)
):
topk_val, topk_idx = torch.Tensor(), torch.Tensor().long()
zero_acts = torch.Tensor().long()
av_inputs = test_activations[i]
src_loader = DataLoader(layer_AVDataset)
for j, av_src in enumerate(src_loader):
av_src = av_src.squeeze(0)
similarity = self.similarity_metric(av_inputs, av_src)
msg = (
"Output of custom similarity does not meet required dimensions. "
f"Your output has shape {similarity.shape}.\nPlease ensure the "
"output shape matches (inputs_batch_size, src_dataset_batch_size), "
f"which should be {(inputs_batch_size, self.batch_size)}."
)
assert similarity.shape == (inputs_batch_size, av_src.shape[0]), msg
if hasattr(self, "replace_nan"):
idx = (similarity == self.replace_nan).nonzero()
zero_acts = torch.cat((zero_acts, idx))
r"""
TODO: For models that can have tuples as activations, we should
allow similarity metrics to accept tuples, support topk selection.
"""
topk_batch = min(top_k, self.batch_size)
values, indices = torch.topk(
similarity, topk_batch, dim=1, largest=minmax
)
indices += int(j * self.batch_size)
topk_val = torch.cat((topk_val, values), dim=1)
topk_idx = torch.cat((topk_idx, indices), dim=1)
# can modify how often to sort for efficiency? minor
sort_idx = torch.argsort(topk_val, dim=1, descending=minmax)
topk_val = torch.gather(topk_val, 1, sort_idx[:, :top_k])
topk_idx = torch.gather(topk_idx, 1, sort_idx[:, :top_k])
influences[layer] = (topk_idx, topk_val)
if torch.numel(zero_acts) != 0:
zero_warning = (
f"Layer {layer} has zero-vector activations for some inputs. This "
"may cause undefined behavior for cosine similarity. The indices "
"for the offending inputs will be included under the key "
f"'zero_acts-{layer}' in the output dictionary. Indices are "
"returned as a tensor with [inputs_idx, src_dataset_idx] pairs "
"which may have corrupted similarity scores."
)
warnings.warn(zero_warning, RuntimeWarning)
key = "-".join(["zero_acts", layer])
influences[key] = zero_acts
return influences
|
#!/usr/bin/env python3
from abc import ABC, abstractmethod
from typing import Any
from torch.nn import Module
from torch.utils.data import Dataset
class DataInfluence(ABC):
r"""
An abstract class to define model data influence skeleton.
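A minimal, hypothetical subclass sketch is shown below; concrete
implementations in Captum (e.g. `TracInCP`, `SimilarityInfluence`) are
considerably more involved::
>>> import torch
>>> class ConstantInfluence(DataInfluence):
>>>     def __init__(self, model, train_dataset):
>>>         self.model = model
>>>         self.train_dataset = train_dataset
>>>     def influence(self, inputs=None, **kwargs):
>>>         # hypothetical placeholder: the same score for every training example
>>>         return torch.ones(inputs[0].shape[0], len(self.train_dataset))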
"""
def __init__(self, model: Module, train_dataset: Dataset, **kwargs: Any) -> None:
r"""
Args:
model (torch.nn.Module): An instance of pytorch model.
train_dataset (torch.utils.data.Dataset): PyTorch Dataset that is
used to create a PyTorch Dataloader to iterate over the dataset and
its labels. This is the dataset for which we will be seeking
influential instances. In most cases this is the training dataset.
**kwargs: Additional key-value arguments that are necessary for specific
implementation of `DataInfluence` abstract class.
"""
self.model = model
self.train_dataset = train_dataset
@abstractmethod
def influence(self, inputs: Any = None, **kwargs: Any) -> Any:
r"""
Args:
inputs (Any): Batch of examples for which influential
instances are computed. They are passed to the forward_func. If
`inputs` is a tensor or tuple of tensors, the first dimension
of a tensor corresponds to the batch dimension.
**kwargs: Additional key-value arguments that are necessary for specific
implementation of `DataInfluence` abstract class.
Returns:
influences (Any): We do not add restrictions on the return type for now,
though this may change in the future.
"""
pass
|
#!/usr/bin/env python3
import threading
import warnings
from collections import defaultdict
from typing import Any, Callable, cast, Dict, Iterator, List, Optional, Tuple, Union
import torch
from captum._utils.common import _get_module_from_name, _sort_key_list
from captum._utils.gradient import _gather_distributed_tensors
from captum._utils.progress import NullProgress, progress
from captum.influence._core.tracincp import (
_influence_route_to_helpers,
KMostInfluentialResults,
TracInCPBase,
)
from captum.influence._utils.common import (
_check_loss_fn,
_format_inputs_dataset,
_get_k_most_influential_helper,
_jacobian_loss_wrt_inputs,
_load_flexible_state_dict,
_self_influence_by_batches_helper,
_tensor_batch_dot,
)
from captum.influence._utils.nearest_neighbors import (
AnnoyNearestNeighbors,
NearestNeighbors,
)
from captum.log import log_usage
from torch import device, Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
r"""
Implements abstract DataInfluence class and also provides implementation details for
influence computation based on the logic provided in TracIn paper
(https://arxiv.org/abs/2002.08484).
The TracIn paper proposes an idealized notion of influence which can be represented by
the total amount a training example reduces loss for a test example via a training
process such as stochastic gradient descent. As this idealized notion of influence is
impractical to compute, the TracIn paper proposes instead to compute an influence
score, which uses a first-order approximation for the change in loss for a test example
by a training example, which is accumulated across saved model checkpoints. This
influence score is accumulated via a summed dot-product of gradient vectors for the
scores/loss of a test and training example.
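In symbols, the influence score of a training example :math:`z` on a test example
:math:`z'` is approximated as
:math:`\text{TracInCP}(z, z') = \sum_{c} \eta_c \, \nabla_\theta \ell(\theta_c, z) \cdot \nabla_\theta \ell(\theta_c, z')`,
where :math:`\theta_c` ranges over the saved checkpoints and :math:`\eta_c` is the
learning rate in use at checkpoint :math:`c`.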
"""
"""
TODO: Support for checkpoint type. Currently only supports model parameters as saved
checkpoints. Can use enum or string.
Potential implementation from design doc:
checkpoint_type (Enum = [Parameters | Loss_Grad]): For performance,
saved / loaded checkpoints can be either model parameters, or
gradient of the loss function on an input w.r.t parameters.
"""
class TracInCPFast(TracInCPBase):
r"""
In Appendix F, Page 14 of the TracIn paper, they show that the influence
score between a test example x' and a training example x can be computed
much more quickly than with naive back-propagation in the special
case when considering only gradients in the last fully-connected layer. This class
computes influence scores for that special case. Note that the computed
influence scores are exactly the same as when naive back-propagation is used -
there is no loss in accuracy.
In more detail regarding the influence score computation: let :math:`x`
and :math:`\nabla_y f(y)` be the input and output-gradient of the last
fully-connected layer, respectively, for a training example. Similarly, let
:math:`x'` and :math:`\nabla_{y'} f(y')` be the corresponding quantities for
a test example. Then, the influence score of the training example on the test
example is the sum of the contribution from each checkpoint. The contribution from
a given checkpoint is :math:`(x^T x')(\nabla_y f(y)^T \nabla_{y'} f(y'))`.
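A small numerical sketch of why this factorization holds for a single linear
layer (a sanity check only, not part of this class's API)::
>>> import torch
>>> x, xp = torch.randn(4), torch.randn(4)  # last-layer inputs for a train / test example
>>> gy, gyp = torch.randn(3), torch.randn(3)  # output-gradients for a train / test example
>>> grad_w = torch.outer(gy, x)  # per-example gradient w.r.t. the layer weight
>>> grad_wp = torch.outer(gyp, xp)
>>> lhs = (grad_w * grad_wp).sum()  # dot product of the flattened gradients
>>> rhs = (x @ xp) * (gy @ gyp)  # factorized form used by this class
>>> torch.allclose(lhs, rhs)
True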
"""
def __init__(
self,
model: Module,
final_fc_layer: Union[Module, str],
train_dataset: Union[Dataset, DataLoader],
checkpoints: Union[str, List[str], Iterator],
checkpoints_load_func: Callable = _load_flexible_state_dict,
loss_fn: Optional[Union[Module, Callable]] = None,
batch_size: Union[int, None] = 1,
test_loss_fn: Optional[Union[Module, Callable]] = None,
vectorize: bool = False,
) -> None:
r"""
Args:
model (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
final_fc_layer (torch.nn.Module or str): The last fully connected layer in
the network for which gradients will be approximated via fast random
projection method. Can be either the layer module itself, or the
fully qualified name of the layer if it is a defined attribute of
the passed `model`.
train_dataset (torch.utils.data.Dataset or torch.utils.data.DataLoader):
In the `influence` method, we compute the influence score of
training examples on examples in a test batch.
This argument represents the training dataset containing those
training examples. In order to compute those influence scores, we
will create a Pytorch DataLoader yielding batches of training
examples that is then used for processing. If this argument is
already a Pytorch Dataloader, that DataLoader can be directly
used for processing. If it is instead a Pytorch Dataset, we will
create a DataLoader using it, with batch size specified by
`batch_size`. For efficiency purposes, the batch size of the
DataLoader used for processing should be as large as possible, but
not too large, so that certain intermediate quantities created
from a batch still fit in memory. Therefore, if
`train_dataset` is a Dataset, `batch_size` should be large.
If `train_dataset` was already a DataLoader to begin with,
it should have been constructed to have a large batch size. It is
assumed that the Dataloader (regardless of whether it is created
from a Pytorch Dataset or not) yields tuples. For a `batch` that is
yielded, of length `L`, it is assumed that the forward function of
`model` accepts `L-1` arguments, and the last element of `batch` is
the label. In other words, `model(*batch[:-1])` gives the output of
`model`, and `batch[-1]` are the labels for the batch.
checkpoints (str, list[str], or Iterator): Either the path to the directory
used to store and retrieve model checkpoints, a list of
filepaths with checkpoints from which to load, or an iterator which
returns objects from which to load checkpoints.
checkpoints_load_func (Callable, optional): The function to load a saved
checkpoint into a model to update its parameters, and get the
learning rate if it is saved. By default uses a utility to load a
model saved as a state dict.
Default: _load_flexible_state_dict
loss_fn (Callable, optional): The loss function applied to model. `loss_fn`
must be a "reduction" loss function that reduces the per-example
losses in a batch, and returns a single scalar Tensor. Furthermore,
the reduction must be the *sum* or the *mean* of the per-example
losses. For instance, `nn.BCELoss(reduction="sum")` is acceptable.
Also note that if `loss_fn` has no "reduction" attribute,
the implementation assumes that the reduction is the *sum* of the
per-example losses. If this is not the case, i.e. the reduction
is the *mean*, please set the "reduction" attribute of `loss_fn`
to "mean", i.e. `loss_fn.reduction = "mean"`.
Default: None
batch_size (int or None, optional): Batch size of the DataLoader created to
iterate through `train_dataset`, if it is a Dataset.
`batch_size` should be chosen as large as possible so that certain
intermediate quantities created from a batch still fit in memory.
Specific implementations of `TracInCPBase` will detail the size of
the intermediate quantities. `batch_size` must be an int if
`train_dataset` is a Dataset. If `train_dataset`
is a DataLoader, then `batch_size` is ignored as an argument.
Default: 1
test_loss_fn (Callable, optional): In some cases, one may want to use a
separate loss function for training examples, i.e. those in
`train_dataset`, and for test examples, i.e. those
represented by the `inputs` and `targets` arguments to the
`influence` method. For example, if one wants to calculate the
influence score of a training example on a test example's
prediction for a fixed class, `test_loss_fn` could map from the
logits for all classes to the logits for a fixed class.
`test_loss_fn` needs to satisfy the same constraints as `loss_fn`.
Thus, the same checks that we apply to `loss_fn` are also applied
to `test_loss_fn`, if the latter is provided. If not provided, the
loss function for test examples is assumed to be the same as the
loss function for training examples, i.e. `loss_fn`.
Default: None
vectorize (bool, optional): Flag to use experimental vectorize functionality
for `torch.autograd.functional.jacobian`.
Default: False
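A minimal construction sketch, assuming a hypothetical model whose final
fully-connected layer is an attribute named "fc" (paths and loss choices are
illustrative only)::
>>> import torch.nn as nn
>>> from captum.influence import TracInCPFast
>>> tracin_fast = TracInCPFast(
>>>     net,  # hypothetical model
>>>     final_fc_layer="fc",  # or the layer module object itself
>>>     train_dataset=train_dataset,  # Dataset or DataLoader yielding (features, ..., label)
>>>     checkpoints="checkpoints/",  # hypothetical checkpoint directory
>>>     loss_fn=nn.CrossEntropyLoss(reduction="sum"),  # must be a "reduction" loss
>>>     batch_size=32,
>>> )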
"""
TracInCPBase.__init__(
self,
model,
train_dataset,
checkpoints,
checkpoints_load_func,
loss_fn,
batch_size,
test_loss_fn,
)
self.vectorize = vectorize
# TODO: restore prior state
self.final_fc_layer = final_fc_layer
if isinstance(self.final_fc_layer, str):
self.final_fc_layer = _get_module_from_name(model, self.final_fc_layer)
assert isinstance(self.final_fc_layer, Module)
for param in self.final_fc_layer.parameters():
param.requires_grad = True
assert loss_fn is not None, "loss function must not be None"
# check `loss_fn`
self.reduction_type = _check_loss_fn(self, loss_fn, "loss_fn")
# check `test_loss_fn` if it was provided
self.test_reduction_type = (
self.reduction_type
if test_loss_fn is None
else _check_loss_fn(self, test_loss_fn, "test_loss_fn")
)
@log_usage()
def influence( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
k: Optional[int] = None,
proponents: bool = True,
show_progress: bool = False,
) -> Union[Tensor, KMostInfluentialResults]:
r"""
This is the key method of this class, and can be run in 2 different modes,
where the mode that is run depends on the arguments passed to this method:
- influence score mode: This mode is used if `k` is None. This mode computes
the influence score of every example in training dataset `train_dataset`
on every example in the test batch represented by `inputs`.
- k-most influential mode: This mode is used if `k` is not None, and an int.
This mode computes the proponents or opponents of every example in the
test batch represented by `inputs`. In particular, for each test example in
the test batch, this mode computes its proponents (resp. opponents),
which are the indices in the training dataset `train_dataset` of the
training examples with the `k` highest (resp. lowest) influence scores on the
test example. Proponents are computed if `proponents` is True. Otherwise,
opponents are computed. For each test example, this method also returns the
actual influence score of each proponent (resp. opponent) on the test
example.
Args:
inputs (tuple or DataLoader): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): If not provided or `None`, the influence score mode will
be run. Otherwise, the k-most influential mode will be run,
and `k` is the number of proponents / opponents to return per
example in the test batch.
Default: None
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`), if running in k-most influential
mode.
Default: True
show_progress (bool, optional): For all modes, computation of results
requires "training dataset computations": computations for each
batch in the training dataset `train_dataset`, which may
take a long time. If `show_progress` is true, the progress of
"training dataset computations" will be displayed. In particular,
the number of batches for which computations have been performed
will be displayed. It will try to use tqdm if available for
advanced features (e.g. time estimation). Otherwise, it will
fallback to a simple output of progress.
Default: False
Returns:
The return value of this method depends on which mode is run.
- influence score mode: if this mode is run (`k` is None), returns a 2D
tensor `influence_scores` of shape `(input_size, train_dataset_size)`,
where `input_size` is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in training dataset
`train_dataset`. In other words, `influence_scores[i][j]` is the
influence score of the `j`-th example in `train_dataset` on the `i`-th
example in the test batch.
- k-most influential mode: if this mode is run (`k` is an int), returns
a namedtuple `(indices, influence_scores)`. `indices` is a 2D tensor of
shape `(input_size, k)`, where `input_size` is the number of examples in
the test batch. If computing proponents (resp. opponents),
`indices[i][j]` is the index in training dataset `train_dataset` of the
example with the `j`-th highest (resp. lowest) influence score (out of
the examples in `train_dataset`) on the `i`-th example in the test
batch. `influence_scores` contains the corresponding influence scores.
In particular, `influence_scores[i][j]` is the influence score of example
`indices[i][j]` in `train_dataset` on example `i` in the test batch
represented by `inputs`.
"""
assert inputs is not None, (
"`inputs` argument is required."
"If you wish to calculate self influence scores,"
" please use the `self_influence` method instead."
)
return _influence_route_to_helpers(
self,
inputs,
k,
proponents,
show_progress=show_progress,
)
def _influence_batch_tracincp_fast(
self,
test_batch: Tuple[Any, ...],
train_batch: Tuple[Any, ...],
):
"""
computes influence scores for a single training batch, when only considering
gradients in the last fully-connected layer, using the computation trick
described in the `TracInCPFast` class description.
"""
def get_checkpoint_contribution(checkpoint):
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
input_jacobians, input_layer_inputs = _basic_computation_tracincp_fast(
self,
test_batch[0:-1],
test_batch[-1],
self.test_loss_fn,
self.test_reduction_type,
)
src_jacobian, src_layer_input = _basic_computation_tracincp_fast(
self,
train_batch[0:-1],
train_batch[-1],
self.loss_fn,
self.reduction_type,
)
return (
_tensor_batch_dot(
input_jacobians, src_jacobian
            )  # shape is (test batch size, training batch size), containing
            # (\nabla_y f(y)^T \nabla_{y'} f(y')) for every label y in the training
            # batch and label y' in the test batch
            * _tensor_batch_dot(input_layer_inputs, src_layer_input)
            # shape is (test batch size, training batch size), containing x^T x'
            # for every example x in the training batch and example x' in the test
            # batch
* learning_rate
)
batch_tracin_scores = get_checkpoint_contribution(self.checkpoints[0])
for checkpoint in self.checkpoints[1:]:
batch_tracin_scores += get_checkpoint_contribution(checkpoint)
return batch_tracin_scores
def _influence( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
show_progress: bool = False,
) -> Tensor:
r"""
Computes the influence of examples in training dataset `train_dataset`
on the examples in the test batch represented by `inputs`.
This implementation does not require knowing the number of training examples
in advance. Instead, the number of training examples is inferred from the
output of `_basic_computation_tracincp_fast`.
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): To compute the influence of examples in
training dataset `train_dataset`, we compute the influence
of each batch. If `show_progress` is true, the progress of this
computation will be displayed. In particular, the number of batches
for which influence has been computed will be displayed. It will
try to use tqdm if available for advanced features (e.g. time
estimation). Otherwise, it will fallback to a simple output of
progress.
Default: False
Returns:
influence_scores (Tensor): Influence scores from the `TracInCPFast` method.
Its shape is `(input_size, train_dataset_size)`, where `input_size`
is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in
training dataset `train_dataset`. For example:
                `influence_scores[i][j]` is the influence score of the j-th training
                example on the i-th example in the test batch.
"""
train_dataloader = self.train_dataloader
if show_progress:
train_dataloader = progress(
train_dataloader,
desc=(
f"Using {self.get_name()} to compute "
"influence for training batches"
),
total=self.train_dataloader_len,
)
return torch.cat(
[
self._influence_batch_tracincp_fast(inputs, batch)
for batch in train_dataloader
],
dim=1,
)
def _get_k_most_influential( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
k: int = 5,
proponents: bool = True,
show_progress: bool = False,
) -> KMostInfluentialResults:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): The number of proponents or opponents to return per test
example.
Default: 5
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`)
Default: True
show_progress (bool, optional): To compute the proponents (or opponents)
for the batch of examples, we perform computation for each batch in
                training dataset `train_dataset`. If `show_progress` is
true, the progress of this computation will be displayed. In
particular, the number of batches for which the computation has
been performed will be displayed. It will try to use tqdm if
available for advanced features (e.g. time estimation). Otherwise,
                it will fall back to a simple output of progress.
Default: False
Returns:
(indices, influence_scores) (namedtuple): `indices` is a torch.long Tensor
that contains the indices of the proponents (or opponents) for each
test example. Its dimension is `(inputs_batch_size, k)`, where
`inputs_batch_size` is the number of examples in `inputs`. For
example, if `proponents==True`, `indices[i][j]` is the index of the
example in training dataset `train_dataset` with the
                j-th highest influence score on the i-th example in `inputs`.
`indices` is a `torch.long` tensor so that it can directly be used
to index other tensors. Each row of `influence_scores` contains the
influence scores for a different test example, in sorted order. In
particular, `influence_scores[i][j]` is the influence score of
example `indices[i][j]` in training dataset `train_dataset`
on example `i` in the test batch represented by `inputs`.
"""
desc = (
None
if not show_progress
else (
(
f"Using {self.get_name()} to perform computation for "
f'getting {"proponents" if proponents else "opponents"}. '
"Processing training batches"
)
)
)
return KMostInfluentialResults(
*_get_k_most_influential_helper(
self.train_dataloader,
self._influence_batch_tracincp_fast,
inputs,
k,
proponents,
show_progress,
desc,
)
)
def _self_influence_by_checkpoints(
self,
inputs: Union[Tuple[Any, ...], DataLoader],
show_progress: bool = False,
) -> Tensor:
"""
Computes self influence scores for the examples in `inputs`, which is
either a single batch or a Pytorch `DataLoader` that yields batches. Therefore,
the computed self influence scores are *not* for the examples in training
dataset `train_dataset` (unlike when computing self influence scores using the
`influence` method). Note that if `inputs` is a single batch, this
will call `model` on that single batch, and if `inputs` yields
batches, this will call `model` on each batch that is yielded. Therefore,
please ensure that for both cases, the batch(es) that `model` is called
with are not too large, so that there will not be an out-of-memory error. This
implementation performs an outer iteration over checkpoints, and an inner
        iteration over all batches that `inputs` represents. The advantage of this
        implementation is that the checkpoints do not need to be loaded too many
        times.
Args:
            inputs (tuple or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`,
and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset`. Please see documentation for the
`train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In more detail, this computation will iterate over all
checkpoints (provided as the `checkpoints` initialization argument)
in an outer loop, and iterate over all batches that
`inputs` represents in an inner loop. Thus if
`show_progress` is True, the progress of both the outer
iteration and the inner iterations will be displayed. To show
progress, it will try to use tqdm if available for advanced
                features (e.g. time estimation). Otherwise, it will fall back to a
simple output of progress.
Default: False
Returns:
self_influence_scores (Tensor): This is a 1D tensor containing the self
influence scores of all examples in `inputs`, regardless of
whether it represents a single batch or a `DataLoader` that yields
batches.
"""
# If `inputs` is not a `DataLoader`, turn it into one.
inputs = _format_inputs_dataset(inputs)
# If `show_progress` is true, create an outer progress bar that keeps track of
# how many checkpoints have been processed
if show_progress:
# Try to determine length of inner progress bar if possible, with a default
# of `None`.
inputs_len = None
try:
inputs_len = len(inputs)
except TypeError:
warnings.warn(
"Unable to determine the number of batches in `inputs`. "
"Therefore, if showing the progress of the computation of self "
"influence scores, only the number of batches processed can be "
"displayed, and not the percentage completion of the computation, "
"nor any time estimates."
)
def get_checkpoint_contribution(checkpoint):
# This function returns a 1D tensor representing the contribution to the
# self influence score for the given checkpoint, for all batches in
# `inputs`. The length of the 1D tensor is the total number of
# examples in `inputs`.
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
# This will store a list of the contribution of the self influence score
# from each batch. Each element is a 1D tensor of length batch_size - the
# batch size of each batch in `inputs` (they do not need to be all
# the same)
checkpoint_contribution = []
_inputs = inputs
# If `show_progress` is true, create an inner progress bar that keeps track
# of how many batches have been processed for the current checkpoint
if show_progress:
_inputs = progress(
inputs,
desc=(
f"Using {self.get_name()} to compute self "
"influence. Processing batch"
),
total=inputs_len,
)
for batch in _inputs:
batch_jacobian, batch_layer_input = _basic_computation_tracincp_fast(
self,
batch[0:-1],
batch[-1],
self.loss_fn,
self.reduction_type,
)
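                # For each example, the contribution of this checkpoint to its self
                # influence score is the squared Frobenius norm of the gradient of
                # the loss w.r.t. the last fully-connected layer's weight matrix.
                # Since that gradient is the outer product of `batch_jacobian` and
                # `batch_layer_input`, the squared norm factorizes as
                # ||jacobian||^2 * ||layer input||^2, scaled by the learning rate.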
checkpoint_contribution.append(
torch.sum(batch_jacobian**2, dim=1)
* torch.sum(batch_layer_input**2, dim=1)
* learning_rate
)
# We concatenate the contributions from each batch into a single 1D tensor,
# which represents the contributions for all batches in `inputs`
return torch.cat(checkpoint_contribution, dim=0)
if show_progress:
checkpoints_progress = progress(
desc=(
f"Using {self.get_name()} to compute self "
"influence. Processing checkpoint"
),
total=len(self.checkpoints),
mininterval=0.0,
)
else:
checkpoints_progress = NullProgress()
with checkpoints_progress:
batches_self_tracin_scores = get_checkpoint_contribution(
self.checkpoints[0]
)
checkpoints_progress.update()
# The self influence score for all examples is the sum of contributions from
# each checkpoint
for checkpoint in self.checkpoints[1:]:
batches_self_tracin_scores += get_checkpoint_contribution(checkpoint)
checkpoints_progress.update()
return batches_self_tracin_scores
@log_usage()
def self_influence(
self,
inputs: Optional[Union[Tuple[Any, ...], DataLoader]] = None,
show_progress: bool = False,
outer_loop_by_checkpoints: bool = False,
) -> Tensor:
"""
Computes self influence scores for the examples in `inputs`, which is
either a single batch or a Pytorch `DataLoader` that yields batches.
        If `inputs` is not specified or is `None`, self influence scores are
        calculated for the training dataset `train_dataset`. Note that if `inputs`
is a single batch, this will call `model` on that single batch,
and if `inputs` yields batches, this will call `model`
on each batch that is yielded. Therefore, please ensure that for both cases,
the batch(es) that `model` is called with are not too large, so that
there will not be an out-of-memory error.
Internally, this computation requires iterating both over the batches in
`inputs`, as well as different model checkpoints. There are two ways
this iteration can be done. If `outer_loop_by_checkpoints` is False, the outer
iteration will be over batches, and the inner iteration will be over
checkpoints. This has the pro that displaying the progress of the computation
is more intuitive, involving displaying the number of batches for which self
influence scores have been computed. If `outer_loop_by_checkpoints` is True,
the outer iteration will be over checkpoints, and the inner iteration will be
over batches. This has the pro that the checkpoints do not need to be loaded
for each batch. For large models, loading checkpoints can be time-intensive.
Args:
inputs (tuple or DataLoader, optional): This specifies the
dataset for which self influence scores will be computed.
Either a single tuple of any, or a `DataLoader`, where each
batch yielded is a tuple of type any. In either case, the tuple
represents a single batch, where the last element is assumed to
be the labels for the batch. That is, `model(*batch[0:-1])`
produces the output for `model`, and `batch[-1]` are the labels,
if any. This is the same assumption made for each batch yielded
by training dataset `train_dataset`. Please see documentation for
the `train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch. If not provided
or `None`, self influence scores will be computed for training
dataset `train_dataset`, which yields batches satisfying the
above assumptions.
Default: None.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
                `show_progress` is true, the progress of this computation will be
displayed. In more detail, if `outer_loop_by_checkpoints` is False,
this computation will iterate over all batches in an outer loop.
Thus if `show_progress` is True, the number of batches for which
self influence scores have been computed will be displayed. If
`outer_loop_by_checkpoints` is True, this computation will iterate
over all checkpoints (provided as the `checkpoints` initialization
argument) in an outer loop, and iterate over all batches that
`inputs` represents in an inner loop. Thus if
`show_progress` is True, the progress of both the outer
iteration and the inner iterations will be displayed. To show
progress, it will try to use tqdm if available for advanced
                features (e.g. time estimation). Otherwise, it will fall back to a
simple output of progress.
Default: False
            outer_loop_by_checkpoints (bool, optional): Whether to perform an outer
iteration over checkpoints; see method description for more
details.
Default: False
"""
inputs = inputs if inputs is not None else self.train_dataloader
if outer_loop_by_checkpoints:
return self._self_influence_by_checkpoints(inputs, show_progress)
return _self_influence_by_batches_helper(
self._self_influence_by_checkpoints,
self.get_name(),
inputs,
show_progress,
)
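# The following helper is an illustrative usage sketch added for documentation
# purposes; it is not part of the original module and is never called on import.
# It shows how the two modes of `TracInCPFast.influence` described above, as well as
# `TracInCPFast.self_influence`, might be invoked. The toy model, random dataset, and
# checkpoint paths are placeholder assumptions - substitute real ones before running.
def _example_tracincp_fast_usage():  # pragma: no cover
    import torch.nn as nn
    from torch.utils.data import TensorDataset

    # toy model whose last layer is fully-connected
    model = nn.Sequential(nn.Linear(10, 16), nn.ReLU(), nn.Linear(16, 2))
    train_dataset = TensorDataset(torch.randn(100, 10), torch.randint(0, 2, (100,)))
    tracin = TracInCPFast(
        model,
        final_fc_layer=model[2],
        train_dataset=train_dataset,
        checkpoints=["checkpoint-1.pt", "checkpoint-2.pt"],  # placeholder paths
        loss_fn=nn.CrossEntropyLoss(reduction="sum"),
        batch_size=16,
    )
    test_batch = (torch.randn(8, 10), torch.randint(0, 2, (8,)))
    # influence score mode: an (8, 100) tensor of influence scores
    scores = tracin.influence(test_batch, k=None)
    # k-most influential mode: indices / scores of the top-3 proponents per example
    top_proponents = tracin.influence(test_batch, k=3, proponents=True)
    # self influence scores of the examples in the test batch
    self_scores = tracin.self_influence(test_batch)
    return scores, top_proponents, self_scores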
def _basic_computation_tracincp_fast(
influence_instance: TracInCPFast,
inputs: Tuple[Any, ...],
targets: Tensor,
loss_fn: Optional[Union[Module, Callable]] = None,
reduction_type: Optional[str] = None,
):
"""
For instances of TracInCPFast and children classes, computation of influence scores
or self influence scores repeatedly calls this function for different checkpoints
    and batches. These computations involve a loss function, which is passed in
    explicitly: callers pass `self.test_loss_fn` (and `self.test_reduction_type`)
    when processing a test batch, and `self.loss_fn` (and `self.reduction_type`)
    when processing a training batch. These two loss attributes were set in
    initialization, with `self.loss_fn` equal to the `loss_fn` initialization
    argument, and `self.test_loss_fn` equal to the `test_loss_fn` initialization
    argument if it was provided, and `loss_fn` otherwise.
Args:
        influence_instance (TracInCPFast): An instance of TracInCPFast or its children.
We assume `influence_instance` has a `loss_fn` attribute, i.e. the loss
function applied to the output of the last fully-connected layer, as
well as a `reduction_type` attribute, which indicates whether `loss_fn`
reduces the per-example losses by using their mean or sum. The
`reduction_type` attribute must either be "mean" or "sum".
inputs (tuple[Any, ...]): A batch of examples, which could be a training batch
or test batch, depending which method is the caller. Does not
represent labels, which are passed as `targets`. The assumption is
that `model(*inputs)` produces the predictions for the batch.
targets (Tensor): If computing influence scores on a loss function,
these are the labels corresponding to the batch `inputs`.
loss_fn (Callable, optional): The loss function to use when computing the
jacobian.
reduction_type (str, optional): The reduction type of `loss_fn`. This argument
is only used if `sample_wise_grads_per_batch` was true in
initialization of `influence_instance`.
Returns:
(input_jacobians, layer_inputs) (tuple): `input_jacobians` is a 2D tensor,
where each row is the jacobian of the loss, with respect to the
            *output* of the last fully-connected layer. `layer_inputs` is a 2D
            tensor, where each row is the *input* to the last fully-connected
            layer. For both, the number of rows is the number of examples in the batch
represented by `inputs` and `targets`.
"""
layer_inputs: Dict[device, Tuple[Tensor, ...]] = defaultdict()
lock = threading.Lock()
def hook_wrapper(original_module):
def _capture_inputs(layer, input, output) -> None:
r"""Save activations into layer_inputs in forward pass"""
with lock:
is_eval_tuple = isinstance(input, tuple)
if is_eval_tuple:
layer_inputs_val = tuple(inp.detach() for inp in input)
else:
layer_inputs_val = input.detach()
layer_inputs[layer_inputs_val[0].device] = layer_inputs_val
return _capture_inputs
assert isinstance(influence_instance.final_fc_layer, Module)
handle = influence_instance.final_fc_layer.register_forward_hook(
hook_wrapper(influence_instance.final_fc_layer)
)
out = influence_instance.model(*inputs)
assert loss_fn is not None, "loss function is required"
assert reduction_type in [
"sum",
"mean",
], 'reduction_type must be either "mean" or "sum"'
input_jacobians = _jacobian_loss_wrt_inputs(
loss_fn,
out,
targets,
influence_instance.vectorize,
reduction_type,
)
handle.remove()
device_ids = cast(
Union[None, List[int]],
influence_instance.model.device_ids
if hasattr(influence_instance.model, "device_ids")
else None,
)
key_list = _sort_key_list(list(layer_inputs.keys()), device_ids)
_layer_inputs = _gather_distributed_tensors(layer_inputs, key_list=key_list)[0]
assert len(input_jacobians.shape) == 2
return input_jacobians, _layer_inputs
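# The check below is an illustrative sketch added for documentation purposes; it is
# not part of the original module and is never called on import. It verifies the
# factorization that `_basic_computation_tracincp_fast` and
# `_influence_batch_tracincp_fast` rely on: for a final fully-connected layer, the
# gradient of a sum-reduced loss w.r.t. the weight matrix is the outer product of the
# loss jacobian w.r.t. the layer *output* and the layer *input*, so the dot product of
# two such gradients factorizes as (jacobian . jacobian') * (input . input').
def _check_last_layer_factorization():  # pragma: no cover
    import torch.nn.functional as F

    torch.manual_seed(0)
    in_dim, out_dim = 4, 3
    fc = torch.nn.Linear(in_dim, out_dim, bias=False)

    def grad_wrt_weight(x, target):
        # gradient of the (sum-reduced) loss w.r.t. the weight matrix, via autograd
        fc.zero_grad()
        F.cross_entropy(fc(x), target, reduction="sum").backward()
        return fc.weight.grad.clone()

    def jacobian_wrt_output(x, target):
        # jacobian of the loss w.r.t. the layer *output*, the quantity that
        # `_basic_computation_tracincp_fast` returns as `input_jacobians`
        out = fc(x).detach().requires_grad_(True)
        F.cross_entropy(out, target, reduction="sum").backward()
        return out.grad[0]

    x_train, x_test = torch.randn(1, in_dim), torch.randn(1, in_dim)
    y_train, y_test = torch.tensor([0]), torch.tensor([1])

    g_train = grad_wrt_weight(x_train, y_train)
    g_test = grad_wrt_weight(x_test, y_test)
    j_train = jacobian_wrt_output(x_train, y_train)
    j_test = jacobian_wrt_output(x_test, y_test)

    lhs = torch.sum(g_train * g_test)  # dot product of flattened weight gradients
    rhs = torch.dot(j_train, j_test) * torch.dot(x_train[0], x_test[0])
    assert torch.allclose(lhs, rhs, atol=1e-4)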
class TracInCPFastRandProj(TracInCPFast):
r"""
A version of TracInCPFast which is optimized for "interactive" calls to
`influence` for the purpose of calculating proponents / opponents, or
influence scores. "Interactive" means there will be multiple calls to
`influence`, with each call for a different batch of test examples, and
subsequent calls rely on the results of previous calls. The implementation in
this class has been optimized so that each call to `influence` is fast, so that
it can be used for interactive analysis. This class should only be used for
interactive use cases. It should not be used if `influence` will only be
called once, because to enable fast calls to `influence`, time and memory
intensive preprocessing is required in `__init__`. Furthermore, it should not
be used to calculate self influence scores - `TracInCPFast` should be used
instead for that purpose. To enable interactive analysis, this implementation
computes and saves "embedding" vectors for all training examples in
`train_dataset`. Crucially, the influence score of a training
example on a test example is simply the dot-product of their corresponding
vectors, and proponents / opponents can be found by first storing vectors for
training examples in a nearest-neighbor data structure, and then finding the
nearest-neighbors for a test example in terms of dot-product (see appendix F
of the TracIn paper). This class should only be used if calls to `influence`
to obtain proponents / opponents or influence scores will be made in an
"interactive" manner, and there is sufficient memory to store vectors for the
entire `train_dataset`. This is because in order to enable interactive
    analysis, this implementation incurs overhead in `__init__` to set up the
    nearest-neighbors data structure, which is both time and memory intensive, as
    vectors corresponding to all training examples need to be stored. To reduce
memory usage, this implementation enables random projections of those vectors.
Note that the influence scores computed with random projections are less
accurate, though correct in expectation.
    In more detail regarding the "embedding" vectors - the influence of a training
    example on a test example, when only considering gradients in the last
    fully-connected layer, is the sum of the contributions from each checkpoint. The
    contribution from a given checkpoint is
    :math:`(x^T x')(\nabla_y f(y)^T \nabla_{y'} f(y'))`, using the notation in the
    description of `TracInCPFast`. As is, this is not a dot-product of 2 vectors.
    However, we can rewrite that contribution as
    :math:`(x \nabla_y f(y)^T) \cdot (x' \nabla_{y'} f(y')^T)`. Both terms in this
    product are 2D matrices, as they are outer products, and the "product" is actually
    a dot-product, treating both matrices as vectors. Therefore, for a given
    checkpoint, its contribution to the "embedding" of an example is just the
    outer-product :math:`(x \nabla_y f(y)^T)`, flattened. Furthermore, to reduce the
dimension of this contribution, we can right-multiply and
left-multiply the outer-product with two separate projection matrices. These
    transform :math:`\nabla_y f(y)` and :math:`x` to lower dimensional vectors. While
    the dimensions of these two lower dimensional vectors do not necessarily need to
be the same, in our implementation, we let them be the same, both equal to the
square root of the desired projection dimension. Finally, the embedding of an
example is the concatenation of the contributions from each checkpoint.
"""
def __init__(
self,
model: Module,
final_fc_layer: Union[Module, str],
train_dataset: Union[Dataset, DataLoader],
checkpoints: Union[str, List[str], Iterator],
checkpoints_load_func: Callable = _load_flexible_state_dict,
loss_fn: Optional[Union[Module, Callable]] = None,
batch_size: Union[int, None] = 1,
test_loss_fn: Optional[Union[Module, Callable]] = None,
vectorize: bool = False,
nearest_neighbors: Optional[NearestNeighbors] = None,
        projection_dim: Optional[int] = None,
seed: int = 0,
) -> None:
r"""
Args:
model (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
final_fc_layer (torch.nn.Module or str): The last fully connected layer in
the network for which gradients will be approximated via fast random
projection method. Can be either the layer module itself, or the
fully qualified name of the layer if it is a defined attribute of
the passed `model`.
train_dataset (torch.utils.data.Dataset or torch.utils.data.DataLoader):
In the `influence` method, we compute the influence score of
training examples on examples in a test batch.
This argument represents the training dataset containing those
training examples. In order to compute those influence scores, we
will create a Pytorch DataLoader yielding batches of training
examples that is then used for processing. If this argument is
already a Pytorch Dataloader, that DataLoader can be directly
used for processing. If it is instead a Pytorch Dataset, we will
create a DataLoader using it, with batch size specified by
`batch_size`. For efficiency purposes, the batch size of the
DataLoader used for processing should be as large as possible, but
not too large, so that certain intermediate quantities created
from a batch still fit in memory. Therefore, if
`train_dataset` is a Dataset, `batch_size` should be large.
If `train_dataset` was already a DataLoader to begin with,
it should have been constructed to have a large batch size. It is
assumed that the Dataloader (regardless of whether it is created
from a Pytorch Dataset or not) yields tuples. For a `batch` that is
yielded, of length `L`, it is assumed that the forward function of
`model` accepts `L-1` arguments, and the last element of `batch` is
the label. In other words, `model(*batch[:-1])` gives the output of
`model`, and `batch[-1]` are the labels for the batch.
checkpoints (str, list[str], or Iterator): Either the directory of the
path to store and retrieve model checkpoints, a list of
filepaths with checkpoints from which to load, or an iterator which
returns objects from which to load checkpoints.
checkpoints_load_func (Callable, optional): The function to load a saved
checkpoint into a model to update its parameters, and get the
learning rate if it is saved. By default uses a utility to load a
model saved as a state dict.
Default: _load_flexible_state_dict
loss_fn (Callable, optional): The loss function applied to model. `loss_fn`
must be a "reduction" loss function that reduces the per-example
losses in a batch, and returns a single scalar Tensor. Furthermore,
the reduction must be the *sum* of the per-example losses. For
instance, `nn.BCELoss(reduction="sum")` is acceptable, but
`nn.BCELoss(reduction="mean")` is *not* acceptable.
Default: None
batch_size (int or None, optional): Batch size of the DataLoader created to
iterate through `train_dataset`, if it is a Dataset.
`batch_size` should be chosen as large as possible so that certain
intermediate quantities created from a batch still fit in memory.
Specific implementations of `TracInCPBase` will detail the size of
the intermediate quantities. `batch_size` must be an int if
`train_dataset` is a Dataset. If `train_dataset`
is a DataLoader, then `batch_size` is ignored as an argument.
Default: 1
test_loss_fn (Callable, optional): In some cases, one may want to use a
separate loss functions for training examples, i.e. those in
`train_dataset`, and for test examples, i.e. those
represented by the `inputs` and `targets` arguments to the
`influence` method. For example, if one wants to calculate the
influence score of a training example on a test example's
prediction for a fixed class, `test_loss_fn` could map from the
logits for all classes to the logits for a fixed class.
                `test_loss_fn` needs to satisfy the same constraints as `loss_fn`.
Thus, the same checks that we apply to `loss_fn` are also applied
to `test_loss_fn`, if the latter is provided. If not provided, the
loss function for test examples is assumed to be the same as the
loss function for training examples, i.e. `loss_fn`.
vectorize (bool): Flag to use experimental vectorize functionality
for `torch.autograd.functional.jacobian`.
Default: False
nearest_neighbors (NearestNeighbors, optional): The NearestNeighbors
instance for finding nearest neighbors. If None, defaults to
`AnnoyNearestNeighbors(n_trees=10)`.
Default: None
projection_dim (int, optional): Each example will be represented in
the nearest neighbors data structure with a vector. This vector
is the concatenation of several "checkpoint vectors", each of which
is computed using a different checkpoint in the `checkpoints`
argument. If `projection_dim` is an int, it represents the
dimension we will project each "checkpoint vector" to, so that the
vector for each example will be of dimension at most
`projection_dim` * C, where C is the number of checkpoints.
Regarding the dimension of each vector, D: Let I be the dimension
of the output of the last fully-connected layer times the dimension
of the input of the last fully-connected layer. If `projection_dim`
is not `None`, then D = min(I * C, `projection_dim` * C).
Otherwise, D = I * C. In summary, if `projection_dim` is None, the
dimension of this vector will be determined by the size of the
input and output of the last fully-connected layer of `model`, and
the number of checkpoints. Otherwise, `projection_dim` must be an
int, and random projection will be performed to ensure that the
vector is of dimension no more than `projection_dim` * C.
`projection_dim` corresponds to the variable d in the top of page
15 of the TracIn paper: https://arxiv.org/abs/2002.08484.
Default: None
seed (int, optional): Because this implementation chooses a random
projection, its output is random. Setting this seed specifies the
random seed when choosing the random projection.
Default: 0
"""
TracInCPFast.__init__(
self,
model,
final_fc_layer,
train_dataset,
checkpoints,
checkpoints_load_func,
loss_fn,
batch_size,
test_loss_fn,
vectorize,
)
warnings.warn(
            (
                "WARNING: This implementation stores quantities related to the "
                "entire `train_dataset` in memory, and may result in running "
"out of memory. If this happens, consider using %s instead, for which "
"each call to `influence` to compute influence scores or proponents "
"will be slower, but may avoid running out of memory."
)
% "`TracInCPFast`"
)
self.nearest_neighbors = (
AnnoyNearestNeighbors() if nearest_neighbors is None else nearest_neighbors
)
self.projection_dim = projection_dim
torch.manual_seed(seed) # for reproducibility
self.projection_quantities = self._set_projections_tracincp_fast_rand_proj(
self.train_dataloader,
)
self.src_intermediate_quantities = (
self._get_intermediate_quantities_tracincp_fast_rand_proj(
self.train_dataloader,
self.projection_quantities,
)
)
self._process_src_intermediate_quantities_tracincp_fast_rand_proj(
self.src_intermediate_quantities,
)
def _influence( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
) -> Tensor:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
Returns:
influence_scores (Tensor): Influence scores from the `TracInCPFastRandProj`
method. Its shape is `(input_size, train_dataset_size)`, where `input_size`
is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in
training dataset `train_dataset`. For example:
                `influence_scores[i][j]` is the influence score of the j-th training
                example on the i-th example in the test batch.
"""
# TODO: after D35721609 lands, use helper function
# `TracInCP._influence_rand_proj` here to avoid duplicated logic
input_projections = self._get_intermediate_quantities_tracincp_fast_rand_proj(
inputs,
self.projection_quantities,
test=True,
)
src_projections = self.src_intermediate_quantities
return torch.matmul(input_projections, src_projections.T)
def _get_k_most_influential( # type: ignore[override]
self,
inputs: Tuple[Any, ...],
k: int = 5,
proponents: bool = True,
) -> KMostInfluentialResults:
r"""
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): The number of proponents or opponents to return per test
example.
Default: 5
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`)
Default: True
Returns:
(indices, influence_scores) (namedtuple): `indices` is a torch.long Tensor
that contains the indices of the proponents (or opponents) for each
test example. Its dimension is `(inputs_batch_size, k)`, where
`inputs_batch_size` is the number of examples in `inputs`. For
example, if `proponents==True`, `indices[i][j]` is the index of the
example in training dataset `train_dataset` with the
                j-th highest influence score on the i-th example in `inputs`.
`indices` is a `torch.long` tensor so that it can directly be used
to index other tensors. Each row of `influence_scores` contains the
influence scores for a different test example, in sorted order. In
particular, `influence_scores[i][j]` is the influence score of
example `indices[i][j]` in training dataset `train_dataset`
on example `i` in the test batch represented by `inputs`.
"""
input_projections = self._get_intermediate_quantities_tracincp_fast_rand_proj(
inputs,
self.projection_quantities,
test=True,
)
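        # The nearest-neighbors structure returns the training examples with the
        # *largest* dot product. To find opponents (smallest influence scores) with
        # the same structure, we negate the test projections before the query and
        # negate the returned distances afterwards, so that the reported influence
        # scores keep their original sign.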
multiplier = 1 if proponents else -1
input_projections *= multiplier
indices, distances = self.nearest_neighbors.get_nearest_neighbors(
input_projections, k
)
distances *= multiplier
return KMostInfluentialResults(indices, distances)
@log_usage()
def self_influence(
self,
inputs: Optional[Union[Tuple[Any, ...], DataLoader]] = None,
show_progress: bool = False,
outer_loop_by_checkpoints: bool = False,
) -> Tensor:
"""
NOT IMPLEMENTED - no need to implement `TracInCPFastRandProj.self_influence`,
as `TracInCPFast.self_influence` is sufficient - the latter does not benefit
from random projections, since no quantities associated with a training
        example are stored (other than its self influence score).
Computes self influence scores for a single batch or a Pytorch `DataLoader`
that yields batches. Note that if `inputs` is a single batch, this
will call `model` on that single batch, and if `inputs` yields
batches, this will call `model` on each batch that is yielded. Therefore,
please ensure that for both cases, the batch(es) that `model` is called
with are not too large, so that there will not be an out-of-memory error.
Args:
inputs (tuple or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
`model(*batch[0:-1])` produces the output for `model`,
and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset`. Please see documentation for the
`train_dataset` argument to `TracInCP.__init__` for
more details on the assumed structure of a batch.
show_progress (bool, optional): Computation of self influence scores can
take a long time if `inputs` represents many examples. If
`show_progress` is true, the progress of this computation will be
displayed. In more detail, this computation will iterate over all
checkpoints (provided as the `checkpoints` initialization argument)
and all batches that `inputs` represents. Therefore, the
total number of (checkpoint, batch) combinations that need to be
iterated over is
(# of checkpoints x # of batches that `inputs` represents).
If `show_progress` is True, the total number of such combinations
that have been iterated over is displayed. It will try to use tqdm
if available for advanced features (e.g. time estimation).
                Otherwise, it will fall back to a simple output of progress.
Default: False
            outer_loop_by_checkpoints (bool, optional): Whether to perform an outer
iteration over checkpoints; see method description for more
details.
Default: False
Returns:
self_influence_scores (Tensor): This is a 1D tensor containing the self
influence scores of all examples in `inputs`, regardless of
whether it represents a single batch or a `DataLoader` that yields
batches.
"""
warnings.warn(
            (
                "WARNING: If calculating self influence scores, when only considering "
                "gradients with respect to the last fully-connected layer, "
                "`TracInCPFastRandProj` should not be used. Instead, please use "
                "`TracInCPFast`. This is because when calculating self influence "
                "scores, no quantities associated with a training example are stored, "
                "so that the memory-saving benefit of the random projections used by "
                "`TracInCPFastRandProj` is not needed. Furthermore, since "
                "random projections result only in approximate self influence "
                "scores, there is no reason to use `TracInCPFastRandProj` when "
                "calculating self influence scores."
)
)
raise NotImplementedError
@log_usage()
def influence( # type: ignore[override]
self,
inputs: Optional[Tuple[Any, ...]] = None,
k: int = 5,
proponents: bool = True,
) -> Union[Tensor, KMostInfluentialResults]:
r"""
This is the key method of this class, and can be run in 2 different modes,
where the mode that is run depends on the arguments passed to this method:
- influence score mode: This mode is used if `k` is None. This mode computes
the influence score of every example in training dataset `train_dataset`
on every example in the test batch represented by `inputs`.
- k-most influential mode: This mode is used if `k` is not None, and an int.
This mode computes the proponents or opponents of every example in the
test batch represented by `inputs`. In particular, for each test example in
the test batch, this mode computes its proponents (resp. opponents),
which are the indices in the training dataset `train_dataset` of the
training examples with the `k` highest (resp. lowest) influence scores on the
test example. Proponents are computed if `proponents` is True. Otherwise,
opponents are computed. For each test example, this method also returns the
actual influence score of each proponent (resp. opponent) on the test
example.
Args:
inputs (tuple): `inputs` is the test batch and is a tuple of
any, where the last element is assumed to be the labels for the
batch. That is, `model(*batch[0:-1])` produces the output for
`model`, and `batch[-1]` are the labels, if any. This is the same
assumption made for each batch yielded by training dataset
`train_dataset` - please see its documentation in `__init__` for
more details on the assumed structure of a batch.
k (int, optional): If not provided or `None`, the influence score mode will
be run. Otherwise, the k-most influential mode will be run,
and `k` is the number of proponents / opponents to return per
example in the test batch.
                Default: 5
proponents (bool, optional): Whether seeking proponents (`proponents=True`)
or opponents (`proponents=False`), if running in k-most influential
mode.
Default: True
Returns:
The return value of this method depends on which mode is run.
- influence score mode: if this mode is run (`k` is None), returns a 2D
tensor `influence_scores` of shape `(input_size, train_dataset_size)`,
where `input_size` is the number of examples in the test batch, and
`train_dataset_size` is the number of examples in training dataset
`train_dataset`. In other words, `influence_scores[i][j]` is the
influence score of the `j`-th example in `train_dataset` on the `i`-th
example in the test batch.
- k-most influential mode: if this mode is run (`k` is an int), returns
a namedtuple `(indices, influence_scores)`. `indices` is a 2D tensor of
shape `(input_size, k)`, where `input_size` is the number of examples in
the test batch. If computing proponents (resp. opponents),
`indices[i][j]` is the index in training dataset `train_dataset` of the
example with the `j`-th highest (resp. lowest) influence score (out of
the examples in `train_dataset`) on the `i`-th example in the test
batch. `influence_scores` contains the corresponding influence scores.
In particular, `influence_scores[i][j]` is the influence score of example
`indices[i][j]` in `train_dataset` on example `i` in the test batch
represented by `inputs`.
"""
        assert inputs is not None, (
            "`inputs` argument is required. "
            "`TracInCPFastRandProj` does not support computing self influence scores. "
            "Even if it did, one would use the `self_influence` method."
)
return _influence_route_to_helpers(
self,
inputs,
k,
proponents,
)
def _set_projections_tracincp_fast_rand_proj(
self,
dataloader: DataLoader,
) -> Optional[Tuple[torch.Tensor, torch.Tensor]]:
"""
returns the variables `jacobian_projection` and `layer_input_projection`
if needed, based on `self.projection_dim`. The two variables are
        used by `self._get_intermediate_quantities_tracincp_fast_rand_proj`. They are both None
if projection is not needed, due to the intermediate quantities (see the
        `_get_intermediate_quantities_tracincp_fast_rand_proj` method for details) being no
greater than `self.projection_dim` * C even without projection, where C is the
number of checkpoints in the `checkpoints` argument to
`TracInCPFastRandProj.__init__`.
Args:
dataloader (DataLoader): determining the projection requires knowing the
dimensionality of the last layer's parameters (`jacobian_dim`
below) and its input (`layer_input_dim` below). These are
determined by passing a batch to `model`. `dataloader`
provides that batch.
Returns:
jacobian_projection (Tensor or None): Projection matrix to apply to
Jacobian of last layer to reduce its dimension, if needed.
None otherwise.
input_projection (Tensor or None): Projection matrix to apply to input of
last layer to reduce its dimension, if needed. None otherwise.
"""
# figure out projection dimensions, if needed
projection_dim = self.projection_dim
projection_quantities = None
if not (projection_dim is None):
# figure out original dimensions by looking at data, passing through network
self.checkpoints_load_func(self.model, next(iter(self.checkpoints)))
batch = next(iter(dataloader))
batch_jacobians, batch_layer_inputs = _basic_computation_tracincp_fast(
self,
batch[0:-1],
batch[-1],
self.loss_fn,
self.reduction_type,
)
jacobian_dim = batch_jacobians.shape[
1
] # this is the dimension of the output of the last fully-connected layer
layer_input_dim = batch_layer_inputs.shape[
1
] # this is the dimension of the input of the last fully-connected layer
device = batch_jacobians.device
dtype = batch_jacobians.dtype
# choose projection if needed
# without projection, the dimension of the intermediate quantities returned
            # by `_get_intermediate_quantities_tracincp_fast_rand_proj` will be
# `jacobian_dim` * `layer_input_dim` * number of checkpoints
# this is because for each checkpoint, we compute a "partial" intermediate
# quantity, and the intermediate quantity is the concatenation of the
# "partial" intermediate quantities, and the dimension of each "partial"
# intermediate quantity, without projection, is `jacobian_dim` *
# `layer_input_dim`. However, `projection_dim` refers to the maximum
# allowable dimension of the "partial" intermediate quantity. Therefore,
# we only project if `jacobian_dim` * `layer_input_dim` > `projection_dim`.
# `projection_dim` corresponds to the variable d in the top of page 15 of
# the TracIn paper: https://arxiv.org/abs/2002.08484.
if jacobian_dim * layer_input_dim > projection_dim:
jacobian_projection_dim = min(int(projection_dim**0.5), jacobian_dim)
layer_input_projection_dim = min(
int(projection_dim**0.5), layer_input_dim
)
jacobian_projection = torch.normal(
torch.zeros(jacobian_dim, jacobian_projection_dim),
1.0 / jacobian_projection_dim**0.5,
)
layer_input_projection = torch.normal(
torch.zeros(layer_input_dim, layer_input_projection_dim),
1.0 / layer_input_projection_dim**0.5,
)
projection_quantities = jacobian_projection.to(
device=device, dtype=dtype
), layer_input_projection.to(device=device, dtype=dtype)
return projection_quantities
def _process_src_intermediate_quantities_tracincp_fast_rand_proj(
self,
src_intermediate_quantities: torch.Tensor,
):
"""
        Assumes `self._get_intermediate_quantities_tracincp_fast_rand_proj` returns
vector representations for each example, and that influence between a
training and test example is obtained by taking the dot product of their
vector representations. In this case, given a test example, its proponents
can be found by storing the vector representations for training examples
        into a data structure enabling fast largest-dot-product computation. This
method creates that data structure. This method has side effects.
Args:
src_intermediate_quantities (Tensor): the output of the
                `_get_intermediate_quantities_tracincp_fast_rand_proj` function when
applied to training dataset `train_dataset`. This
output is the vector representation of all training examples.
The dot product between the representation of a training example
and the representation of a test example gives the influence score
of the training example on the test example.
"""
self.nearest_neighbors.setup(src_intermediate_quantities)
def _get_intermediate_quantities_tracincp_fast_rand_proj(
self,
inputs: Union[Tuple[Any, ...], DataLoader],
projection_quantities: Optional[Tuple[torch.Tensor, torch.Tensor]],
test: bool = False,
) -> torch.Tensor:
r"""
This method computes vectors that can be used to compute influence. (see
Appendix F, page 15). Crucially, the influence score between a test example
and a training example is simply the dot product of their respective
vectors. This means that the training example with the largest influence score
on a given test example can be found using a nearest-neighbor (more
specifically, largest dot-product) data structure.
Args:
inputs (Tuple, or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
                `model(*batch[0:-1])` produces the output for `model`, and
                `batch[-1]` are the labels, if any. Here, `model` is the model
provided in initialization. This is the same assumption made for
each batch yielded by training dataset `train_dataset`. Please see
documentation for the `train_dataset` argument to
`TracInCPFastRandProj.__init__` for more details on the assumed
structure of a batch.
projection_quantities (tuple or None): Is either the two tensors defining
the randomized projections to apply, or None, which means no
projection is to be applied.
test (bool): If True, the intermediate quantities are computed using
`self.test_loss_fn`. Otherwise, they are computed using
`self.loss_fn`.
Default: False
Returns:
intermediate_quantities (Tensor): A tensor of dimension
                (N, D * C), where N is the total number of examples in `inputs`, C
is the number of checkpoints passed as the `checkpoints` argument
of `TracInCPFastRandProj.__init__`, and each row represents the
vector for an example. Regarding D: Let I be the dimension of the
output of the last fully-connected layer times the dimension of the
input of the last fully-connected layer. If `self.projection_dim`
is specified in initialization,
D = min(I * C, `self.projection_dim` * C). Otherwise, D = I * C.
In summary, if `self.projection_dim` is None, the dimension of each
vector will be determined by the size of the input and output of
the last fully-connected layer of `model`. Otherwise,
`self.projection_dim` must be an int, and random projection will be
performed to ensure that the vector is of dimension no more than
`self.projection_dim` * C. `self.projection_dim` corresponds to
the variable d in the top of page 15 of the TracIn paper:
https://arxiv.org/abs/2002.08484.
"""
# if `inputs` is not a `DataLoader`, turn it into one.
inputs = _format_inputs_dataset(inputs)
# internally, whether `projection_quantities` is None determines whether
# any projection will be applied to reduce the dimension of the "embedding"
# vectors. If projection will be applied, there are actually 2 different
# projection matrices - one to project the `input_jacobians`, and one to
# project the `layer_inputs`. See below for details of those two quantities.
# here, we extract the corresponding projection matrices for those two
# quantities, if doing projection. Note that the same projections are used
# for each checkpoint.
project = False
if projection_quantities is not None:
project = True
jacobian_projection, layer_input_projection = projection_quantities
# for each checkpoint, we will populate a list containing the contribution of
# the checkpoint for each batch
checkpoint_contributions: List[Union[List, Tensor]] = [
[] for _ in self.checkpoints
]
# the "embedding" vector is the concatenation of contributions from each
# checkpoint, which we compute one by one
for (j, checkpoint) in enumerate(self.checkpoints):
assert (
checkpoint is not None
), "None returned from `checkpoints`, cannot load."
learning_rate = self.checkpoints_load_func(self.model, checkpoint)
learning_rate_root = learning_rate**0.5
# after loading a checkpoint, we compute the contribution of that
# checkpoint, for *all* batches (instead of a single batch). this enables
# increased efficiency.
for batch in inputs:
# compute `input_jacobians` and `layer_inputs`, for a given checkpoint
# using a helper function. `input_jacobians` is a 2D tensor,
# where each row is the jacobian of the loss, with respect to the
# *output* of the last fully-connected layer. `layer_inputs` is a 2D
# tensor, where each row is the *input* to the last fully-connected
# layer. For both, the length is the number of examples in `batch`
input_jacobians, layer_inputs = _basic_computation_tracincp_fast(
self,
batch[0:-1],
batch[-1],
self.test_loss_fn,
self.test_reduction_type,
)
# if doing projection, project those two quantities
if project:
input_jacobians = torch.matmul(input_jacobians, jacobian_projection)
layer_inputs = torch.matmul(layer_inputs, layer_input_projection)
# for an example, the contribution to the "embedding" vector from each
# checkpoint is the outer product of its `input_jacobian` and its
# `layer_input`, flattened to a 1D tensor. here, we perform this
# for the entire batch. we append the contribution to a list containing
# the contribution of all batches, from the checkpoint.
cast(list, checkpoint_contributions[j]).append(
torch.matmul(
torch.unsqueeze(
input_jacobians, 2
), # size is (batch_size, output_size, 1)
torch.unsqueeze(
layer_inputs, 1
), # size is (batch_size, 1, input_size)
).flatten(
start_dim=1
) # matmul does a batched matrix multiplication to return a 3D
# tensor. each element along the batch (0-th) dimension is the
# matrix product of a (output_size, 1) and (1, input_size) tensor
# in other words, each element is an outer product, and the matmul
# is just doing a batched outer product. this is what we want, as
# the contribution to the "embedding" for an example is the outer
# product of the last layer's input and the gradient of its output.
# finally, we flatten the 3rd dimension so that the contribution to
# the embedding for this checkpoint is a 2D tensor, i.e. each
# example's contribution to the embedding is a 1D tensor.
* learning_rate_root
)
# once we have computed the contribution from each batch, for a given
# checkpoint, we concatenate them along the batch dimension to get a
# single 2D tensor for that checkpoint
checkpoint_contributions[j] = torch.cat(
checkpoint_contributions[j], dim=0 # type: ignore
)
# finally, we concatenate along the checkpoint dimension, to get a tensor of
# shape (batch_size, projection_dim * number of checkpoints)
# each row in this result is the "embedding" vector for an example in `batch`
return torch.cat(checkpoint_contributions, dim=1) # type: ignore
@log_usage()
def compute_intermediate_quantities(
self,
inputs: Union[Tuple[Any, ...], DataLoader],
) -> Tensor:
"""
Computes "embedding" vectors for all examples in a single batch, or a
`Dataloader` that yields batches. These embedding vectors are constructed so
that the influence score of a training example on a test example is simply the
dot-product of their corresponding vectors. Please see the documentation for
`TracInCPFastRandProj.__init__` for more details. Allowing a `DataLoader`
yielding batches to be passed in (as opposed to a single batch) gives the
potential to improve efficiency, because we load each checkpoint only once in
this method call. Thus if a `DataLoader` yielding batches is passed in, this
reduces the total number of times each checkpoint is loaded for a dataset,
compared to if a single batch is passed in. The reason we do not just increase
the batch size is that for large models, large batches do not fit in memory.
Args:
inputs (Tuple, or DataLoader): Either a single tuple of any, or a
`DataLoader`, where each batch yielded is a tuple of any. In
either case, the tuple represents a single batch, where the last
element is assumed to be the labels for the batch. That is,
                `model(*batch[0:-1])` produces the output for `model`, and
                `batch[-1]` are the labels, if any. Here, `model` is the model
provided in initialization. This is the same assumption made for
each batch yielded by training dataset `train_dataset`. Please see
documentation for the `train_dataset` argument to
`TracInCPFastRandProj.__init__` for more details on the assumed
structure of a batch.
Returns:
intermediate_quantities (Tensor): A tensor of dimension
(N, D * C), where N is total number of examples in
`inputs`, C is the number of checkpoints passed as the
`checkpoints` argument of `TracInCPFastRandProj.__init__`, and each
row represents the vector for an example. Regarding D: Let I be the
dimension of the output of the last fully-connected layer times the
dimension of the input of the last fully-connected layer. If
`self.projection_dim` is specified in initialization,
D = min(I * C, `self.projection_dim` * C). Otherwise, D = I * C.
In summary, if `self.projection_dim` is None, the dimension of each
vector will be determined by the size of the input and output of
the last fully-connected layer of `model`. Otherwise,
`self.projection_dim` must be an int, and random projection will be
performed to ensure that the vector is of dimension no more than
`self.projection_dim` * C. `self.projection_dim` corresponds to
the variable d in the top of page 15 of the TracIn paper:
https://arxiv.org/pdf/2002.08484.pdf.
"""
return self._get_intermediate_quantities_tracincp_fast_rand_proj(
inputs, self.projection_quantities
)
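# The check below is an illustrative sketch added for documentation purposes; it is
# not part of the original module and is never called on import. It demonstrates the
# identity behind the "embedding" vectors used by `TracInCPFastRandProj`: the dot
# product of two flattened outer products equals the product of the corresponding
# factor dot products, and independent Gaussian random projections of the two factors
# (scaled as in `_set_projections_tracincp_fast_rand_proj`) preserve that dot product
# approximately, and exactly in expectation.
def _check_rand_proj_embedding_identity():  # pragma: no cover
    torch.manual_seed(0)
    jac_dim, inp_dim, proj_dim = 8, 6, 4

    j1, x1 = torch.randn(jac_dim), torch.randn(inp_dim)  # "training" factors
    j2, x2 = torch.randn(jac_dim), torch.randn(inp_dim)  # "test" factors

    # exact identity: <flatten(j1 x1^T), flatten(j2 x2^T)> = (j1 . j2) * (x1 . x2)
    exact = torch.dot(torch.outer(j1, x1).flatten(), torch.outer(j2, x2).flatten())
    assert torch.allclose(exact, torch.dot(j1, j2) * torch.dot(x1, x2), atol=1e-4)

    # Gaussian random projections with entries of variance 1 / proj_dim
    jacobian_projection = torch.randn(jac_dim, proj_dim) / proj_dim**0.5
    layer_input_projection = torch.randn(inp_dim, proj_dim) / proj_dim**0.5
    approx = torch.dot(
        torch.outer(j1 @ jacobian_projection, x1 @ layer_input_projection).flatten(),
        torch.outer(j2 @ jacobian_projection, x2 @ layer_input_projection).flatten(),
    )
    # the projected dot product only approximates the exact influence contribution
    print(f"exact: {exact.item():.4f}, projected approximation: {approx.item():.4f}")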
|
#!/usr/bin/env python3
from abc import ABC, abstractmethod
from typing import Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
class StochasticGatesBase(Module, ABC):
"""
Abstract module for Stochastic Gates.
Stochastic Gates is a practical solution to add L0 norm regularization for neural
networks. L0 regularization, which explicitly penalizes any present (non-zero)
parameters, can help network pruning and feature selection, but directly optimizing
    L0 is a non-differentiable combinatorial problem. As a surrogate for L0, Stochastic Gates
uses certain continuous probability distributions (e.g., Concrete, Gaussian) with
hard-sigmoid rectification as a continuous smoothed Bernoulli distribution
    determining the weight of a parameter, i.e., gate. Then L0 is equal to the gates'
    non-zero probability represented by the parameters of the continuous probability
    distribution. The gate value can also be reparameterized in terms of the
    distribution parameters and a noise variable, so the expected L0 can be optimized
    through learning the distribution parameters via stochastic gradients.
This base class defines the shared variables and forward logic of how the input is
gated regardless of the underneath distribution. The actual implementation should
extend this class and implement the distribution specific functions.
"""
def __init__(
self,
n_gates: int,
mask: Optional[Tensor] = None,
reg_weight: float = 1.0,
reg_reduction: str = "sum",
):
"""
Args:
n_gates (int): number of gates.
mask (Tensor, optional): If provided, this allows grouping multiple
input tensor elements to share the same stochastic gate.
This tensor should be broadcastable to match the input shape
and contain integers in the range 0 to n_gates - 1.
Indices grouped to the same stochastic gate should have the same value.
If not provided, each element in the input tensor
(on dimensions other than dim 0 - batch dim) is gated separately.
Default: None
reg_weight (float, optional): rescaling weight for L0 regularization term.
Default: 1.0
reg_reduction (str, optional): the reduction to apply to the regularization:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
applied and it will be the same as the return of ``get_active_probs``,
``'mean'``: the sum of the gates non-zero probabilities will be divided
by the number of gates, ``'sum'``: the gates non-zero probabilities will
be summed.
Default: ``'sum'``
"""
super().__init__()
if mask is not None:
max_mask_ind = mask.max().item()
assert max_mask_ind == n_gates - 1, (
f"the maximum mask index (received {max_mask_ind}) should be equal to"
f" the number of gates - 1 (received {n_gates}) since each mask"
" should correspond to a gate"
)
valid_reg_reduction = ["none", "mean", "sum"]
assert (
reg_reduction in valid_reg_reduction
), f"reg_reduction must be one of [none, mean, sum], received: {reg_reduction}"
self.reg_reduction = reg_reduction
self.n_gates = n_gates
self.register_buffer(
"mask", mask.detach().clone() if mask is not None else None
)
self.reg_weight = reg_weight
def forward(self, input_tensor: Tensor) -> Tuple[Tensor, Tensor]:
"""
Args:
input_tensor (Tensor): Tensor to be gated with stochastic gates
Returns:
tuple[Tensor, Tensor]:
- gated_input (Tensor): Tensor of the same shape weighted by the sampled
gate values
- l0_reg (Tensor): L0 regularization term to be optimized together with
model loss,
e.g. loss(model_out, target) + l0_reg
"""
if self.mask is None:
n_ele = self._get_numel_of_input(input_tensor)
assert n_ele == self.n_gates, (
"if mask is not given, each example in the input batch should have the"
" same number of elements"
f" (received {n_ele}) as gates ({self.n_gates})"
)
input_size = input_tensor.size()
batch_size = input_size[0]
gate_values = self._sample_gate_values(batch_size)
# hard-sigmoid rectification z=min(1,max(0,_z))
gate_values = torch.clamp(gate_values, min=0, max=1)
if self.mask is not None:
# use expand_as not expand/broadcast_to which do not work with torch.fx
input_mask = self.mask.expand_as(input_tensor)
# flatten all dim except batch to gather from gate values
flattened_mask = input_mask.reshape(batch_size, -1)
gate_values = torch.gather(gate_values, 1, flattened_mask)
# reshape gates(batch_size, n_elements) into input_size for point-wise mul
gate_values = gate_values.reshape(input_size)
gated_input = input_tensor * gate_values
prob_density = self._get_gate_active_probs()
if self.reg_reduction == "sum":
l0_reg = prob_density.sum()
elif self.reg_reduction == "mean":
l0_reg = prob_density.mean()
else:
l0_reg = prob_density
l0_reg *= self.reg_weight
return gated_input, l0_reg
def get_gate_values(self, clamp: bool = True) -> Tensor:
"""
        Get the gate values, which are the means of the underlying gate distributions,
        optionally clamped within 0 and 1.
        Args:
            clamp (bool, optional): whether to clamp the gate values or not. As smoothed
                Bernoulli variables, gate values are clamped within 0 and 1 by default.
                Turn this off to get the raw means of the underlying
                distribution (e.g., Concrete, Gaussian), which can be useful to
                differentiate the gates' importance when multiple gate
                values fall outside the range [0, 1].
                Default: ``True``
        Returns:
            Tensor:
            - gate_values (Tensor): value of each gate in shape(n_gates)
"""
gate_values = self._get_gate_values()
if clamp:
gate_values = torch.clamp(gate_values, min=0, max=1)
return gate_values.detach()
def get_gate_active_probs(self) -> Tensor:
"""
        Get the active probability of each gate, i.e., gate value > 0
        Returns:
            Tensor:
            - probs (Tensor): probabilities of each gate being active
              in shape(n_gates)
"""
return self._get_gate_active_probs().detach()
@abstractmethod
def _get_gate_values(self) -> Tensor:
"""
        Protected method to be overridden in the child class depending on the chosen
        distribution. Get the raw gate values derived from the learned parameters of
        the corresponding distribution, without clamping.
        Returns:
            gate_values (Tensor): gate value tensor of shape(n_gates)
"""
pass
@abstractmethod
def _sample_gate_values(self, batch_size: int) -> Tensor:
"""
        Protected method to be overridden in the child class depending on the chosen
        distribution. Sample gate values for each example in the batch from a
        probability distribution
        Args:
            batch_size (int): input batch size
        Returns:
            gate_values (Tensor): gate value tensor of shape(batch_size, n_gates)
"""
pass
@abstractmethod
def _get_gate_active_probs(self) -> Tensor:
"""
        Protected method to be overridden in the child class depending on the chosen
        distribution. Get the active probability of each gate, i.e., gate value > 0
        Returns:
            probs (Tensor): probabilities of each gate being active
                in shape(n_gates)
"""
pass
def _get_numel_of_input(self, input_tensor: Tensor) -> int:
"""
Get the number of elements of a single example in the batched input tensor
"""
assert input_tensor.dim() > 1, (
"The input tensor must have more than 1 dimension with the 1st dimention"
" being batch size;"
f" received input tensor shape {input_tensor.size()}"
)
return input_tensor[0].numel()
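# --- Illustrative sketch (not part of captum) --------------------------------
# A minimal, hypothetical concrete subclass showing which methods a
# distribution-specific implementation must provide. "DeterministicGates" is a
# made-up name used only for illustration: it uses a fixed learnable value per
# gate with no sampling noise, so it is a sketch of the interface rather than a
# real stochastic gate distribution.
if __name__ == "__main__":
    import torch
    from torch import nn, Tensor

    class DeterministicGates(StochasticGatesBase):
        def __init__(self, n_gates: int, **kwargs) -> None:
            super().__init__(n_gates, **kwargs)
            # learnable per-gate value, initialized at 0.5
            self.value = nn.Parameter(torch.full((n_gates,), 0.5))

        def _get_gate_values(self) -> Tensor:
            # raw (unclamped) gate values
            return self.value

        def _sample_gate_values(self, batch_size: int) -> Tensor:
            # no stochasticity: every example sees the same gate values
            return self.value.expand(batch_size, self.n_gates)

        def _get_gate_active_probs(self) -> Tensor:
            # with no noise, a gate is active exactly when its value is positive
            return (self.value > 0).float()

    gates = DeterministicGates(4, reg_reduction="mean")
    x = torch.randn(2, 4)
    gated_x, l0_reg = gates(x)
    print(gated_x.shape, l0_reg.item())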
|
#!/usr/bin/env python3
import math
from typing import Optional
import torch
from captum.module.stochastic_gates_base import StochasticGatesBase
from torch import nn, Tensor
class GaussianStochasticGates(StochasticGatesBase):
"""
Stochastic Gates with Gaussian distribution.
    Stochastic Gates is a practical solution to add L0 norm regularization for neural
    networks. L0 regularization, which explicitly penalizes any present (non-zero)
    parameters, can help network pruning and feature selection, but directly optimizing
    L0 is a non-differentiable combinatorial problem. As a surrogate for L0, Stochastic
    Gates uses certain continuous probability distributions (e.g., Concrete, Gaussian)
    with hard-sigmoid rectification as continuous smoothed Bernoulli distributions
    determining the weight of a parameter, i.e., a gate. The expected L0 is then equal
    to the gates' non-zero probabilities represented by the parameters of the
    continuous probability distributions. The gate values can also be reparameterized
    from the distribution parameters with a noise term, so the expected L0 can be
    optimized through learning the distribution parameters via stochastic gradients.
    GaussianStochasticGates adopts a Gaussian distribution as the smoothed Bernoulli
    distribution of the gate. While the smoothed Bernoulli distribution should lie
    within 0 and 1, the Gaussian distribution is unbounded, so hard-sigmoid
    rectification is used to "fold" the parts smaller than 0 or larger than 1 back to
    0 and 1.
More details can be found in the original paper:
https://arxiv.org/abs/1810.04247
Examples::
>>> n_params = 5 # number of gates
>>> stg = GaussianStochasticGates(n_params, reg_weight=0.01)
>>> inputs = torch.randn(3, n_params) # mock inputs with batch size of 3
        >>> gated_inputs, reg = stg(inputs) # gate the inputs
"""
def __init__(
self,
n_gates: int,
mask: Optional[Tensor] = None,
reg_weight: Optional[float] = 1.0,
std: Optional[float] = 0.5,
reg_reduction: str = "sum",
):
"""
Args:
n_gates (int): number of gates.
mask (Tensor, optional): If provided, this allows grouping multiple
input tensor elements to share the same stochastic gate.
This tensor should be broadcastable to match the input shape
and contain integers in the range 0 to n_gates - 1.
Indices grouped to the same stochastic gate should have the same value.
If not provided, each element in the input tensor
(on dimensions other than dim 0, i.e., batch dim) is gated separately.
Default: None
reg_weight (float, optional): rescaling weight for L0 regularization term.
Default: 1.0
std (float, optional): standard deviation that will be fixed throughout.
Default: 0.5
reg_reduction (str, optional): the reduction to apply to the regularization:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
applied and it will be the same as the return of ``get_active_probs``,
``'mean'``: the sum of the gates non-zero probabilities will be divided
by the number of gates, ``'sum'``: the gates non-zero probabilities will
be summed.
Default: ``'sum'``
"""
super().__init__(
n_gates, mask=mask, reg_weight=reg_weight, reg_reduction=reg_reduction
)
mu = torch.empty(n_gates)
nn.init.normal_(mu, mean=0.5, std=0.01)
self.mu = nn.Parameter(mu)
assert 0 < std, f"the standard deviation should be positive, received {std}"
self.std = std
def _sample_gate_values(self, batch_size: int) -> Tensor:
"""
Sample gate values for each example in the batch from the Gaussian distribution
Args:
batch_size (int): input batch size
Returns:
gate_values (Tensor): gate value tensor of shape(batch_size, n_gates)
"""
if self.training:
n = torch.empty(batch_size, self.n_gates, device=self.mu.device)
n.normal_(mean=0, std=self.std)
return self.mu + n
return self.mu.expand(batch_size, self.n_gates)
def _get_gate_values(self) -> Tensor:
"""
        Get the raw gate values, which are the means of the underlying gate
        distributions, i.e., the learned mu
Returns:
gate_values (Tensor): value of each gate after model is trained
"""
return self.mu
def _get_gate_active_probs(self) -> Tensor:
"""
        Get the active probability of each gate, i.e., gate value > 0, in the
        Gaussian distribution
        Returns:
            probs (Tensor): probabilities of each gate being active
                in shape(n_gates)
"""
x = self.mu / self.std
return 0.5 * (1 + torch.erf(x / math.sqrt(2)))
@classmethod
def _from_pretrained(cls, mu: Tensor, *args, **kwargs):
"""
Private factory method to create an instance with pretrained parameters
Args:
mu (Tensor): FloatTensor containing weights for the pretrained mu
mask (Tensor, optional): If provided, this allows grouping multiple
input tensor elements to share the same stochastic gate.
This tensor should be broadcastable to match the input shape
and contain integers in the range 0 to n_gates - 1.
Indices grouped to the same stochastic gate should have the same value.
If not provided, each element in the input tensor
(on dimensions other than dim 0 - batch dim) is gated separately.
Default: None
reg_weight (float, optional): rescaling weight for L0 regularization term.
Default: 1.0
std (float, optional): standard deviation that will be fixed throughout.
Default: 0.5
reg_reduction (str, optional): the reduction to apply to the regularization:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
applied and it will be the same as the return of ``get_active_probs``,
``'mean'``: the sum of the gates non-zero probabilities will be divided
by the number of gates, ``'sum'``: the gates non-zero probabilities will
be summed.
Default: ``'sum'``
Returns:
stg (GaussianStochasticGates): StochasticGates instance
"""
n_gates = mu.numel()
stg = cls(n_gates, *args, **kwargs)
stg.load_state_dict({"mu": mu}, strict=False)
return stg
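# --- Illustrative usage sketch (not part of captum) ---------------------------
# Minimal example of gating a (batch, channel, length) input where every
# element of a channel shares one gate via the ``mask`` argument. The shapes
# and hyper-parameters below are arbitrary illustration values.
if __name__ == "__main__":
    batch_size, n_channels, length = 4, 3, 8
    # one gate per channel: the (n_channels, length) mask broadcasts over batch
    mask = torch.arange(n_channels).unsqueeze(-1).expand(n_channels, length)
    stg = GaussianStochasticGates(n_gates=n_channels, mask=mask, reg_weight=0.1)
    inputs = torch.randn(batch_size, n_channels, length)
    gated_inputs, l0_reg = stg(inputs)
    print(gated_inputs.shape)           # torch.Size([4, 3, 8])
    print(stg.get_gate_values())        # clamped per-gate means, shape (3,)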
|
from captum.module.binary_concrete_stochastic_gates import ( # noqa
BinaryConcreteStochasticGates,
)
from captum.module.gaussian_stochastic_gates import GaussianStochasticGates # noqa
from captum.module.stochastic_gates_base import StochasticGatesBase # noqa
|
#!/usr/bin/env python3
import math
from typing import Optional
import torch
from captum.module.stochastic_gates_base import StochasticGatesBase
from torch import nn, Tensor
def _torch_empty(batch_size: int, n_gates: int, device: torch.device) -> Tensor:
return torch.empty(batch_size, n_gates, device=device)
# torch.fx is introduced in 1.8.0
if hasattr(torch, "fx"):
torch.fx.wrap(_torch_empty)
def _logit(inp):
# torch.logit is introduced in 1.7.0
if hasattr(torch, "logit"):
return torch.logit(inp)
else:
return torch.log(inp) - torch.log(1 - inp)
class BinaryConcreteStochasticGates(StochasticGatesBase):
"""
Stochastic Gates with binary concrete distribution.
    Stochastic Gates is a practical solution to add L0 norm regularization for neural
    networks. L0 regularization, which explicitly penalizes any present (non-zero)
    parameters, can help network pruning and feature selection, but directly optimizing
    L0 is a non-differentiable combinatorial problem. As a surrogate for L0, Stochastic
    Gates uses certain continuous probability distributions (e.g., Concrete, Gaussian)
    with hard-sigmoid rectification as continuous smoothed Bernoulli distributions
    determining the weight of a parameter, i.e., a gate. The expected L0 is then equal
    to the gates' non-zero probabilities represented by the parameters of the
    continuous probability distributions. The gate values can also be reparameterized
    from the distribution parameters with a noise term, so the expected L0 can be
    optimized through learning the distribution parameters via stochastic gradients.
    BinaryConcreteStochasticGates adopts a "stretched" binary concrete distribution as
    the smoothed Bernoulli distribution of the gate. The binary concrete distribution
    does not include its lower and upper boundaries, 0 and 1, which a Bernoulli
    distribution requires, so it is linearly stretched beyond both boundaries.
    Hard-sigmoid rectification is then used to "fold" the parts smaller than 0 or
    larger than 1 back to 0 and 1.
More details can be found in the original paper:
https://arxiv.org/abs/1712.01312
Examples::
>>> n_params = 5 # number of parameters
>>> stg = BinaryConcreteStochasticGates(n_params, reg_weight=0.01)
>>> inputs = torch.randn(3, n_params) # mock inputs with batch size of 3
        >>> gated_inputs, reg = stg(inputs) # gate the inputs
"""
def __init__(
self,
n_gates: int,
mask: Optional[Tensor] = None,
reg_weight: float = 1.0,
temperature: float = 2.0 / 3,
lower_bound: float = -0.1,
upper_bound: float = 1.1,
eps: float = 1e-8,
reg_reduction: str = "sum",
):
"""
Args:
n_gates (int): number of gates.
mask (Tensor, optional): If provided, this allows grouping multiple
input tensor elements to share the same stochastic gate.
This tensor should be broadcastable to match the input shape
and contain integers in the range 0 to n_gates - 1.
Indices grouped to the same stochastic gate should have the same value.
If not provided, each element in the input tensor
(on dimensions other than dim 0, i.e., batch dim) is gated separately.
Default: None
reg_weight (float, optional): rescaling weight for L0 regularization term.
Default: 1.0
temperature (float, optional): temperature of the concrete distribution,
controls the degree of approximation, as 0 means the original Bernoulli
without relaxation. The value should be between 0 and 1.
Default: 2/3
lower_bound (float, optional): the lower bound to "stretch" the binary
concrete distribution
Default: -0.1
upper_bound (float, optional): the upper bound to "stretch" the binary
concrete distribution
Default: 1.1
eps (float, optional): term to improve numerical stability in binary
                concrete sampling
Default: 1e-8
reg_reduction (str, optional): the reduction to apply to the regularization:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
applied and it will be the same as the return of ``get_active_probs``,
``'mean'``: the sum of the gates non-zero probabilities will be divided
by the number of gates, ``'sum'``: the gates non-zero probabilities will
be summed.
Default: ``'sum'``
"""
super().__init__(
n_gates, mask=mask, reg_weight=reg_weight, reg_reduction=reg_reduction
)
# avoid changing the tensor's variable name
# when the module is used after compilation,
# users may directly access this tensor by name
log_alpha_param = torch.empty(n_gates)
nn.init.normal_(log_alpha_param, mean=0.0, std=0.01)
self.log_alpha_param = nn.Parameter(log_alpha_param)
assert (
0 < temperature < 1
), f"the temperature should be bwteen 0 and 1, received {temperature}"
self.temperature = temperature
assert (
lower_bound < 0
), f"the stretch lower bound should smaller than 0, received {lower_bound}"
self.lower_bound = lower_bound
assert (
upper_bound > 1
), f"the stretch upper bound should larger than 1, received {upper_bound}"
self.upper_bound = upper_bound
self.eps = eps
# pre-calculate the fixed term used in active prob
self.active_prob_offset = temperature * math.log(-lower_bound / upper_bound)
def _sample_gate_values(self, batch_size: int) -> Tensor:
"""
Sample gate values for each example in the batch from the binary concrete
distributions
Args:
batch_size (int): input batch size
Returns:
gate_values (Tensor): gate value tensor of shape(batch_size, n_gates)
"""
if self.training:
u = _torch_empty(
batch_size, self.n_gates, device=self.log_alpha_param.device
)
u.uniform_(self.eps, 1 - self.eps)
s = torch.sigmoid((_logit(u) + self.log_alpha_param) / self.temperature)
else:
s = torch.sigmoid(self.log_alpha_param)
s = s.expand(batch_size, self.n_gates)
s_bar = s * (self.upper_bound - self.lower_bound) + self.lower_bound
return s_bar
def _get_gate_values(self) -> Tensor:
"""
        Get the raw gate values, which are the means of the underlying gate
        distributions, derived from the learned log_alpha_param
Returns:
gate_values (Tensor): value of each gate after model is trained
"""
gate_values = (
torch.sigmoid(self.log_alpha_param) * (self.upper_bound - self.lower_bound)
+ self.lower_bound
)
return gate_values
def _get_gate_active_probs(self) -> Tensor:
"""
        Get the active probability of each gate, i.e., gate value > 0, in the binary
        concrete distributions
        Returns:
            probs (Tensor): probabilities of each gate being active
                in shape(n_gates)
"""
return torch.sigmoid(self.log_alpha_param - self.active_prob_offset)
@classmethod
def _from_pretrained(cls, log_alpha_param: Tensor, *args, **kwargs):
"""
Private factory method to create an instance with pretrained parameters
Args:
log_alpha_param (Tensor): FloatTensor containing weights for
the pretrained log_alpha
mask (Tensor, optional): If provided, this allows grouping multiple
input tensor elements to share the same stochastic gate.
This tensor should be broadcastable to match the input shape
and contain integers in the range 0 to n_gates - 1.
Indices grouped to the same stochastic gate should have the same value.
If not provided, each element in the input tensor
(on dimensions other than dim 0 - batch dim) is gated separately.
Default: None
reg_weight (float, optional): rescaling weight for L0 regularization term.
Default: 1.0
temperature (float, optional): temperature of the concrete distribution,
controls the degree of approximation, as 0 means the original Bernoulli
without relaxation. The value should be between 0 and 1.
Default: 2/3
lower_bound (float, optional): the lower bound to "stretch" the binary
concrete distribution
Default: -0.1
upper_bound (float, optional): the upper bound to "stretch" the binary
concrete distribution
Default: 1.1
eps (float, optional): term to improve numerical stability in binary
                concrete sampling
Default: 1e-8
reg_reduction (str, optional): the reduction to apply to the regularization:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
applied and it will be the same as the return of ``get_active_probs``,
``'mean'``: the sum of the gates non-zero probabilities will be divided
by the number of gates, ``'sum'``: the gates non-zero probabilities will
be summed.
Default: ``'sum'``
Returns:
stg (BinaryConcreteStochasticGates): StochasticGates instance
"""
assert (
log_alpha_param.dim() == 1
), "log_alpha_param is expected to be 1-dimensional"
n_gates = log_alpha_param.numel()
stg = cls(n_gates, *args, **kwargs)
stg.load_state_dict({"log_alpha_param": log_alpha_param}, strict=False)
return stg
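# --- Illustrative usage sketch (not part of captum) ---------------------------
# Small demonstration of how sampling differs between train and eval modes:
# in training, gate values are sampled with uniform noise on every call; in
# eval, they are the deterministic sigmoid of the learned log-alpha parameters.
# All numbers below are arbitrary illustration values.
if __name__ == "__main__":
    torch.manual_seed(0)
    stg = BinaryConcreteStochasticGates(n_gates=4, reg_weight=0.01)
    inputs = torch.randn(2, 4)
    stg.train()
    gated_a, _ = stg(inputs)
    gated_b, _ = stg(inputs)
    print(torch.allclose(gated_a, gated_b))   # False: noisy samples differ
    stg.eval()
    gated_c, _ = stg(inputs)
    gated_d, _ = stg(inputs)
    print(torch.allclose(gated_c, gated_d))   # True: deterministic in eval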
|
#!/usr/bin/env python3
from captum.attr._core.dataloader_attr import DataLoaderAttribution # noqa
from captum.attr._core.deep_lift import DeepLift, DeepLiftShap # noqa
from captum.attr._core.feature_ablation import FeatureAblation # noqa
from captum.attr._core.feature_permutation import FeaturePermutation # noqa
from captum.attr._core.gradient_shap import GradientShap # noqa
from captum.attr._core.guided_backprop_deconvnet import ( # noqa
Deconvolution,
GuidedBackprop,
)
from captum.attr._core.guided_grad_cam import GuidedGradCam # noqa
from captum.attr._core.input_x_gradient import InputXGradient # noqa
from captum.attr._core.integrated_gradients import IntegratedGradients # noqa
from captum.attr._core.kernel_shap import KernelShap # noqa
from captum.attr._core.layer.grad_cam import LayerGradCam # noqa
from captum.attr._core.layer.internal_influence import InternalInfluence # noqa
from captum.attr._core.layer.layer_activation import LayerActivation # noqa
from captum.attr._core.layer.layer_conductance import LayerConductance # noqa
from captum.attr._core.layer.layer_deep_lift import ( # noqa
LayerDeepLift,
LayerDeepLiftShap,
)
from captum.attr._core.layer.layer_feature_ablation import LayerFeatureAblation # noqa
from captum.attr._core.layer.layer_gradient_shap import LayerGradientShap # noqa
from captum.attr._core.layer.layer_gradient_x_activation import ( # noqa
LayerGradientXActivation,
)
from captum.attr._core.layer.layer_integrated_gradients import ( # noqa
LayerIntegratedGradients,
)
from captum.attr._core.layer.layer_lrp import LayerLRP # noqa
from captum.attr._core.lime import Lime, LimeBase # noqa
from captum.attr._core.lrp import LRP # noqa
from captum.attr._core.neuron.neuron_conductance import NeuronConductance # noqa
from captum.attr._core.neuron.neuron_deep_lift import ( # noqa
NeuronDeepLift,
NeuronDeepLiftShap,
)
from captum.attr._core.neuron.neuron_feature_ablation import ( # noqa
NeuronFeatureAblation,
)
from captum.attr._core.neuron.neuron_gradient import NeuronGradient # noqa
from captum.attr._core.neuron.neuron_gradient_shap import NeuronGradientShap # noqa
from captum.attr._core.neuron.neuron_guided_backprop_deconvnet import ( # noqa
NeuronDeconvolution,
NeuronGuidedBackprop,
)
from captum.attr._core.neuron.neuron_integrated_gradients import ( # noqa
NeuronIntegratedGradients,
)
from captum.attr._core.noise_tunnel import NoiseTunnel # noqa
from captum.attr._core.occlusion import Occlusion # noqa
from captum.attr._core.saliency import Saliency # noqa
from captum.attr._core.shapley_value import ShapleyValues, ShapleyValueSampling # noqa
from captum.attr._models.base import ( # noqa
configure_interpretable_embedding_layer,
InterpretableEmbeddingBase,
remove_interpretable_embedding_layer,
TokenReferenceBase,
)
from captum.attr._utils import visualization # noqa
from captum.attr._utils.attribution import (  # noqa
Attribution,
GradientAttribution,
LayerAttribution,
NeuronAttribution,
PerturbationAttribution,
)
from captum.attr._utils.class_summarizer import ClassSummarizer
from captum.attr._utils.stat import (
CommonStats,
Count,
Max,
Mean,
Min,
MSE,
StdDev,
Sum,
Var,
)
from captum.attr._utils.summarizer import Summarizer
__all__ = [
"Attribution",
"GradientAttribution",
"PerturbationAttribution",
"NeuronAttribution",
"LayerAttribution",
"IntegratedGradients",
"DataLoaderAttribution",
"DeepLift",
"DeepLiftShap",
"InputXGradient",
"Saliency",
"GuidedBackprop",
"Deconvolution",
"GuidedGradCam",
"FeatureAblation",
"FeaturePermutation",
"Occlusion",
"ShapleyValueSampling",
"ShapleyValues",
"LimeBase",
"Lime",
"LRP",
"KernelShap",
"LayerConductance",
"LayerGradientXActivation",
"LayerActivation",
"LayerFeatureAblation",
"InternalInfluence",
"LayerGradCam",
"LayerDeepLift",
"LayerDeepLiftShap",
"LayerGradientShap",
"LayerIntegratedGradients",
"LayerLRP",
"NeuronConductance",
"NeuronFeatureAblation",
"NeuronGradient",
"NeuronIntegratedGradients",
"NeuronDeepLift",
"NeuronDeepLiftShap",
"NeuronGradientShap",
"NeuronDeconvolution",
"NeuronGuidedBackprop",
"NoiseTunnel",
"GradientShap",
"InterpretableEmbeddingBase",
"TokenReferenceBase",
"visualization",
"configure_interpretable_embedding_layer",
"remove_interpretable_embedding_layer",
"Summarizer",
"CommonStats",
"ClassSummarizer",
"Mean",
"StdDev",
"MSE",
"Var",
"Min",
"Max",
"Sum",
"Count",
]
|
#!/usr/bin/env python3
import typing
import warnings
from typing import Any, Callable, Iterator, Tuple, Union
import torch
from captum._utils.common import (
_format_additional_forward_args,
_format_output,
_format_tensor_into_tuples,
_reduce_list,
)
from captum._utils.typing import (
TargetType,
TensorOrTupleOfTensorsGeneric,
TupleOrTensorOrBoolGeneric,
)
from captum.attr._utils.approximation_methods import approximation_parameters
from torch import Tensor
def _batch_attribution(
attr_method,
num_examples,
internal_batch_size,
n_steps,
include_endpoint=False,
**kwargs,
):
"""
    This method applies internal batching to the given attribution method, dividing
    the total steps into batches and running each independently and sequentially,
    adding each result to compute the total attribution.
    Step sizes and alphas are spliced for each batch and passed explicitly for each
    call to _attribute.
    kwargs include all arguments necessary to pass to each attribute call, except
    for n_steps, which is computed based on the number of steps for the batch.
include_endpoint ensures that one step overlaps between each batch, which
is necessary for some methods, particularly LayerConductance.
"""
if internal_batch_size < num_examples:
warnings.warn(
"Internal batch size cannot be less than the number of input examples. "
"Defaulting to internal batch size of %d equal to the number of examples."
% num_examples
)
# Number of steps for each batch
step_count = max(1, internal_batch_size // num_examples)
if include_endpoint:
if step_count < 2:
step_count = 2
warnings.warn(
"This method computes finite differences between evaluations at "
"consecutive steps, so internal batch size must be at least twice "
"the number of examples. Defaulting to internal batch size of %d"
" equal to twice the number of examples." % (2 * num_examples)
)
total_attr = None
cumulative_steps = 0
step_sizes_func, alphas_func = approximation_parameters(kwargs["method"])
full_step_sizes = step_sizes_func(n_steps)
full_alphas = alphas_func(n_steps)
while cumulative_steps < n_steps:
start_step = cumulative_steps
end_step = min(start_step + step_count, n_steps)
batch_steps = end_step - start_step
if include_endpoint:
batch_steps -= 1
step_sizes = full_step_sizes[start_step:end_step]
alphas = full_alphas[start_step:end_step]
current_attr = attr_method._attribute(
**kwargs, n_steps=batch_steps, step_sizes_and_alphas=(step_sizes, alphas)
)
if total_attr is None:
total_attr = current_attr
else:
if isinstance(total_attr, Tensor):
total_attr = total_attr + current_attr.detach()
else:
total_attr = tuple(
current.detach() + prev_total
for current, prev_total in zip(current_attr, total_attr)
)
if include_endpoint and end_step < n_steps:
cumulative_steps = end_step - 1
else:
cumulative_steps = end_step
return total_attr
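# --- Illustrative sketch (not part of captum) ---------------------------------
# Standalone illustration of how the loop above partitions `n_steps` into
# chunks of `step_count` steps, and how `include_endpoint` makes consecutive
# chunks overlap by one step (needed when finite differences are taken between
# consecutive evaluations). `chunk_steps` is a hypothetical helper and the
# numbers are arbitrary.
if __name__ == "__main__":
    def chunk_steps(n_steps, step_count, include_endpoint):
        chunks, cumulative_steps = [], 0
        while cumulative_steps < n_steps:
            start_step = cumulative_steps
            end_step = min(start_step + step_count, n_steps)
            chunks.append((start_step, end_step))
            if include_endpoint and end_step < n_steps:
                cumulative_steps = end_step - 1
            else:
                cumulative_steps = end_step
        return chunks

    print(chunk_steps(10, 4, include_endpoint=False))  # [(0, 4), (4, 8), (8, 10)]
    print(chunk_steps(10, 4, include_endpoint=True))   # [(0, 4), (3, 7), (6, 10)]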
@typing.overload
def _tuple_splice_range(inputs: None, start: int, end: int) -> None:
...
@typing.overload
def _tuple_splice_range(inputs: Tuple, start: int, end: int) -> Tuple:
...
def _tuple_splice_range(
inputs: Union[None, Tuple], start: int, end: int
) -> Union[None, Tuple]:
"""
Splices each tensor element of given tuple (inputs) from range start
(inclusive) to end (non-inclusive) on its first dimension. If element
is not a Tensor, it is left unchanged. It is assumed that all tensor elements
have the same first dimension (corresponding to number of examples).
The returned value is a tuple with the same length as inputs, with Tensors
spliced appropriately.
"""
assert start < end, "Start point must precede end point for batch splicing."
if inputs is None:
return None
return tuple(
inp[start:end] if isinstance(inp, torch.Tensor) else inp for inp in inputs
)
def _batched_generator(
inputs: TensorOrTupleOfTensorsGeneric,
additional_forward_args: Any = None,
target_ind: TargetType = None,
internal_batch_size: Union[None, int] = None,
) -> Iterator[Tuple[Tuple[Tensor, ...], Any, TargetType]]:
"""
Returns a generator which returns corresponding chunks of size internal_batch_size
for both inputs and additional_forward_args. If batch size is None,
generator only includes original inputs and additional args.
"""
assert internal_batch_size is None or (
isinstance(internal_batch_size, int) and internal_batch_size > 0
), "Batch size must be greater than 0."
inputs = _format_tensor_into_tuples(inputs)
additional_forward_args = _format_additional_forward_args(additional_forward_args)
num_examples = inputs[0].shape[0]
# TODO Reconsider this check if _batched_generator is used for non gradient-based
# attribution algorithms
if not (inputs[0] * 1).requires_grad:
warnings.warn(
"""It looks like that the attribution for a gradient-based method is
computed in a `torch.no_grad` block or perhaps the inputs have no
requires_grad."""
)
if internal_batch_size is None:
yield inputs, additional_forward_args, target_ind
else:
for current_total in range(0, num_examples, internal_batch_size):
with torch.autograd.set_grad_enabled(True):
inputs_splice = _tuple_splice_range(
inputs, current_total, current_total + internal_batch_size
)
yield inputs_splice, _tuple_splice_range(
additional_forward_args,
current_total,
current_total + internal_batch_size,
), target_ind[
current_total : current_total + internal_batch_size
] if isinstance(
target_ind, list
) or (
isinstance(target_ind, torch.Tensor) and target_ind.numel() > 1
) else target_ind
def _batched_operator(
operator: Callable[..., TupleOrTensorOrBoolGeneric],
inputs: TensorOrTupleOfTensorsGeneric,
additional_forward_args: Any = None,
target_ind: TargetType = None,
internal_batch_size: Union[None, int] = None,
**kwargs: Any,
) -> TupleOrTensorOrBoolGeneric:
"""
Batches the operation of the given operator, applying the given batch size
to inputs and additional forward arguments, and returning the concatenation
of the results of each batch.
"""
all_outputs = [
operator(
inputs=input,
additional_forward_args=additional,
target_ind=target,
**kwargs,
)
for input, additional, target in _batched_generator(
inputs, additional_forward_args, target_ind, internal_batch_size
)
]
return _reduce_list(all_outputs)
def _select_example(curr_arg: Any, index: int, bsz: int) -> Any:
if curr_arg is None:
return None
is_tuple = isinstance(curr_arg, tuple)
if not is_tuple:
curr_arg = (curr_arg,)
selected_arg = []
for i in range(len(curr_arg)):
if isinstance(curr_arg[i], (Tensor, list)) and len(curr_arg[i]) == bsz:
selected_arg.append(curr_arg[i][index : index + 1])
else:
selected_arg.append(curr_arg[i])
return _format_output(is_tuple, tuple(selected_arg))
def _batch_example_iterator(bsz: int, *args) -> Iterator:
"""
    Iterates over the provided batched arguments, yielding one example
    (a batch-size-1 slice) of each argument at a time.
"""
for i in range(bsz):
curr_args = [_select_example(args[j], i, bsz) for j in range(len(args))]
yield tuple(curr_args)
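# --- Illustrative sketch (not part of captum) ---------------------------------
# Small demonstration of the internal batching helpers above:
# `_tuple_splice_range` slices only the tensor elements of a tuple, and
# `_batched_generator` yields input chunks of at most `internal_batch_size`
# examples. Shapes and values are arbitrary illustration data.
if __name__ == "__main__":
    examples = torch.randn(5, 2, requires_grad=True)
    extra = (torch.ones(5, 3), "a non-tensor arg is passed through unchanged")
    print(_tuple_splice_range(extra, 1, 3))
    for batch_inputs, batch_extra, batch_target in _batched_generator(
        examples,
        additional_forward_args=extra,
        target_ind=torch.tensor([0, 1, 0, 1, 0]),
        internal_batch_size=2,
    ):
        # the last chunk may be smaller than internal_batch_size
        print(batch_inputs[0].shape, batch_target)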
|
#!/usr/bin/env python3
import torch.nn as nn
class Addition_Module(nn.Module):
"""Custom addition module that uses multiple inputs to assure correct relevance
propagation. Any addition in a forward function needs to be replaced with the
module before using LRP."""
def __init__(self) -> None:
super().__init__()
def forward(self, x1, x2):
return x1 + x2
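# --- Illustrative sketch (not part of captum) ---------------------------------
# Hypothetical toy block showing how a plain `x1 + x2` in a forward pass can be
# replaced with Addition_Module so that LRP can attach a propagation rule to
# the addition. The skip-connection block below is illustration only.
if __name__ == "__main__":
    import torch

    class SkipBlock(nn.Module):
        def __init__(self, dim: int) -> None:
            super().__init__()
            self.linear = nn.Linear(dim, dim)
            self.add = Addition_Module()  # instead of writing `x + self.linear(x)`

        def forward(self, x):
            return self.add(x, self.linear(x))

    block = SkipBlock(4)
    print(block(torch.randn(2, 4)).shape)  # torch.Size([2, 4])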
|
#!/usr/bin/env python3
from enum import Enum
from typing import Callable, List, Tuple
import torch
class Riemann(Enum):
left = 1
right = 2
middle = 3
trapezoid = 4
SUPPORTED_RIEMANN_METHODS = [
"riemann_left",
"riemann_right",
"riemann_middle",
"riemann_trapezoid",
]
SUPPORTED_METHODS = SUPPORTED_RIEMANN_METHODS + ["gausslegendre"]
def approximation_parameters(
method: str,
) -> Tuple[Callable[[int], List[float]], Callable[[int], List[float]]]:
r"""Retrieves parameters for the input approximation `method`
Args:
        method (str): The name of the approximation method. Currently, only the
            Riemann methods (``riemann_left``, ``riemann_right``, ``riemann_middle``,
            ``riemann_trapezoid``) and ``gausslegendre`` are supported.
"""
if method in SUPPORTED_RIEMANN_METHODS:
return riemann_builders(method=Riemann[method.split("_")[-1]])
if method == "gausslegendre":
return gauss_legendre_builders()
raise ValueError("Invalid integral approximation method name: {}".format(method))
def riemann_builders(
method: Riemann = Riemann.trapezoid,
) -> Tuple[Callable[[int], List[float]], Callable[[int], List[float]]]:
r"""Step sizes are identical and alphas are scaled in [0, 1]
Args:
method (Riemann): `left`, `right`, `middle` and `trapezoid` riemann
Returns:
2-element tuple of **step_sizes**, **alphas**:
- **step_sizes** (*Callable*):
`step_sizes` takes the number of steps as an
                    input argument and returns an array of step sizes whose
sum is smaller than or equal to one.
- **alphas** (*Callable*):
`alphas` takes the number of steps as an input argument
and returns the multipliers/coefficients for the inputs
of integrand in the range of [0, 1]
"""
def step_sizes(n: int) -> List[float]:
assert n > 1, "The number of steps has to be larger than one"
deltas = [1 / n] * n
if method == Riemann.trapezoid:
deltas[0] /= 2
deltas[-1] /= 2
return deltas
def alphas(n: int) -> List[float]:
assert n > 1, "The number of steps has to be larger than one"
if method == Riemann.trapezoid:
return torch.linspace(0, 1, n).tolist()
elif method == Riemann.left:
return torch.linspace(0, 1 - 1 / n, n).tolist()
elif method == Riemann.middle:
return torch.linspace(1 / (2 * n), 1 - 1 / (2 * n), n).tolist()
elif method == Riemann.right:
return torch.linspace(1 / n, 1, n).tolist()
else:
raise AssertionError("Provided Reimann approximation method is not valid.")
        # This is not a standard Riemann method but in many cases it
        # leads to faster approximation. Test cases for small number of steps
# do not make sense but for larger number of steps the approximation is
# better therefore leaving this option available
# if method == 'riemann_include_endpoints':
# return [i / (n - 1) for i in range(n)]
return step_sizes, alphas
def gauss_legendre_builders() -> Tuple[
Callable[[int], List[float]], Callable[[int], List[float]]
]:
r"""Numpy's `np.polynomial.legendre` function helps to compute step sizes
and alpha coefficients using gauss-legendre quadrature rule.
Since numpy returns the integration parameters in different scales we need to
rescale them to adjust to the desired scale.
Gauss Legendre quadrature rule for approximating the integrals was originally
proposed by [Xue Feng and her intern Hauroun Habeeb]
(https://research.fb.com/people/feng-xue/).
Returns:
2-element tuple of **step_sizes**, **alphas**:
- **step_sizes** (*Callable*):
`step_sizes` takes the number of steps as an
                    input argument and returns an array of step sizes whose
sum is smaller than or equal to one.
- **alphas** (*Callable*):
`alphas` takes the number of steps as an input argument
and returns the multipliers/coefficients for the inputs
of integrand in the range of [0, 1]
"""
# allow using riemann even without np
import numpy as np
def step_sizes(n: int) -> List[float]:
assert n > 0, "The number of steps has to be larger than zero"
        # Rescale weights: leggauss weights sum to 2 over [-1, 1], so halve them
        # to make the step sizes sum to 1 over [0, 1]
return list(0.5 * np.polynomial.legendre.leggauss(n)[1])
def alphas(n: int) -> List[float]:
assert n > 0, "The number of steps has to be larger than zero"
# Scaling from [-1, 1] to [0, 1]
return list(0.5 * (1 + np.polynomial.legendre.leggauss(n)[0]))
return step_sizes, alphas
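# --- Illustrative sketch (not part of captum) ---------------------------------
# Quick demonstration of the builders above: for both a Riemann rule and
# Gauss-Legendre quadrature, the step sizes sum to at most 1 and the alphas are
# interpolation coefficients in [0, 1]. `n_steps = 5` is an arbitrary choice.
if __name__ == "__main__":
    n_steps = 5
    for name in ("riemann_trapezoid", "gausslegendre"):
        step_sizes_fn, alphas_fn = approximation_parameters(name)
        steps = step_sizes_fn(n_steps)
        alphas = alphas_fn(n_steps)
        print(name, round(sum(steps), 6), [round(a, 3) for a in alphas])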
|
#!/usr/bin/env python3
import inspect
from typing import Any
import torch.nn as nn
class InputIdentity(nn.Module):
def __init__(self, input_name: str) -> None:
r"""
The identity operation
Args:
input_name (str)
                The name of the input this layer is associated with. Used for
                debugging purposes.
"""
super().__init__()
self.input_name = input_name
def forward(self, x):
return x
class ModelInputWrapper(nn.Module):
def __init__(self, module_to_wrap: nn.Module) -> None:
r"""
This is a convenience class. This wraps a model via first feeding the
model's inputs to separate layers (one for each input) and then feeding
the (unmodified) inputs to the underlying model (`module_to_wrap`). Each
input is fed through an `InputIdentity` layer/module. This class does
not change how you feed inputs to your model, so feel free to use your
model as you normally would.
To access a wrapped input layer, simply access it via the `input_maps`
ModuleDict, e.g. to get the corresponding module for input "x", simply
provide/write `my_wrapped_module.input_maps["x"]`
        This is done so that layer attribution methods can be applied to inputs,
        which allows you to mix layers and inputs with these attribution
        methods. This is especially useful for multimodal models which take
        both discrete features (mapped to embeddings, such as text) and regular
        continuous feature vectors.
Notes:
- Since inputs are mapped with the identity, attributing to the
input/feature can be done with either the input or output of the
layer, e.g. attributing to an input/feature doesn't depend on whether
attribute_to_layer_input is True or False for
LayerIntegratedGradients.
- Please refer to the multimodal tutorial or unit tests
(test/attr/test_layer_wrapper.py) for an example.
Args:
module_to_wrap (nn.Module):
The model/module you want to wrap
"""
super().__init__()
self.module = module_to_wrap
# ignore self
self.arg_name_list = inspect.getfullargspec(module_to_wrap.forward).args[1:]
self.input_maps = nn.ModuleDict(
{arg_name: InputIdentity(arg_name) for arg_name in self.arg_name_list}
)
def forward(self, *args, **kwargs) -> Any:
args = list(args)
for idx, (arg_name, arg) in enumerate(zip(self.arg_name_list, args)):
args[idx] = self.input_maps[arg_name](arg)
for arg_name in kwargs.keys():
kwargs[arg_name] = self.input_maps[arg_name](kwargs[arg_name])
return self.module(*tuple(args), **kwargs)
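# --- Illustrative sketch (not part of captum) ---------------------------------
# Hypothetical two-input model wrapped with ModelInputWrapper. Each named
# forward argument ("text_emb", "numeric") gets its own InputIdentity layer in
# `input_maps`, which layer attribution methods can then target directly. The
# model and shapes below are made up for demonstration.
if __name__ == "__main__":
    import torch

    class TwoInputModel(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.fc = nn.Linear(8, 2)

        def forward(self, text_emb, numeric):
            return self.fc(torch.cat([text_emb, numeric], dim=-1))

    wrapped = ModelInputWrapper(TwoInputModel())
    out = wrapped(torch.randn(3, 5), numeric=torch.randn(3, 3))
    print(out.shape)                        # torch.Size([3, 2])
    print(list(wrapped.input_maps.keys()))  # ['text_emb', 'numeric']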
|
#!/usr/bin/env python3
import warnings
from enum import Enum
from typing import Any, Iterable, List, Optional, Tuple, Union
import numpy as np
from matplotlib import cm, colors, pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.figure import Figure
from matplotlib.pyplot import axis, figure
from mpl_toolkits.axes_grid1 import make_axes_locatable
from numpy import ndarray
try:
from IPython.display import display, HTML
HAS_IPYTHON = True
except ImportError:
HAS_IPYTHON = False
class ImageVisualizationMethod(Enum):
heat_map = 1
blended_heat_map = 2
original_image = 3
masked_image = 4
alpha_scaling = 5
class TimeseriesVisualizationMethod(Enum):
overlay_individual = 1
overlay_combined = 2
colored_graph = 3
class VisualizeSign(Enum):
positive = 1
absolute_value = 2
negative = 3
all = 4
def _prepare_image(attr_visual: ndarray):
return np.clip(attr_visual.astype(int), 0, 255)
def _normalize_scale(attr: ndarray, scale_factor: float):
assert scale_factor != 0, "Cannot normalize by scale factor = 0"
if abs(scale_factor) < 1e-5:
warnings.warn(
"Attempting to normalize by value approximately 0, visualized results"
"may be misleading. This likely means that attribution values are all"
"close to 0."
)
attr_norm = attr / scale_factor
return np.clip(attr_norm, -1, 1)
def _cumulative_sum_threshold(values: ndarray, percentile: Union[int, float]):
# given values should be non-negative
assert percentile >= 0 and percentile <= 100, (
"Percentile for thresholding must be " "between 0 and 100 inclusive."
)
sorted_vals = np.sort(values.flatten())
cum_sums = np.cumsum(sorted_vals)
threshold_id = np.where(cum_sums >= cum_sums[-1] * 0.01 * percentile)[0][0]
return sorted_vals[threshold_id]
def _normalize_attr(
attr: ndarray,
sign: str,
outlier_perc: Union[int, float] = 2,
reduction_axis: Optional[int] = None,
):
attr_combined = attr
if reduction_axis is not None:
attr_combined = np.sum(attr, axis=reduction_axis)
# Choose appropriate signed values and rescale, removing given outlier percentage.
if VisualizeSign[sign] == VisualizeSign.all:
threshold = _cumulative_sum_threshold(np.abs(attr_combined), 100 - outlier_perc)
elif VisualizeSign[sign] == VisualizeSign.positive:
attr_combined = (attr_combined > 0) * attr_combined
threshold = _cumulative_sum_threshold(attr_combined, 100 - outlier_perc)
elif VisualizeSign[sign] == VisualizeSign.negative:
attr_combined = (attr_combined < 0) * attr_combined
threshold = -1 * _cumulative_sum_threshold(
np.abs(attr_combined), 100 - outlier_perc
)
elif VisualizeSign[sign] == VisualizeSign.absolute_value:
attr_combined = np.abs(attr_combined)
threshold = _cumulative_sum_threshold(attr_combined, 100 - outlier_perc)
else:
raise AssertionError("Visualize Sign type is not valid.")
return _normalize_scale(attr_combined, threshold)
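# --- Illustrative sketch (not part of captum) ---------------------------------
# Tiny numeric example of the normalization above: with sign="positive" only
# positive attributions are kept, the top outlier_perc of total attribution
# saturates at 1, and everything is rescaled into the expected range. The toy
# values are arbitrary.
if __name__ == "__main__":
    toy_attr = np.array([[-2.0, 0.5], [1.0, 4.0]])
    print(_normalize_attr(toy_attr, sign="positive", outlier_perc=10))
    print(_normalize_attr(toy_attr, sign="all", outlier_perc=0))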
def visualize_image_attr(
attr: ndarray,
original_image: Union[None, ndarray] = None,
method: str = "heat_map",
sign: str = "absolute_value",
plt_fig_axis: Union[None, Tuple[figure, axis]] = None,
outlier_perc: Union[int, float] = 2,
cmap: Union[None, str] = None,
alpha_overlay: float = 0.5,
show_colorbar: bool = False,
title: Union[None, str] = None,
fig_size: Tuple[int, int] = (6, 6),
use_pyplot: bool = True,
):
r"""
Visualizes attribution for a given image by normalizing attribution values
of the desired sign (positive, negative, absolute value, or all) and displaying
them using the desired mode in a matplotlib figure.
Args:
attr (numpy.ndarray): Numpy array corresponding to attributions to be
visualized. Shape must be in the form (H, W, C), with
channels as last dimension. Shape must also match that of
the original image if provided.
original_image (numpy.ndarray, optional): Numpy array corresponding to
original image. Shape must be in the form (H, W, C), with
channels as the last dimension. Image can be provided either
with float values in range 0-1 or int values between 0-255.
This is a necessary argument for any visualization method
which utilizes the original image.
Default: None
method (str, optional): Chosen method for visualizing attribution.
Supported options are:
1. `heat_map` - Display heat map of chosen attributions
2. `blended_heat_map` - Overlay heat map over greyscale
version of original image. Parameter alpha_overlay
corresponds to alpha of heat map.
3. `original_image` - Only display original image.
4. `masked_image` - Mask image (pixel-wise multiply)
by normalized attribution values.
5. `alpha_scaling` - Sets alpha channel of each pixel
to be equal to normalized attribution value.
Default: `heat_map`
sign (str, optional): Chosen sign of attributions to visualize. Supported
options are:
1. `positive` - Displays only positive pixel attributions.
2. `absolute_value` - Displays absolute value of
attributions.
3. `negative` - Displays only negative pixel attributions.
4. `all` - Displays both positive and negative attribution
values. This is not supported for `masked_image` or
`alpha_scaling` modes, since signed information cannot
be represented in these modes.
Default: `absolute_value`
plt_fig_axis (tuple, optional): Tuple of matplotlib.pyplot.figure and axis
on which to visualize. If None is provided, then a new figure
and axis are created.
Default: None
outlier_perc (float or int, optional): Top attribution values which
correspond to a total of outlier_perc percentage of the
total attribution are set to 1 and scaling is performed
using the minimum of these values. For sign=`all`, outliers
and scale value are computed using absolute value of
attributions.
Default: 2
cmap (str, optional): String corresponding to desired colormap for
heatmap visualization. This defaults to "Reds" for negative
sign, "Blues" for absolute value, "Greens" for positive sign,
and a spectrum from red to green for all. Note that this
argument is only used for visualizations displaying heatmaps.
Default: None
alpha_overlay (float, optional): Alpha to set for heatmap when using
`blended_heat_map` visualization mode, which overlays the
heat map over the greyscaled original image.
Default: 0.5
show_colorbar (bool, optional): Displays colorbar for heatmap below
the visualization. If given method does not use a heatmap,
then a colormap axis is created and hidden. This is
necessary for appropriate alignment when visualizing
multiple plots, some with colorbars and some without.
Default: False
title (str, optional): Title string for plot. If None, no title is
set.
Default: None
fig_size (tuple, optional): Size of figure created.
Default: (6,6)
use_pyplot (bool, optional): If true, uses pyplot to create and show
figure and displays the figure after creating. If False,
uses Matplotlib object oriented API and simply returns a
figure object without showing.
Default: True.
Returns:
2-element tuple of **figure**, **axis**:
- **figure** (*matplotlib.pyplot.figure*):
Figure object on which visualization
is created. If plt_fig_axis argument is given, this is the
same figure provided.
- **axis** (*matplotlib.pyplot.axis*):
Axis object on which visualization
is created. If plt_fig_axis argument is given, this is the
same axis provided.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> ig = IntegratedGradients(net)
>>> # Computes integrated gradients for class 3 for a given image .
>>> attribution, delta = ig.attribute(orig_image, target=3)
>>> # Displays blended heat map visualization of computed attributions.
>>> _ = visualize_image_attr(attribution, orig_image, "blended_heat_map")
"""
# Create plot if figure, axis not provided
if plt_fig_axis is not None:
plt_fig, plt_axis = plt_fig_axis
else:
if use_pyplot:
plt_fig, plt_axis = plt.subplots(figsize=fig_size)
else:
plt_fig = Figure(figsize=fig_size)
plt_axis = plt_fig.subplots()
if original_image is not None:
if np.max(original_image) <= 1.0:
original_image = _prepare_image(original_image * 255)
elif ImageVisualizationMethod[method] != ImageVisualizationMethod.heat_map:
raise ValueError(
"Original Image must be provided for"
"any visualization other than heatmap."
)
# Remove ticks and tick labels from plot.
plt_axis.xaxis.set_ticks_position("none")
plt_axis.yaxis.set_ticks_position("none")
plt_axis.set_yticklabels([])
plt_axis.set_xticklabels([])
plt_axis.grid(visible=False)
heat_map = None
# Show original image
if ImageVisualizationMethod[method] == ImageVisualizationMethod.original_image:
assert (
original_image is not None
), "Original image expected for original_image method."
if len(original_image.shape) > 2 and original_image.shape[2] == 1:
original_image = np.squeeze(original_image, axis=2)
plt_axis.imshow(original_image)
else:
# Choose appropriate signed attributions and normalize.
norm_attr = _normalize_attr(attr, sign, outlier_perc, reduction_axis=2)
# Set default colormap and bounds based on sign.
if VisualizeSign[sign] == VisualizeSign.all:
default_cmap = LinearSegmentedColormap.from_list(
"RdWhGn", ["red", "white", "green"]
)
vmin, vmax = -1, 1
elif VisualizeSign[sign] == VisualizeSign.positive:
default_cmap = "Greens"
vmin, vmax = 0, 1
elif VisualizeSign[sign] == VisualizeSign.negative:
default_cmap = "Reds"
vmin, vmax = 0, 1
elif VisualizeSign[sign] == VisualizeSign.absolute_value:
default_cmap = "Blues"
vmin, vmax = 0, 1
else:
raise AssertionError("Visualize Sign type is not valid.")
cmap = cmap if cmap is not None else default_cmap
# Show appropriate image visualization.
if ImageVisualizationMethod[method] == ImageVisualizationMethod.heat_map:
heat_map = plt_axis.imshow(norm_attr, cmap=cmap, vmin=vmin, vmax=vmax)
elif (
ImageVisualizationMethod[method]
== ImageVisualizationMethod.blended_heat_map
):
assert (
original_image is not None
), "Original Image expected for blended_heat_map method."
plt_axis.imshow(np.mean(original_image, axis=2), cmap="gray")
heat_map = plt_axis.imshow(
norm_attr, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha_overlay
)
elif ImageVisualizationMethod[method] == ImageVisualizationMethod.masked_image:
assert VisualizeSign[sign] != VisualizeSign.all, (
"Cannot display masked image with both positive and negative "
"attributions, choose a different sign option."
)
plt_axis.imshow(
_prepare_image(original_image * np.expand_dims(norm_attr, 2))
)
elif ImageVisualizationMethod[method] == ImageVisualizationMethod.alpha_scaling:
assert VisualizeSign[sign] != VisualizeSign.all, (
"Cannot display alpha scaling with both positive and negative "
"attributions, choose a different sign option."
)
plt_axis.imshow(
np.concatenate(
[
original_image,
_prepare_image(np.expand_dims(norm_attr, 2) * 255),
],
axis=2,
)
)
else:
raise AssertionError("Visualize Method type is not valid.")
# Add colorbar. If given method is not a heatmap and no colormap is relevant,
# then a colormap axis is created and hidden. This is necessary for appropriate
# alignment when visualizing multiple plots, some with heatmaps and some
# without.
if show_colorbar:
axis_separator = make_axes_locatable(plt_axis)
colorbar_axis = axis_separator.append_axes("bottom", size="5%", pad=0.1)
if heat_map:
plt_fig.colorbar(heat_map, orientation="horizontal", cax=colorbar_axis)
else:
colorbar_axis.axis("off")
if title:
plt_axis.set_title(title)
if use_pyplot:
plt.show()
return plt_fig, plt_axis
def visualize_image_attr_multiple(
attr: ndarray,
original_image: Union[None, ndarray],
methods: List[str],
signs: List[str],
titles: Union[None, List[str]] = None,
fig_size: Tuple[int, int] = (8, 6),
use_pyplot: bool = True,
**kwargs: Any,
):
r"""
Visualizes attribution using multiple visualization methods displayed
in a 1 x k grid, where k is the number of desired visualizations.
Args:
attr (numpy.ndarray): Numpy array corresponding to attributions to be
visualized. Shape must be in the form (H, W, C), with
channels as last dimension. Shape must also match that of
the original image if provided.
original_image (numpy.ndarray, optional): Numpy array corresponding to
original image. Shape must be in the form (H, W, C), with
channels as the last dimension. Image can be provided either
with values in range 0-1 or 0-255. This is a necessary
argument for any visualization method which utilizes
the original image.
methods (list[str]): List of strings of length k, defining method
for each visualization. Each method must be a valid
string argument for method to visualize_image_attr.
signs (list[str]): List of strings of length k, defining signs for
each visualization. Each sign must be a valid
string argument for sign to visualize_image_attr.
titles (list[str], optional): List of strings of length k, providing
a title string for each plot. If None is provided, no titles
are added to subplots.
Default: None
fig_size (tuple, optional): Size of figure created.
Default: (8, 6)
use_pyplot (bool, optional): If true, uses pyplot to create and show
figure and displays the figure after creating. If False,
uses Matplotlib object oriented API and simply returns a
figure object without showing.
Default: True.
**kwargs (Any, optional): Any additional arguments which will be passed
to every individual visualization. Such arguments include
`show_colorbar`, `alpha_overlay`, `cmap`, etc.
Returns:
2-element tuple of **figure**, **axis**:
- **figure** (*matplotlib.pyplot.figure*):
Figure object on which visualization
is created. If plt_fig_axis argument is given, this is the
same figure provided.
- **axis** (*matplotlib.pyplot.axis*):
Axis object on which visualization
is created. If plt_fig_axis argument is given, this is the
same axis provided.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> ig = IntegratedGradients(net)
>>> # Computes integrated gradients for class 3 for a given image .
>>> attribution, delta = ig.attribute(orig_image, target=3)
>>> # Displays original image and heat map visualization of
>>> # computed attributions side by side.
>>> _ = visualize_image_attr_multiple(attribution, orig_image,
>>> ["original_image", "heat_map"], ["all", "positive"])
"""
assert len(methods) == len(signs), "Methods and signs array lengths must match."
if titles is not None:
assert len(methods) == len(titles), (
"If titles list is given, length must " "match that of methods list."
)
if use_pyplot:
plt_fig = plt.figure(figsize=fig_size)
else:
plt_fig = Figure(figsize=fig_size)
plt_axis = plt_fig.subplots(1, len(methods))
    # When visualizing a single method, subplots returns a single axis rather
    # than an array, so wrap it for uniform indexing below.
if len(methods) == 1:
plt_axis = [plt_axis]
for i in range(len(methods)):
visualize_image_attr(
attr,
original_image=original_image,
method=methods[i],
sign=signs[i],
plt_fig_axis=(plt_fig, plt_axis[i]),
use_pyplot=False,
title=titles[i] if titles else None,
**kwargs,
)
plt_fig.tight_layout()
if use_pyplot:
plt.show()
return plt_fig, plt_axis
def visualize_timeseries_attr(
attr: ndarray,
data: ndarray,
x_values: Optional[ndarray] = None,
method: str = "individual_channels",
sign: str = "absolute_value",
channel_labels: Optional[List[str]] = None,
channels_last: bool = True,
plt_fig_axis: Union[None, Tuple[figure, axis]] = None,
outlier_perc: Union[int, float] = 2,
cmap: Union[None, str] = None,
alpha_overlay: float = 0.7,
show_colorbar: bool = False,
title: Union[None, str] = None,
fig_size: Tuple[int, int] = (6, 6),
use_pyplot: bool = True,
**pyplot_kwargs,
):
r"""
Visualizes attribution for a given timeseries data by normalizing
attribution values of the desired sign (positive, negative, absolute value,
or all) and displaying them using the desired mode in a matplotlib figure.
Args:
attr (numpy.ndarray): Numpy array corresponding to attributions to be
visualized. Shape must be in the form (N, C) with channels
                    as last dimension, unless `channels_last` is set to False.
Shape must also match that of the timeseries data.
data (numpy.ndarray): Numpy array corresponding to the original,
equidistant timeseries data. Shape must be in the form
(N, C) with channels as last dimension, unless
                    `channels_last` is set to False.
x_values (numpy.ndarray, optional): Numpy array corresponding to the
points on the x-axis. Shape must be in the form (N, ). If
not provided, integers from 0 to N-1 are used.
Default: None
method (str, optional): Chosen method for visualizing attributions
overlaid onto data. Supported options are:
1. `overlay_individual` - Plot each channel individually in
a separate panel, and overlay the attributions for each
channel as a heat map. The `alpha_overlay` parameter
controls the alpha of the heat map.
2. `overlay_combined` - Plot all channels in the same panel,
and overlay the average attributions as a heat map.
3. `colored_graph` - Plot each channel in a separate panel,
and color the graphs according to the attribution
                       values. Works best with color maps that do not contain
white or very bright colors.
Default: `overlay_individual`
sign (str, optional): Chosen sign of attributions to visualize.
Supported options are:
                    1. `positive` - Displays only positive attributions.
                    2. `absolute_value` - Displays absolute value of
                       attributions.
                    3. `negative` - Displays only negative attributions.
4. `all` - Displays both positive and negative attribution
values.
Default: `absolute_value`
channel_labels (list[str], optional): List of labels
corresponding to each channel in data.
Default: None
channels_last (bool, optional): If True, data is expected to have
channels as the last dimension, i.e. (N, C). If False, data
is expected to have channels first, i.e. (C, N).
Default: True
plt_fig_axis (tuple, optional): Tuple of matplotlib.pyplot.figure and axis
on which to visualize. If None is provided, then a new figure
and axis are created.
Default: None
outlier_perc (float or int, optional): Top attribution values which
correspond to a total of outlier_perc percentage of the
total attribution are set to 1 and scaling is performed
using the minimum of these values. For sign=`all`, outliers
and scale value are computed using absolute value of
attributions.
Default: 2
cmap (str, optional): String corresponding to desired colormap for
heatmap visualization. This defaults to "Reds" for negative
sign, "Blues" for absolute value, "Greens" for positive sign,
and a spectrum from red to green for all. Note that this
argument is only used for visualizations displaying heatmaps.
Default: None
        alpha_overlay (float, optional): Alpha to set for the attribution heat map
                    when using the `overlay_individual` or `overlay_combined`
                    visualization modes, which overlay the heat map over the data.
Default: 0.7
show_colorbar (bool): Displays colorbar for heat map below
the visualization.
title (str, optional): Title string for plot. If None, no title is
set.
Default: None
fig_size (tuple, optional): Size of figure created.
Default: (6,6)
use_pyplot (bool): If true, uses pyplot to create and show
figure and displays the figure after creating. If False,
uses Matplotlib object oriented API and simply returns a
figure object without showing.
Default: True.
pyplot_kwargs: Keyword arguments forwarded to plt.plot, for example
`linewidth=3`, `color='black'`, etc
Returns:
2-element tuple of **figure**, **axis**:
- **figure** (*matplotlib.pyplot.figure*):
Figure object on which visualization
is created. If plt_fig_axis argument is given, this is the
same figure provided.
- **axis** (*matplotlib.pyplot.axis*):
Axis object on which visualization
is created. If plt_fig_axis argument is given, this is the
same axis provided.
Examples::
>>> # Classifier takes input of shape (batch, length, channels)
>>> model = Classifier()
>>> dl = DeepLift(model)
>>> attribution = dl.attribute(data, target=0)
>>> # Pick the first sample and plot each channel in data in a separate
>>> # panel, with attributions overlaid
>>> visualize_timeseries_attr(attribution[0], data[0], "overlay_individual")
"""
# Check input dimensions
assert len(attr.shape) == 2, "Expected attr of shape (N, C), got {}".format(
attr.shape
)
    assert len(data.shape) == 2, "Expected data of shape (N, C), got {}".format(
        data.shape
    )
# Convert to channels-first
if channels_last:
attr = np.transpose(attr)
data = np.transpose(data)
num_channels = attr.shape[0]
timeseries_length = attr.shape[1]
if num_channels > timeseries_length:
warnings.warn(
"Number of channels ({}) greater than time series length ({}), "
"please verify input format".format(num_channels, timeseries_length)
)
num_subplots = num_channels
if (
TimeseriesVisualizationMethod[method]
== TimeseriesVisualizationMethod.overlay_combined
):
num_subplots = 1
attr = np.sum(attr, axis=0) # Merge attributions across channels
if x_values is not None:
assert (
x_values.shape[0] == timeseries_length
), "x_values must have same length as data"
else:
x_values = np.arange(timeseries_length)
# Create plot if figure, axis not provided
if plt_fig_axis is not None:
plt_fig, plt_axis = plt_fig_axis
else:
if use_pyplot:
plt_fig, plt_axis = plt.subplots(
figsize=fig_size, nrows=num_subplots, sharex=True
)
else:
plt_fig = Figure(figsize=fig_size)
plt_axis = plt_fig.subplots(nrows=num_subplots, sharex=True)
if not isinstance(plt_axis, ndarray):
plt_axis = np.array([plt_axis])
norm_attr = _normalize_attr(attr, sign, outlier_perc, reduction_axis=None)
# Set default colormap and bounds based on sign.
if VisualizeSign[sign] == VisualizeSign.all:
default_cmap = LinearSegmentedColormap.from_list(
"RdWhGn", ["red", "white", "green"]
)
vmin, vmax = -1, 1
elif VisualizeSign[sign] == VisualizeSign.positive:
default_cmap = "Greens"
vmin, vmax = 0, 1
elif VisualizeSign[sign] == VisualizeSign.negative:
default_cmap = "Reds"
vmin, vmax = 0, 1
elif VisualizeSign[sign] == VisualizeSign.absolute_value:
default_cmap = "Blues"
vmin, vmax = 0, 1
else:
raise AssertionError("Visualize Sign type is not valid.")
cmap = cmap if cmap is not None else default_cmap
cmap = cm.get_cmap(cmap)
cm_norm = colors.Normalize(vmin, vmax)
def _plot_attrs_as_axvspan(attr_vals, x_vals, ax):
        half_col_width = (x_vals[1] - x_vals[0]) / 2.0
for icol, col_center in enumerate(x_vals):
left = col_center - half_col_width
right = col_center + half_col_width
ax.axvspan(
xmin=left,
xmax=right,
facecolor=(cmap(cm_norm(attr_vals[icol]))),
edgecolor=None,
alpha=alpha_overlay,
)
if (
TimeseriesVisualizationMethod[method]
== TimeseriesVisualizationMethod.overlay_individual
):
for chan in range(num_channels):
plt_axis[chan].plot(x_values, data[chan, :], **pyplot_kwargs)
if channel_labels is not None:
plt_axis[chan].set_ylabel(channel_labels[chan])
_plot_attrs_as_axvspan(norm_attr[chan], x_values, plt_axis[chan])
plt.subplots_adjust(hspace=0)
elif (
TimeseriesVisualizationMethod[method]
== TimeseriesVisualizationMethod.overlay_combined
):
# Dark colors are better in this case
cycler = plt.cycler("color", cm.Dark2.colors)
plt_axis[0].set_prop_cycle(cycler)
for chan in range(num_channels):
label = channel_labels[chan] if channel_labels else None
plt_axis[0].plot(x_values, data[chan, :], label=label, **pyplot_kwargs)
_plot_attrs_as_axvspan(norm_attr, x_values, plt_axis[0])
plt_axis[0].legend(loc="best")
elif (
TimeseriesVisualizationMethod[method]
== TimeseriesVisualizationMethod.colored_graph
):
for chan in range(num_channels):
points = np.array([x_values, data[chan, :]]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, cmap=cmap, norm=cm_norm, **pyplot_kwargs)
lc.set_array(norm_attr[chan, :])
plt_axis[chan].add_collection(lc)
plt_axis[chan].set_ylim(
1.2 * np.min(data[chan, :]), 1.2 * np.max(data[chan, :])
)
if channel_labels is not None:
plt_axis[chan].set_ylabel(channel_labels[chan])
plt.subplots_adjust(hspace=0)
else:
raise AssertionError("Invalid visualization method: {}".format(method))
plt.xlim([x_values[0], x_values[-1]])
if show_colorbar:
axis_separator = make_axes_locatable(plt_axis[-1])
colorbar_axis = axis_separator.append_axes("bottom", size="5%", pad=0.4)
colorbar_alpha = alpha_overlay
if (
TimeseriesVisualizationMethod[method]
== TimeseriesVisualizationMethod.colored_graph
):
colorbar_alpha = 1.0
plt_fig.colorbar(
cm.ScalarMappable(cm_norm, cmap),
orientation="horizontal",
cax=colorbar_axis,
alpha=colorbar_alpha,
)
if title:
plt_axis[0].set_title(title)
if use_pyplot:
plt.show()
return plt_fig, plt_axis
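# A minimal sketch of driving the timeseries visualization above with synthetic
# data; the series and attribution values are invented purely for illustration,
# and the call mirrors the "Examples" section of the docstring.
def _timeseries_visualization_sketch():
    series = np.random.randn(100, 3)  # (N, C): 100 time steps, 3 channels
    fake_attr = np.random.uniform(-1.0, 1.0, (100, 3))  # same shape as the data
    fig, axes = visualize_timeseries_attr(
        fake_attr,
        series,
        method="overlay_individual",
        sign="all",
        channel_labels=["ch0", "ch1", "ch2"],
        show_colorbar=True,
        use_pyplot=False,  # return the figure object instead of calling plt.show()
    )
    return fig, axes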
# These visualization methods are for text and are partially copied from
# experiments conducted by Davide Testuggine at Facebook.
class VisualizationDataRecord:
r"""
A data record for storing attribution relevant information
"""
__slots__ = [
"word_attributions",
"pred_prob",
"pred_class",
"true_class",
"attr_class",
"attr_score",
"raw_input_ids",
"convergence_score",
]
def __init__(
self,
word_attributions,
pred_prob,
pred_class,
true_class,
attr_class,
attr_score,
raw_input_ids,
convergence_score,
) -> None:
self.word_attributions = word_attributions
self.pred_prob = pred_prob
self.pred_class = pred_class
self.true_class = true_class
self.attr_class = attr_class
self.attr_score = attr_score
self.raw_input_ids = raw_input_ids
self.convergence_score = convergence_score
def _get_color(attr):
# clip values to prevent CSS errors (Values should be from [-1,1])
attr = max(-1, min(1, attr))
if attr > 0:
hue = 120
sat = 75
lig = 100 - int(50 * attr)
else:
hue = 0
sat = 75
lig = 100 - int(-40 * attr)
return "hsl({}, {}%, {}%)".format(hue, sat, lig)
def format_classname(classname):
return '<td><text style="padding-right:2em"><b>{}</b></text></td>'.format(classname)
def format_special_tokens(token):
if token.startswith("<") and token.endswith(">"):
return "#" + token.strip("<>")
return token
def format_tooltip(item, text):
return '<div class="tooltip">{item}\
<span class="tooltiptext">{text}</span>\
</div>'.format(
item=item, text=text
)
def format_word_importances(words, importances):
if importances is None or len(importances) == 0:
return "<td></td>"
assert len(words) <= len(importances)
tags = ["<td>"]
for word, importance in zip(words, importances[: len(words)]):
word = format_special_tokens(word)
color = _get_color(importance)
unwrapped_tag = '<mark style="background-color: {color}; opacity:1.0; \
line-height:1.75"><font color="black"> {word}\
</font></mark>'.format(
color=color, word=word
)
tags.append(unwrapped_tag)
tags.append("</td>")
return "".join(tags)
def visualize_text(
datarecords: Iterable[VisualizationDataRecord], legend: bool = True
) -> "HTML": # In quotes because this type doesn't exist in standalone mode
assert HAS_IPYTHON, (
"IPython must be available to visualize text. "
"Please run 'pip install ipython'."
)
dom = ["<table width: 100%>"]
rows = [
"<tr><th>True Label</th>"
"<th>Predicted Label</th>"
"<th>Attribution Label</th>"
"<th>Attribution Score</th>"
"<th>Word Importance</th>"
]
for datarecord in datarecords:
rows.append(
"".join(
[
"<tr>",
format_classname(datarecord.true_class),
format_classname(
"{0} ({1:.2f})".format(
datarecord.pred_class, datarecord.pred_prob
)
),
format_classname(datarecord.attr_class),
format_classname("{0:.2f}".format(datarecord.attr_score)),
format_word_importances(
datarecord.raw_input_ids, datarecord.word_attributions
),
"<tr>",
]
)
)
if legend:
dom.append(
'<div style="border-top: 1px solid; margin-top: 5px; \
padding-top: 5px; display: inline-block">'
)
dom.append("<b>Legend: </b>")
for value, label in zip([-1, 0, 1], ["Negative", "Neutral", "Positive"]):
dom.append(
'<span style="display: inline-block; width: 10px; height: 10px; \
border: 1px solid; background-color: \
{value}"></span> {label} '.format(
value=_get_color(value), label=label
)
)
dom.append("</div>")
dom.append("".join(rows))
dom.append("</table>")
html = HTML("".join(dom))
display(html)
return html
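# A small sketch of the text visualization helpers above, assuming token-level
# attributions have already been computed elsewhere; the scores below are
# invented. visualize_text requires IPython, so this is intended for notebooks.
def _text_visualization_sketch():
    record = VisualizationDataRecord(
        word_attributions=[0.1, -0.4, 0.8, 0.0],
        pred_prob=0.92,
        pred_class="positive",
        true_class="positive",
        attr_class="positive",
        attr_score=0.5,
        raw_input_ids=["the", "movie", "was", "great"],
        convergence_score=0.01,
    )
    return visualize_text([record], legend=True)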
|
#!/usr/bin/env python3
from typing import Any, Callable, cast, Generic, List, Tuple, Type, Union
import torch
import torch.nn.functional as F
from captum._utils.common import (
_format_additional_forward_args,
_format_tensor_into_tuples,
_run_forward,
_validate_target,
)
from captum._utils.gradient import compute_gradients
from captum._utils.typing import ModuleOrModuleList, TargetType
from captum.attr._utils.common import (
_format_input_baseline,
_sum_rows,
_tensorize_baseline,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class Attribution:
r"""
All attribution algorithms extend this class. It enforces its child classes
to extend and override core `attribute` method.
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
                        of a PyTorch model or any modification of the model's forward
function.
"""
self.forward_func = forward_func
attribute: Callable
r"""
This method computes and returns the attribution values for each input tensor.
Deriving classes are responsible for implementing its logic accordingly.
Specific attribution algorithms that extend this class take relevant
arguments.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which attribution
is computed. It can be provided as a single tensor or
a tuple of multiple tensors. If multiple input tensors
are provided, the batch sizes must be aligned across all
tensors.
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution values for each
input tensor. The `attributions` have the same shape and
dimensionality as the inputs.
If a single tensor is provided as inputs, a single tensor
is returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
"""
@property
def multiplies_by_inputs(self):
return False
def has_convergence_delta(self) -> bool:
r"""
This method informs the user whether the attribution algorithm provides
a convergence delta (aka an approximation error) or not. Convergence
delta may serve as a proxy of correctness of attribution algorithm's
approximation. If deriving attribution class provides a
`compute_convergence_delta` method, it should
override both `compute_convergence_delta` and `has_convergence_delta` methods.
Returns:
bool:
Returns whether the attribution algorithm
provides a convergence delta (aka approximation error) or not.
"""
return False
compute_convergence_delta: Callable
r"""
    The attribution algorithms which derive from the `Attribution` class and
    provide convergence delta (aka approximation error) should implement this
    method. Convergence delta can be computed based on certain properties of
    the attribution algorithms.
Args:
attributions (Tensor or tuple[Tensor, ...]): Attribution scores that
are precomputed by an attribution algorithm.
Attributions can be provided in form of a single tensor
or a tuple of those. It is assumed that attribution
tensor's dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
        *args (Any, optional): Additional arguments that are used by the
sub-classes depending on the specific implementation
of `compute_convergence_delta`.
Returns:
*Tensor* of **deltas**:
- **deltas** (*Tensor*):
                Depending on the specific implementation of the
                sub-classes, the convergence delta can be returned per
                sample in the form of a tensor, or it can be aggregated
                across multiple samples and returned as a single
                floating point tensor.
"""
@classmethod
def get_name(cls: Type["Attribution"]) -> str:
r"""
Create readable class name by inserting a space before any capital
characters besides the very first.
Returns:
str: a readable class name
Example:
for a class called IntegratedGradients, we return the string
'Integrated Gradients'
"""
return "".join(
[
char if char.islower() or idx == 0 else " " + char
for idx, char in enumerate(cls.__name__)
]
)
class GradientAttribution(Attribution):
r"""
All gradient based attribution algorithms extend this class. It requires a
forward function, which most commonly is the forward function of the model
that we want to interpret or the model itself.
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
                        of a PyTorch model or any modification of the model's forward
function.
"""
Attribution.__init__(self, forward_func)
self.gradient_func = compute_gradients
@log_usage()
def compute_convergence_delta(
self,
attributions: Union[Tensor, Tuple[Tensor, ...]],
start_point: Union[
None, int, float, Tensor, Tuple[Union[int, float, Tensor], ...]
],
end_point: Union[Tensor, Tuple[Tensor, ...]],
target: TargetType = None,
additional_forward_args: Any = None,
) -> Tensor:
r"""
        Here we provide a specific implementation for `compute_convergence_delta`
        which is based on a common property among gradient-based attribution
        algorithms. In the literature this property is sometimes called the
        completeness axiom. The completeness axiom states that the sum of the
        attributions must equal the difference of the model's function evaluated
        at the end and start points. In other words:
        sum(attributions) - (F(end_point) - F(start_point)) is close to zero.
        The delta returned by this method is defined as the difference stated above.
This implementation assumes that both the `start_point` and `end_point` have
the same shape and dimensionality. It also assumes that the target must have
the same number of examples as the `start_point` and the `end_point` in case
it is provided in form of a list or a non-singleton tensor.
Args:
attributions (Tensor or tuple[Tensor, ...]): Precomputed attribution
scores. The user can compute those using any attribution
algorithm. It is assumed the shape and the
dimensionality of attributions must match the shape and
the dimensionality of `start_point` and `end_point`.
It also assumes that the attribution tensor's
dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
start_point (Tensor or tuple[Tensor, ...], optional): `start_point`
is passed as an input to model's forward function. It
is the starting point of attributions' approximation.
It is assumed that both `start_point` and `end_point`
have the same shape and dimensionality.
end_point (Tensor or tuple[Tensor, ...]): `end_point`
is passed as an input to model's forward function. It
is the end point of attributions' approximation.
It is assumed that both `start_point` and `end_point`
have the same shape and dimensionality.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples.
`additional_forward_args` is used both for `start_point`
and `end_point` when computing the forward pass.
Default: None
Returns:
*Tensor* of **deltas**:
- **deltas** (*Tensor*):
This implementation returns convergence delta per
sample. Deriving sub-classes may do any type of aggregation
of those values, if necessary.
"""
end_point, start_point = _format_input_baseline(end_point, start_point)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# tensorizing start_point in case it is a scalar or one example baseline
# If the batch size is large we could potentially also tensorize only one
# sample and expand the output to the rest of the elements in the batch
start_point = _tensorize_baseline(end_point, start_point)
attributions = _format_tensor_into_tuples(attributions)
# verify that the attributions and end_point match on 1st dimension
for attribution, end_point_tnsr in zip(attributions, end_point):
assert end_point_tnsr.shape[0] == attribution.shape[0], (
"Attributions tensor and the end_point must match on the first"
" dimension but found attribution: {} and end_point: {}".format(
attribution.shape[0], end_point_tnsr.shape[0]
)
)
num_samples = end_point[0].shape[0]
_validate_input(end_point, start_point)
_validate_target(num_samples, target)
with torch.no_grad():
start_out_sum = _sum_rows(
_run_forward(
self.forward_func, start_point, target, additional_forward_args
)
)
end_out_sum = _sum_rows(
_run_forward(
self.forward_func, end_point, target, additional_forward_args
)
)
row_sums = [_sum_rows(attribution) for attribution in attributions]
attr_sum = torch.stack(
[cast(Tensor, sum(row_sum)) for row_sum in zip(*row_sums)]
)
_delta = attr_sum - (end_out_sum - start_out_sum)
return _delta
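# A brief sketch of the completeness check implemented above, using a toy linear
# model; IntegratedGradients is used only as a representative GradientAttribution
# subclass. The per-example delta should be close to zero, since the summed
# attributions approximate F(inputs) - F(baselines).
def _completeness_check_sketch():
    import torch.nn as nn
    from captum.attr import IntegratedGradients

    model = nn.Linear(3, 2)
    ig = IntegratedGradients(model)
    inputs = torch.randn(4, 3)
    baselines = torch.zeros(4, 3)
    attributions = ig.attribute(inputs, baselines=baselines, target=0)
    delta = ig.compute_convergence_delta(attributions, baselines, inputs, target=0)
    return delta  # one value per example, expected to be ~0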
class PerturbationAttribution(Attribution):
r"""
All perturbation based attribution algorithms extend this class. It requires a
forward function, which most commonly is the forward function of the model
that we want to interpret or the model itself.
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
                        of a PyTorch model or any modification of the model's forward
function.
"""
Attribution.__init__(self, forward_func)
@property
def multiplies_by_inputs(self):
return True
class InternalAttribution(Attribution, Generic[ModuleOrModuleList]):
r"""
    Shared base class for LayerAttribution and NeuronAttribution,
attribution types that require a model and a particular layer.
"""
layer: ModuleOrModuleList
def __init__(
self,
forward_func: Callable,
layer: ModuleOrModuleList,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
                        of a PyTorch model or any modification of the model's forward
function.
layer (torch.nn.Module): Layer for which output attributions are computed.
Output size of attribute matches that of layer output.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model, which allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
Attribution.__init__(self, forward_func)
self.layer = layer
self.device_ids = device_ids
class LayerAttribution(InternalAttribution):
r"""
Layer attribution provides attribution values for the given layer, quantifying
the importance of each neuron within the given layer's output. The output
attribution of calling attribute on a LayerAttribution object always matches
the size of the layer output.
"""
def __init__(
self,
forward_func: Callable,
layer: ModuleOrModuleList,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
                        of a PyTorch model or any modification of the model's forward
function.
layer (torch.nn.Module): Layer for which output attributions are computed.
Output size of attribute matches that of layer output.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model, which allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
InternalAttribution.__init__(self, forward_func, layer, device_ids)
@staticmethod
def interpolate(
layer_attribution: Tensor,
interpolate_dims: Union[int, Tuple[int, ...]],
interpolate_mode: str = "nearest",
) -> Tensor:
r"""
Interpolates given 3D, 4D or 5D layer attribution to given dimensions.
This is often utilized to upsample the attribution of a convolutional layer
to the size of an input, which allows visualizing in the input space.
Args:
layer_attribution (Tensor): Tensor of given layer attributions.
interpolate_dims (int or tuple): Upsampled dimensions. The
number of elements must be the number of dimensions
of layer_attribution - 2, since the first dimension
corresponds to number of examples and the second is
assumed to correspond to the number of channels.
interpolate_mode (str): Method for interpolation, which
must be a valid input interpolation mode for
torch.nn.functional. These methods are
"nearest", "area", "linear" (3D-only), "bilinear"
(4D-only), "bicubic" (4D-only), "trilinear" (5D-only)
based on the number of dimensions of the given layer
attribution.
Returns:
*Tensor* of upsampled **attributions**:
- **attributions** (*Tensor*):
Upsampled layer attributions with first 2 dimensions matching
                layer_attribution and remaining dimensions given by
interpolate_dims.
"""
return F.interpolate(layer_attribution, interpolate_dims, mode=interpolate_mode)
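# A short sketch of LayerAttribution.interpolate: upsampling a
# (batch, channel, H, W) layer attribution to the spatial size of the input,
# e.g. to overlay a conv-layer heat map on a 224x224 image. The sizes here are
# illustrative only.
def _interpolate_sketch():
    layer_attr = torch.rand(2, 1, 7, 7)  # e.g. attribution of a late conv block
    upsampled = LayerAttribution.interpolate(layer_attr, (224, 224), "bilinear")
    return upsampled.shape  # torch.Size([2, 1, 224, 224])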
class NeuronAttribution(InternalAttribution):
r"""
Neuron attribution provides input attribution for a given neuron, quantifying
the importance of each input feature in the activation of a particular neuron.
Calling attribute on a NeuronAttribution object requires also providing
the index of the neuron in the output of the given layer for which attributions
are required.
The output attribution of calling attribute on a NeuronAttribution object
always matches the size of the input.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable or torch.nn.Module): This can either be an instance
                        of a PyTorch model or any modification of the model's forward
function.
layer (torch.nn.Module): Layer for which output attributions are computed.
Output size of attribute matches that of layer output.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model, which allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
InternalAttribution.__init__(self, forward_func, layer, device_ids)
attribute: Callable
r"""
This method computes and returns the neuron attribution values for each
input tensor. Deriving classes are responsible for implementing
its logic accordingly.
Specific attribution algorithms that extend this class take relevant
arguments.
Args:
inputs: A single high dimensional input tensor or a tuple of them.
neuron_selector (int or tuple): Tuple providing index of neuron in output
of given layer for which attribution is desired. Length of
this tuple must be one less than the number of
dimensions in the output of the given layer (since
dimension 0 corresponds to number of examples).
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution values for
                each input vector. The `attributions` have the same
                dimensionality as the inputs.
"""
|
#!/usr/bin/env python3
import typing
from inspect import signature
from typing import Any, Callable, List, Tuple, TYPE_CHECKING, Union
import torch
from captum._utils.common import (
_format_baseline,
_format_output,
_format_tensor_into_tuples,
_validate_input as _validate_input_basic,
)
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._utils.approximation_methods import SUPPORTED_METHODS
from torch import Tensor
if TYPE_CHECKING:
from captum.attr._utils.attribution import GradientAttribution
def _sum_rows(input: Tensor) -> Tensor:
return input.reshape(input.shape[0], -1).sum(1)
def _validate_target(num_samples: int, target: TargetType) -> None:
if isinstance(target, list) or (
isinstance(target, torch.Tensor) and torch.numel(target) > 1
):
        assert num_samples == len(target), (
            "The number of samples provided in the "
            "input {} does not match the number of targets {}".format(
                num_samples, len(target)
            )
        )
def _validate_input(
inputs: Tuple[Tensor, ...],
baselines: Tuple[Union[Tensor, int, float], ...],
n_steps: int = 50,
method: str = "riemann_trapezoid",
draw_baseline_from_distrib: bool = False,
) -> None:
_validate_input_basic(inputs, baselines, draw_baseline_from_distrib)
assert (
n_steps >= 0
), "The number of steps must be a positive integer. " "Given: {}".format(n_steps)
assert (
method in SUPPORTED_METHODS
), "Approximation method must be one for the following {}. " "Given {}".format(
SUPPORTED_METHODS, method
)
def _validate_noise_tunnel_type(
nt_type: str, supported_noise_tunnel_types: List[str]
) -> None:
assert nt_type in supported_noise_tunnel_types, (
"Noise types must be either `smoothgrad`, `smoothgrad_sq` or `vargrad`. "
"Given {}".format(nt_type)
)
@typing.overload
def _format_input_baseline(
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: Union[Tensor, Tuple[Tensor, ...]],
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def _format_input_baseline(
inputs: Union[Tensor, Tuple[Tensor, ...]], baselines: BaselineType
) -> Tuple[Tuple[Tensor, ...], Tuple[Union[Tensor, int, float], ...]]:
...
def _format_input_baseline(
inputs: Union[Tensor, Tuple[Tensor, ...]], baselines: BaselineType
) -> Tuple[Tuple[Tensor, ...], Tuple[Union[Tensor, int, float], ...]]:
inputs = _format_tensor_into_tuples(inputs)
baselines = _format_baseline(baselines, inputs)
return inputs, baselines
# This function can potentially be merged with the `format_baseline` function
# however, since currently not all algorithms support baselines of type
# callable this will be kept in a separate function.
@typing.overload
def _format_callable_baseline(
baselines: Union[
None,
Callable[..., Union[Tensor, Tuple[Tensor, ...]]],
Tensor,
Tuple[Tensor, ...],
],
inputs: Union[Tensor, Tuple[Tensor, ...]],
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _format_callable_baseline(
baselines: Union[
None,
Callable[..., Union[Tensor, Tuple[Tensor, ...]]],
Tensor,
int,
float,
Tuple[Union[Tensor, int, float], ...],
],
inputs: Union[Tensor, Tuple[Tensor, ...]],
) -> Tuple[Union[Tensor, int, float], ...]:
...
def _format_callable_baseline(
baselines: Union[
None,
Callable[..., Union[Tensor, Tuple[Tensor, ...]]],
Tensor,
int,
float,
Tuple[Union[Tensor, int, float], ...],
],
inputs: Union[Tensor, Tuple[Tensor, ...]],
) -> Tuple[Union[Tensor, int, float], ...]:
if callable(baselines):
# Note: this assumes that if baselines is a function and if it takes
# arguments, then the first argument is the `inputs`.
# This can be expanded in the future with better type checks
baseline_parameters = signature(baselines).parameters
if len(baseline_parameters) == 0:
baselines = baselines()
else:
baselines = baselines(inputs)
return _format_baseline(baselines, _format_tensor_into_tuples(inputs))
def _format_and_verify_strides(
strides: Union[None, int, Tuple[int, ...], Tuple[Union[int, Tuple[int, ...]], ...]],
inputs: Tuple[Tensor, ...],
) -> Tuple[Union[int, Tuple[int, ...]], ...]:
# Formats strides, which are necessary for occlusion
# Assumes inputs are already formatted (in tuple)
if strides is None:
strides = tuple(1 for input in inputs)
if len(inputs) == 1 and not (isinstance(strides, tuple) and len(strides) == 1):
strides = (strides,) # type: ignore
assert isinstance(strides, tuple) and len(strides) == len(
inputs
), "Strides must be provided for each input tensor."
for i in range(len(inputs)):
assert isinstance(strides[i], int) or (
isinstance(strides[i], tuple)
and len(strides[i]) == len(inputs[i].shape) - 1 # type: ignore
), (
"Stride for input index {} is {}, which is invalid for input with "
"shape {}. It must be either an int or a tuple with length equal to "
"len(input_shape) - 1."
).format(
i, strides[i], inputs[i].shape
)
return strides
def _format_and_verify_sliding_window_shapes(
sliding_window_shapes: Union[Tuple[int, ...], Tuple[Tuple[int, ...], ...]],
inputs: Tuple[Tensor, ...],
) -> Tuple[Tuple[int, ...], ...]:
# Formats shapes of sliding windows, which is necessary for occlusion
# Assumes inputs is already formatted (in tuple)
if isinstance(sliding_window_shapes[0], int):
sliding_window_shapes = (sliding_window_shapes,) # type: ignore
sliding_window_shapes: Tuple[Tuple[int, ...], ...]
assert len(sliding_window_shapes) == len(
inputs
), "Must provide sliding window dimensions for each input tensor."
for i in range(len(inputs)):
assert (
isinstance(sliding_window_shapes[i], tuple)
and len(sliding_window_shapes[i]) == len(inputs[i].shape) - 1
), (
"Occlusion shape for input index {} is {} but should be a tuple with "
"{} dimensions."
).format(
i, sliding_window_shapes[i], len(inputs[i].shape) - 1
)
return sliding_window_shapes
@typing.overload
def _compute_conv_delta_and_format_attrs(
attr_algo: "GradientAttribution",
return_convergence_delta: bool,
attributions: Tuple[Tensor, ...],
start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],
end_point: Union[Tensor, Tuple[Tensor, ...]],
additional_forward_args: Any,
target: TargetType,
is_inputs_tuple: Literal[False] = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
...
@typing.overload
def _compute_conv_delta_and_format_attrs(
attr_algo: "GradientAttribution",
return_convergence_delta: bool,
attributions: Tuple[Tensor, ...],
start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],
end_point: Union[Tensor, Tuple[Tensor, ...]],
additional_forward_args: Any,
target: TargetType,
is_inputs_tuple: Literal[True],
) -> Union[Tuple[Tensor, ...], Tuple[Tuple[Tensor, ...], Tensor]]:
...
# FIXME: GradientAttribution is provided as a string due to a circular import.
# This should be fixed when common is refactored into separate files.
def _compute_conv_delta_and_format_attrs(
attr_algo: "GradientAttribution",
return_convergence_delta: bool,
attributions: Tuple[Tensor, ...],
start_point: Union[int, float, Tensor, Tuple[Union[int, float, Tensor], ...]],
end_point: Union[Tensor, Tuple[Tensor, ...]],
additional_forward_args: Any,
target: TargetType,
is_inputs_tuple: bool = False,
) -> Union[
Tensor, Tuple[Tensor, ...], Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]
]:
if return_convergence_delta:
# computes convergence error
delta = attr_algo.compute_convergence_delta(
attributions,
start_point,
end_point,
additional_forward_args=additional_forward_args,
target=target,
)
return _format_output(is_inputs_tuple, attributions), delta
else:
return _format_output(is_inputs_tuple, attributions)
def _tensorize_baseline(
inputs: Tuple[Tensor, ...], baselines: Tuple[Union[int, float, Tensor], ...]
) -> Tuple[Tensor, ...]:
def _tensorize_single_baseline(baseline, input):
if isinstance(baseline, (int, float)):
return torch.full_like(input, baseline)
if input.shape[0] > baseline.shape[0] and baseline.shape[0] == 1:
return torch.cat([baseline] * input.shape[0])
return baseline
assert isinstance(inputs, tuple) and isinstance(baselines, tuple), (
"inputs and baselines must"
"have tuple type but found baselines: {} and inputs: {}".format(
type(baselines), type(inputs)
)
)
return tuple(
_tensorize_single_baseline(baseline, input)
for baseline, input in zip(baselines, inputs)
)
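# A quick sketch of _tensorize_baseline: scalar baselines are expanded to full
# tensors via torch.full_like, and single-example baselines are repeated along
# the batch dimension to match the inputs. The shapes below are illustrative.
def _tensorize_baseline_sketch():
    inputs = (torch.ones(4, 3), torch.ones(4, 2))
    baselines = (0.0, torch.zeros(1, 2))
    tensorized = _tensorize_baseline(inputs, baselines)
    return [b.shape for b in tensorized]  # [torch.Size([4, 3]), torch.Size([4, 2])]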
def _reshape_and_sum(
tensor_input: Tensor, num_steps: int, num_examples: int, layer_size: Tuple[int, ...]
) -> Tensor:
# Used for attribution methods which perform integration
# Sums across integration steps by reshaping tensor to
# (num_steps, num_examples, (layer_size)) and summing over
# dimension 0. Returns a tensor of size (num_examples, (layer_size))
return torch.sum(
tensor_input.reshape((num_steps, num_examples) + layer_size), dim=0
)
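# A tiny worked example of _reshape_and_sum: 3 integration steps, 2 examples and
# a per-example layer of shape (4,). The flat (3 * 2, 4) tensor is reshaped to
# (steps, examples, 4) and summed over the step dimension.
def _reshape_and_sum_sketch():
    flat = torch.arange(24, dtype=torch.float32).reshape(6, 4)
    summed = _reshape_and_sum(flat, num_steps=3, num_examples=2, layer_size=(4,))
    return summed.shape  # torch.Size([2, 4])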
def _call_custom_attribution_func(
custom_attribution_func: Callable[..., Tuple[Tensor, ...]],
multipliers: Tuple[Tensor, ...],
inputs: Tuple[Tensor, ...],
baselines: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
assert callable(custom_attribution_func), (
"`custom_attribution_func`"
" must be a callable function but {} provided".format(
type(custom_attribution_func)
)
)
custom_attr_func_params = signature(custom_attribution_func).parameters
if len(custom_attr_func_params) == 1:
return custom_attribution_func(multipliers)
elif len(custom_attr_func_params) == 2:
return custom_attribution_func(multipliers, inputs)
elif len(custom_attr_func_params) == 3:
return custom_attribution_func(multipliers, inputs, baselines)
else:
raise AssertionError(
"`custom_attribution_func` must take at least one and at most 3 arguments."
)
def _find_output_mode_and_verify(
initial_eval: Union[int, float, Tensor],
num_examples: int,
perturbations_per_eval: int,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric],
) -> bool:
"""
This method identifies whether the model outputs a single output for a batch
(agg_output_mode = True) or whether it outputs a single output per example
(agg_output_mode = False) and returns agg_output_mode. The method also
verifies that perturbations_per_eval is 1 in the case that agg_output_mode is True
    and also verifies that the first dimension of each feature mask is 1 if the
    model returns a single output for a batch.
"""
if isinstance(initial_eval, (int, float)) or (
isinstance(initial_eval, torch.Tensor)
and (
len(initial_eval.shape) == 0
or (num_examples > 1 and initial_eval.numel() == 1)
)
):
agg_output_mode = True
assert (
perturbations_per_eval == 1
), "Cannot have perturbations_per_eval > 1 when function returns scalar."
if feature_mask is not None:
for single_mask in feature_mask:
assert single_mask.shape[0] == 1, (
"Cannot provide different masks for each example when function "
"returns a scalar."
)
else:
agg_output_mode = False
assert (
isinstance(initial_eval, torch.Tensor) and initial_eval[0].numel() == 1
), "Target should identify a single element in the model output."
return agg_output_mode
def _construct_default_feature_mask(
inputs: Tuple[Tensor, ...]
) -> Tuple[Tuple[Tensor, ...], int]:
feature_mask = []
current_num_features = 0
for i in range(len(inputs)):
num_features = torch.numel(inputs[i][0])
feature_mask.append(
current_num_features
+ torch.reshape(
torch.arange(num_features, device=inputs[i].device),
inputs[i][0:1].shape,
)
)
current_num_features += num_features
total_features = current_num_features
feature_mask = tuple(feature_mask)
return feature_mask, total_features
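# A small sketch of the default feature mask construction above: every scalar of
# each example gets its own feature id, with ids continuing across the tuple of
# inputs.
def _default_feature_mask_sketch():
    inputs = (torch.zeros(2, 3), torch.zeros(2, 2, 2))
    mask, total_features = _construct_default_feature_mask(inputs)
    # mask[0] holds ids 0..2 with shape (1, 3), mask[1] holds ids 3..6 with
    # shape (1, 2, 2), and total_features == 7
    return mask, total_features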
|
#!/usr/bin/env python3
from collections import defaultdict
from typing import Any, Dict, List, Optional, Union
from captum._utils.common import _format_tensor_into_tuples
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.stat import Stat
from captum.attr._utils.summarizer import Summarizer
from captum.log import log_usage
from torch import Tensor
class ClassSummarizer(Summarizer):
r"""
Used to keep track of summaries for associated classes. The
classes/labels can be of any type that are supported by `dict`.
This also keeps track of an aggregate of all class summaries.
"""
@log_usage()
def __init__(self, stats: List[Stat]) -> None:
Summarizer.__init__.__wrapped__(self, stats)
self.summaries: Dict[Any, Summarizer] = defaultdict(
lambda: Summarizer(stats=stats)
)
def update( # type: ignore
self,
x: TensorOrTupleOfTensorsGeneric,
labels: TargetType = None,
):
r"""
Updates the stats of the summarizer, optionally associated to classes.
This accepts either a single tensor to summarise or a tuple of tensors.
Args:
x (Tensor or tuple[Tensor, ...]):
The input tensor to be summarised. The first
dimension of this input must be associated to
the batch size of the inputs.
labels (int, tuple, Tensor, or list, optional):
                The associated labels for `x`. If a single label is provided,
                we assume it is the label for all inputs in `x`.
                If this is None, we simply aggregate into the total summary.
"""
if labels is None:
super().update(x)
return
x = _format_tensor_into_tuples(x)
num_labels = 1
labels_typed: Union[List[Any], Tensor]
if isinstance(labels, list) or isinstance(labels, Tensor):
labels_typed = labels
num_labels = len(labels) # = labels.size(0) if tensor
else:
labels_typed = [labels]
# mypy doesn't realise I have made the int a list
if len(labels_typed) > 1:
for x_i in x:
assert x_i.size(0) == num_labels, (
"batch size does not equal amount of labels; "
"please ensure length of labels is equal to 1 "
"or to the `batch_size` corresponding to the "
"number of examples in the input(s)"
)
batch_size = x[0].size(0)
for i in range(batch_size):
tensors_to_summarize = tuple(tensor[i] for tensor in x)
tensors_to_summarize_copy = tuple(tensor[i].clone() for tensor in x)
label = labels_typed[0] if len(labels_typed) == 1 else labels_typed[i]
self.summaries[label].update(tensors_to_summarize)
super().update(tensors_to_summarize_copy)
@property
def class_summaries(
self,
) -> Dict[
Any, Union[None, Dict[str, Optional[Tensor]], List[Dict[str, Optional[Tensor]]]]
]:
r"""
Returns:
The summaries for each class.
"""
return {key: value.summary for key, value in self.summaries.items()}
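# A compact sketch of ClassSummarizer: per-class summaries plus an aggregate over
# all examples, using the Mean and StdDev stats from captum.attr._utils.stat.
# The attribution values and labels below are invented for illustration.
def _class_summarizer_sketch():
    import torch
    from captum.attr._utils.stat import Mean, StdDev

    summarizer = ClassSummarizer(stats=[Mean(), StdDev()])
    attributions = torch.randn(4, 5)
    labels = [0, 1, 0, 1]
    summarizer.update(attributions, labels=labels)
    per_class = summarizer.class_summaries  # keyed by label (here 0 and 1)
    overall = summarizer.summary  # aggregate over all four examples
    return per_class, overall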
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Optional, TYPE_CHECKING
import torch
from torch import Tensor
if TYPE_CHECKING:
from captum.attr._utils.summarizer import SummarizerSingleTensor
class Stat:
"""
The Stat class represents a statistic that can be updated and retrieved
at any point in time.
The basic functionality this class provides is:
    1. An update/get method to actually compute the statistic
2. A statistic store/cache to retrieve dependent information
(e.g. other stat values that are required for computation)
    3. A name that the user can use to refer to the statistic
"""
def __init__(self, name: Optional[str] = None, **kwargs: Any) -> None:
"""
Args:
name (str, optional):
The name of the statistic. If not provided,
                the class name will be used alongside its parameters.
kwargs (Any):
Additional arguments used to construct the statistic
"""
self.params = kwargs
self._name = name
self._other_stats: Optional[SummarizerSingleTensor] = None
def init(self):
pass
def _get_stat(self, stat: "Stat") -> Optional["Stat"]:
assert self._other_stats is not None
return self._other_stats.get(stat)
def update(self, x: Tensor):
raise NotImplementedError()
def get(self) -> Optional[Tensor]:
raise NotImplementedError()
def __hash__(self):
return hash((self.__class__, frozenset(self.params.items())))
def __eq__(self, other: object) -> bool:
if isinstance(other, Stat):
return self.__class__ == other.__class__ and frozenset(
self.params.items()
) == frozenset(other.params.items())
else:
return False
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
@property
def name(self):
"""
        The name of the statistic, i.e. the key in a .summary dict.
        This will be the class name or a custom name if provided.
        See Summarizer or SummarizerSingleTensor.
"""
default_name = self.__class__.__name__.lower()
if len(self.params) > 0:
default_name += f"({self.params})"
return default_name if self._name is None else self._name
class Count(Stat):
"""
Counts the number of elements, i.e. the
number of `update`'s called
"""
def __init__(self, name: Optional[str] = None) -> None:
super().__init__(name=name)
self.n = None
def get(self):
return self.n
def update(self, x):
if self.n is None:
self.n = 0
self.n += 1
class Mean(Stat):
"""
Calculates the average of a tensor
"""
def __init__(self, name: Optional[str] = None) -> None:
super().__init__(name=name)
self.rolling_mean: Optional[Tensor] = None
self.n: Optional[Count] = None
def get(self) -> Optional[Tensor]:
return self.rolling_mean
def init(self):
self.n = self._get_stat(Count())
def update(self, x):
n = self.n.get()
if self.rolling_mean is None:
# Ensures rolling_mean is a float tensor
self.rolling_mean = x.clone() if x.is_floating_point() else x.double()
else:
delta = x - self.rolling_mean
self.rolling_mean += delta / n
class MSE(Stat):
"""
Calculates the mean squared error of a tensor
"""
def __init__(self, name: Optional[str] = None) -> None:
super().__init__(name=name)
self.prev_mean = None
self.mse = None
def init(self):
self.mean = self._get_stat(Mean())
def get(self) -> Optional[Tensor]:
if self.mse is None and self.prev_mean is not None:
return torch.zeros_like(self.prev_mean)
return self.mse
def update(self, x: Tensor):
mean = self.mean.get()
if mean is not None and self.prev_mean is not None:
rhs = (x - self.prev_mean) * (x - mean)
if self.mse is None:
self.mse = rhs
else:
self.mse += rhs
        # clone so that prev_mean is a snapshot rather than an alias of the
        # rolling mean, which Mean updates in place
        self.prev_mean = mean.clone()
class Var(Stat):
"""
Calculates the variance of a tensor, with an order. e.g.
if `order = 1` then it will calculate sample variance.
This is equal to mse / (n - order)
"""
def __init__(self, name: Optional[str] = None, order: int = 0) -> None:
if name is None:
if order == 0:
name = "variance"
elif order == 1:
name = "sample_variance"
else:
name = f"variance({order})"
super().__init__(name=name, order=order)
self.order = order
def init(self):
self.mse = self._get_stat(MSE())
self.n = self._get_stat(Count())
def update(self, x: Tensor):
pass
def get(self) -> Optional[Tensor]:
mse = self.mse.get()
n = self.n.get()
if mse is None:
return None
if n <= self.order:
return torch.zeros_like(mse)
        # NOTE: convert to a float tensor before dividing so that integer inputs
        # do not truncate; this avoids torch.true_divide, which only exists in
        # PyTorch 1.5+, keeping compatibility with 1.4
        return mse.to(torch.float64) / (n - self.order)
class StdDev(Stat):
"""
The standard deviation, with an associated order.
"""
def __init__(self, name: Optional[str] = None, order: int = 0) -> None:
if name is None:
if order == 0:
name = "std_dev"
elif order == 1:
name = "sample_std_dev"
else:
name = f"std_dev{order})"
super().__init__(name=name, order=order)
self.order = order
def init(self):
self.var = self._get_stat(Var(order=self.order))
def update(self, x: Tensor):
pass
def get(self) -> Optional[Tensor]:
var = self.var.get()
return var**0.5 if var is not None else None
class GeneralAccumFn(Stat):
"""
Performs update(x): result = fn(result, x)
where fn is a custom function
"""
def __init__(self, fn: Callable, name: Optional[str] = None) -> None:
super().__init__(name=name)
self.result = None
self.fn = fn
def get(self) -> Optional[Tensor]:
return self.result
def update(self, x):
if self.result is None:
self.result = x
else:
self.result = self.fn(self.result, x)
class Min(GeneralAccumFn):
def __init__(
self, name: Optional[str] = None, min_fn: Callable = torch.min
) -> None:
super().__init__(name=name, fn=min_fn)
class Max(GeneralAccumFn):
def __init__(
self, name: Optional[str] = None, max_fn: Callable = torch.max
) -> None:
super().__init__(name=name, fn=max_fn)
class Sum(GeneralAccumFn):
def __init__(
self, name: Optional[str] = None, add_fn: Callable = torch.add
) -> None:
super().__init__(name=name, fn=add_fn)
def CommonStats() -> List[Stat]:
r"""
Returns common summary statistics, specifically:
Mean, Sample Variance, Sample Std Dev, Min, Max
"""
return [Mean(), Var(order=1), StdDev(order=1), Min(), Max()]
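# A numerical sanity check, as a sketch, for the streaming statistics above:
# feeding rows of a tensor one at a time through a Summarizer built from
# CommonStats() should reproduce the column-wise torch.mean and unbiased
# torch.var of the full tensor.
def _streaming_stats_sketch():
    # imported here to avoid a circular import with this module
    from captum.attr._utils.summarizer import Summarizer

    data = torch.randn(100, 5)
    summarizer = Summarizer(CommonStats())
    for row in data:
        summarizer.update(row)
    summary = summarizer.summary
    # summary["mean"] matches data.mean(dim=0) and summary["sample_variance"]
    # matches data.var(dim=0) up to floating point error
    return summary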
|
#!/usr/bin/env python3
from typing import Dict, List, Optional, Tuple, Type, Union
import torch
from captum.attr._utils.stat import Count, Max, Mean, Min, MSE, Stat, StdDev, Sum, Var
from captum.log import log_usage
from torch import Tensor
class Summarizer:
r"""
    This class simply wraps a given set of SummarizerSingleTensor objects in order
    to summarise multiple input tensors.
    Basic usage:
    >>> from captum.attr._utils.summarizer import Summarizer
    >>> from captum.attr._utils.stat import Mean, StdDev
    >>>
    >>> attrib = torch.tensor([1, 2, 3, 4, 5])
    >>>
    >>> summ = Summarizer([Mean(), StdDev(order=0)])
    >>> summ.update(attrib)
    >>>
    >>> print(summ.summary['mean'])
"""
@log_usage()
def __init__(self, stats: List[Stat]) -> None:
r"""
Args:
stats (List[Stat]):
The list of statistics you wish to track
"""
self._summarizers: List[SummarizerSingleTensor] = []
self._is_inputs_tuple: Optional[bool] = None
self._stats, self._summary_stats_indicies = _reorder_stats(stats)
def _copy_stats(self):
import copy
return copy.deepcopy(self._stats)
def update(self, x: Union[float, Tensor, Tuple[Union[float, Tensor], ...]]):
r"""
Calls `update` on each `Stat` object within the summarizer
Args:
x (Tensor or Tuple[Tensor, ...]):
The input(s) you wish to summarize
"""
if self._is_inputs_tuple is None:
self._is_inputs_tuple = isinstance(x, tuple)
else:
# we want input to be consistently a single input or a tuple
assert not (self._is_inputs_tuple ^ isinstance(x, tuple))
from captum._utils.common import _format_float_or_tensor_into_tuples
x = _format_float_or_tensor_into_tuples(x)
for i, inp in enumerate(x):
if i >= len(self._summarizers):
# _summarizers[i] is a new SummarizerSingleTensor, which
# aims to summarize input i (i.e. x[i])
#
# Thus, we must copy our stats, as otherwise
# in the best case the statistics for each input will be mangled
# and in the worst case we will run into an error due to different
                # dimensionality in the input tensors (i.e.
# x[i].shape != x[j].shape for some pair i, j)
stats = self._copy_stats()
self._summarizers.append(
SummarizerSingleTensor(
stats=stats, summary_stats_indices=self._summary_stats_indicies
)
)
if not isinstance(inp, torch.Tensor):
inp = torch.tensor(inp, dtype=torch.float)
self._summarizers[i].update(inp)
@property
def summary(
self,
) -> Optional[
Union[Dict[str, Optional[Tensor]], List[Dict[str, Optional[Tensor]]]]
]:
r"""
Effectively calls `get` on each `Stat` object within this object for each input
Returns:
A dict or list of dict: mapping from the Stat
object's `name` to the associated value of `get`
"""
if len(self._summarizers) == 0:
return None
temp = [summ.summary for summ in self._summarizers]
return temp if self._is_inputs_tuple else temp[0]
def _reorder_stats(stats: List[Stat]) -> Tuple[List[Stat], List[int]]:
    # We want to store two things:
# 1. A mapping from a Stat to Stat object (self._stat_to_stat):
# This is to retrieve an existing Stat object for dependency
# resolution, e.g. Mean needs the Count stat - we want to
# retrieve it in O(1)
#
# 2. All of the necessary stats, in the correct order,
# to perform an update for each Stat (self.stats) trivially
# As a reference, the dependency graph for our stats is as follows:
# StdDev(x) -> Var(x) -> MSE -> Mean -> Count, for all valid x
#
# Step 1:
# Ensure we have all the necessary stats
# i.e. ensure we have the dependencies
# Step 2:
# Figure out the order to update them
dep_order = [StdDev, Var, MSE, Mean, Count]
# remove dupe stats
stats = set(stats)
summary_stats = set(stats)
from collections import defaultdict
stats_by_module: Dict[Type, List[Stat]] = defaultdict(list)
for stat in stats:
stats_by_module[stat.__class__].append(stat)
# StdDev is an odd case since it is parameterized, thus
# for each StdDev(order) we must ensure there is an associated Var(order)
for std_dev in stats_by_module[StdDev]:
stat_to_add = Var(order=std_dev.order) # type: ignore
stats.add(stat_to_add)
stats_by_module[stat_to_add.__class__].append(stat_to_add)
# For the other modules (deps[1:n-1]): if i exists =>
# we want to ensure i...n-1 exists
for i, dep in enumerate(dep_order[1:]):
if dep in stats_by_module:
stats.update([mod() for mod in dep_order[i + 1 :]])
break
# Step 2: get the correct order
# NOTE: we are sorting via a given topological order
sort_order = {mod: i for i, mod in enumerate(dep_order)}
sort_order[Min] = -1
sort_order[Max] = -1
sort_order[Sum] = -1
stats = list(stats)
stats.sort(key=lambda x: sort_order[x.__class__], reverse=True)
# get the summary stat indices
summary_stat_indexs = []
for i, stat in enumerate(stats):
if stat in summary_stats:
summary_stat_indexs.append(i)
return stats, summary_stat_indexs
class SummarizerSingleTensor:
r"""
A simple class that summarizes a single tensor. The basic functionality
of this class is two operations .update and .summary
If possible use `Summarizer` instead.
"""
def __init__(self, stats: List[Stat], summary_stats_indices: List[int]) -> None:
r"""
Args:
stats (list[Stat]): A list of all the Stat objects that
need to be updated. This must be in the appropriate order for
updates (see `_reorder_stats`)
            summary_stats_indices (list[int]): A list of indices, referencing
                `stats`, identifying the stats you want to show in the .summary
                property. This does not require any specific order.
"""
self._stats = stats
self._stat_to_stat = {stat: stat for stat in self._stats}
self._summary_stats = [stats[i] for i in summary_stats_indices]
for stat in stats:
stat._other_stats = self
stat.init()
def update(self, x: Tensor):
r"""
Updates the summary of a given tensor `x`
Args:
x (Tensor):
The tensor to summarize
"""
for stat in self._stats:
stat.update(x)
def get(self, stat: Stat) -> Optional[Stat]:
r"""
Retrieves `stat` from cache if this summarizer contains it.
        Note that `Stat` has its hash/equality method overridden, such
that an object with the same class and parameters will have the
same hash. Thus, if you call `get` with a `Stat`, an associated
`Stat` with the same class and parameters belonging to this object
will be retrieved if it exists.
If no such object is retrieved then `None` is returned.
Args:
stat (Stat):
The stat to retrieve
Returns:
Stat
The cached stat object or `None`
"""
if stat not in self._stat_to_stat:
return None
return self._stat_to_stat[stat]
@property
def summary(self) -> Dict[str, Optional[Tensor]]:
"""
        Returns:
            Dict[str, Optional[Tensor]]:
                A mapping from each summary stat's name to its current value
"""
return {stat.name: stat.get() for stat in self._summary_stats}
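# A short sketch of multi-input summarization: when update receives a tuple,
# Summarizer keeps an independent SummarizerSingleTensor per input and .summary
# returns a list of dicts, one per input. The tensor shapes are arbitrary.
def _tuple_summary_sketch():
    summ = Summarizer([Mean(), Min(), Max()])
    for _ in range(10):
        summ.update((torch.randn(3), torch.randn(7)))
    summaries = summ.summary  # list of two dicts, one per input tensor
    return [s["mean"].shape for s in summaries]  # [torch.Size([3]), torch.Size([7])]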
|
#!/usr/bin/env python3
from abc import ABC, abstractmethod
import torch
from ..._utils.common import _format_tensor_into_tuples
class PropagationRule(ABC):
"""
Base class for all propagation rule classes, also called Z-Rule.
    STABILITY_FACTOR is used to ensure that no zero division occurs.
"""
STABILITY_FACTOR = 1e-9
def forward_hook(self, module, inputs, outputs):
"""Register backward hooks on input and output
tensors of linear layers in the model."""
inputs = _format_tensor_into_tuples(inputs)
self._has_single_input = len(inputs) == 1
self._handle_input_hooks = []
for input in inputs:
if not hasattr(input, "hook_registered"):
input_hook = self._create_backward_hook_input(input.data)
self._handle_input_hooks.append(input.register_hook(input_hook))
input.hook_registered = True
output_hook = self._create_backward_hook_output(outputs.data)
self._handle_output_hook = outputs.register_hook(output_hook)
return outputs.clone()
@staticmethod
def backward_hook_activation(module, grad_input, grad_output):
"""Backward hook to propagate relevance over non-linear activations."""
# replace_out is set in _backward_hook_input, this is necessary
# due to 2 tensor hooks on the same tensor
if hasattr(grad_output, "replace_out"):
hook_out = grad_output.replace_out
del grad_output.replace_out
return hook_out
return grad_output
def _create_backward_hook_input(self, inputs):
def _backward_hook_input(grad):
relevance = grad * inputs
device = grad.device
if self._has_single_input:
self.relevance_input[device] = relevance.data
else:
self.relevance_input[device].append(relevance.data)
# replace_out is needed since two hooks are set on the same tensor
# The output of this hook is needed in backward_hook_activation
grad.replace_out = relevance
return relevance
return _backward_hook_input
def _create_backward_hook_output(self, outputs):
def _backward_hook_output(grad):
sign = torch.sign(outputs)
sign[sign == 0] = 1
relevance = grad / (outputs + sign * self.STABILITY_FACTOR)
self.relevance_output[grad.device] = grad.data
return relevance
return _backward_hook_output
def forward_hook_weights(self, module, inputs, outputs):
"""Save initial activations a_j before modules are changed"""
device = inputs[0].device if isinstance(inputs, tuple) else inputs.device
if hasattr(module, "activations") and device in module.activations:
raise RuntimeError(
"Module {} is being used more than once in the network, which "
"is not supported by LRP. "
"Please ensure that module is being used only once in the "
"network.".format(module)
)
module.activations[device] = tuple(input.data for input in inputs)
self._manipulate_weights(module, inputs, outputs)
@abstractmethod
def _manipulate_weights(self, module, inputs, outputs):
raise NotImplementedError
def forward_pre_hook_activations(self, module, inputs):
"""Pass initial activations to graph generation pass"""
device = inputs[0].device if isinstance(inputs, tuple) else inputs.device
for input, activation in zip(inputs, module.activations[device]):
input.data = activation
return inputs
class EpsilonRule(PropagationRule):
"""
Rule for relevance propagation using a small value of epsilon
to avoid numerical instabilities and remove noise.
Use for middle layers.
Args:
        epsilon (int or float): Value added to the denominator for numerical
                    stability during relevance propagation.
"""
def __init__(self, epsilon=1e-9) -> None:
self.STABILITY_FACTOR = epsilon
def _manipulate_weights(self, module, inputs, outputs):
pass
class GammaRule(PropagationRule):
"""
Gamma rule for relevance propagation, gives more importance to
positive relevance.
Use for lower layers.
Args:
gamma (float): The gamma parameter determines by how much
the positive relevance is increased.
"""
def __init__(self, gamma=0.25, set_bias_to_zero=False) -> None:
self.gamma = gamma
self.set_bias_to_zero = set_bias_to_zero
def _manipulate_weights(self, module, inputs, outputs):
if hasattr(module, "weight"):
module.weight.data = (
module.weight.data + self.gamma * module.weight.data.clamp(min=0)
)
if self.set_bias_to_zero and hasattr(module, "bias"):
if module.bias is not None:
module.bias.data = torch.zeros_like(module.bias.data)
class Alpha1_Beta0_Rule(PropagationRule):
"""
Alpha1_Beta0 rule for relevance backpropagation, also known
as Deep-Taylor. Only positive relevance is propagated, resulting
in stable results, therefore recommended as the initial choice.
Warning: Does not work for BatchNorm modules because weight and bias
are defined differently.
Use for lower layers.
"""
def __init__(self, set_bias_to_zero=False) -> None:
self.set_bias_to_zero = set_bias_to_zero
def _manipulate_weights(self, module, inputs, outputs):
if hasattr(module, "weight"):
module.weight.data = module.weight.data.clamp(min=0)
if self.set_bias_to_zero and hasattr(module, "bias"):
if module.bias is not None:
module.bias.data = torch.zeros_like(module.bias.data)
class IdentityRule(EpsilonRule):
"""
Identity rule for skipping layer manipulation and propagating the
relevance over a layer. Only valid for modules with same dimensions for
inputs and outputs.
Can be used for BatchNorm2D.
"""
def _create_backward_hook_input(self, inputs):
def _backward_hook_input(grad):
return self.relevance_output[grad.device]
return _backward_hook_input
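# A sketch of how these rules are typically attached to a model before running
# LRP: captum.attr.LRP picks up a per-module `rule` attribute, and layers without
# one fall back to a default rule. The layer names (conv1, fc1) are illustrative
# placeholders, not part of any particular model.
def _lrp_rule_assignment_sketch(model):
    from captum.attr import LRP

    model.conv1.rule = GammaRule(gamma=0.25)  # lower layers: favour positive relevance
    model.fc1.rule = EpsilonRule(epsilon=1e-6)  # middle layers: epsilon stabilisation
    return LRP(model)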
|
#!/usr/bin/env python3
import math
from typing import Any, Callable, cast, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_feature_mask,
_format_output,
_is_tuple,
_run_forward,
)
from captum._utils.progress import progress
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import PerturbationAttribution
from captum.attr._utils.common import _format_input_baseline
from captum.log import log_usage
from torch import dtype, Tensor
class FeatureAblation(PerturbationAttribution):
r"""
A perturbation based approach to computing attribution, involving
replacing each input feature with a given baseline / reference, and
computing the difference in output. By default, each scalar value within
    each input tensor is taken as a feature and replaced independently. Passing
    a feature mask allows grouping features to be ablated together. This can
be used in cases such as images, where an entire segment or region
can be ablated, measuring the importance of the segment (feature group).
Each input scalar in the group will be given the same attribution value
equal to the change in target as a result of ablating the entire feature
group.
    The forward function can either return a scalar per example, or a single
    tensor (or scalar value) of a fixed size for the full batch, i.e. the
    output does not grow as the batch size increases. If the output is fixed
    we consider this model to be an "aggregation" of the inputs. In the
    fixed-size output mode we require `perturbations_per_eval == 1` and the
    `feature_mask` to be either `None` or to have 1 as the first dimension of
    every mask tensor (i.e. a single mask is applied across all examples).
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
"""
PerturbationAttribution.__init__(self, forward_func)
self.use_weights = False
        # only used when perturbations_per_eval > 1, where the 1st dim of
        # forward_func's output must grow with the input batch size. If forward's
        # output is aggregated, we cannot expand the input to include more
        # perturbations in one call. While this flag is False, we force the
        # validation by comparing the outputs of the original input and of the
        # modified input whose batch size has been expanded based on
        # perturbations_per_eval. The flag is set to True once the output of the
        # modified input grows as expected; from then on we assume the model's
        # behavior stays consistent and do not check again.
self._is_output_shape_valid = False
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
perturbations_per_eval: int = 1,
show_progress: bool = False,
**kwargs: Any,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which ablation
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference value which replaces each
feature when ablated.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or
broadcastable to match the dimensions of inputs
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which should be ablated together. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Each tensor
should contain integers in the range 0 to num_features
- 1, and indices corresponding to the same feature should
have the same value.
Note that features within each input tensor are ablated
independently (not across tensors).
If the forward function returns a single scalar per batch,
we enforce that the first dimension of each mask must be 1,
since attributions are returned batch-wise rather than per
example, so the attributions must correspond to the
same features (indices) in each input example.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature, which
is ablated independently.
Default: None
perturbations_per_eval (int, optional): Allows ablation of multiple
features to be processed simultaneously in one call to
forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function's number of outputs does not
change as the batch size grows (e.g. if it outputs a
scalar value), you must set perturbations_per_eval to 1
and use a single feature mask to describe the features
for all examples in the batch.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
**kwargs (Any, optional): Any additional arguments used by child
classes of FeatureAblation (such as Occlusion) to construct
ablations. These arguments are ignored when using
FeatureAblation directly.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If the forward function returns
a scalar value per example, attributions will be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If the forward function returns a scalar per batch, then
attribution tensor(s) will have first dimension 1 and
the remaining dimensions will match the input.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple of tensors is provided for inputs, a
tuple of corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # Defining FeatureAblation interpreter
>>> ablator = FeatureAblation(net)
>>> # Computes ablation attribution, ablating each of the 16
>>> # scalar input independently.
>>> attr = ablator.attribute(input, target=1)
>>> # Alternatively, we may want to ablate features in groups, e.g.
>>> # grouping each 2x2 square of the inputs and ablating them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are ablated
>>> # simultaneously, and the attribution for each input in the same
>>> # group (0, 1, 2, and 3) per example are the same.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> attr = ablator.attribute(input, target=1, feature_mask=feature_mask)
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs, baselines = _format_input_baseline(inputs, baselines)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
num_examples = inputs[0].shape[0]
feature_mask = _format_feature_mask(feature_mask, inputs)
assert (
isinstance(perturbations_per_eval, int) and perturbations_per_eval >= 1
), "Perturbations per evaluation must be an integer and at least 1."
with torch.no_grad():
if show_progress:
feature_counts = self._get_feature_counts(
inputs, feature_mask, **kwargs
)
total_forwards = (
sum(
math.ceil(count / perturbations_per_eval)
for count in feature_counts
)
+ 1
) # add 1 for the initial eval
attr_progress = progress(
desc=f"{self.get_name()} attribution", total=total_forwards
)
attr_progress.update(0)
# Computes initial evaluation with all features, which is compared
# to each ablated result.
initial_eval = self._strict_run_forward(
self.forward_func, inputs, target, additional_forward_args
)
if show_progress:
attr_progress.update()
# number of elements in the output of forward_func
n_outputs = initial_eval.numel() if isinstance(initial_eval, Tensor) else 1
# flatten eval outputs into 1D (n_outputs)
# add the leading dim for n_feature_perturbed
flattened_initial_eval = initial_eval.reshape(1, -1)
# Initialize attribution totals and counts
attrib_type = cast(dtype, flattened_initial_eval.dtype)
total_attrib = [
# attribute w.r.t each output element
torch.zeros(
(n_outputs,) + input.shape[1:],
dtype=attrib_type,
device=input.device,
)
for input in inputs
]
# Weights are used in cases where ablations may be overlapping.
if self.use_weights:
weights = [
torch.zeros(
(n_outputs,) + input.shape[1:], device=input.device
).float()
for input in inputs
]
# Iterate through each feature tensor for ablation
for i in range(len(inputs)):
# Skip any empty input tensors
if torch.numel(inputs[i]) == 0:
continue
for (
current_inputs,
current_add_args,
current_target,
current_mask,
) in self._ith_input_ablation_generator(
i,
inputs,
additional_forward_args,
target,
baselines,
feature_mask,
perturbations_per_eval,
**kwargs,
):
# modified_eval has (n_feature_perturbed * n_outputs) elements
# shape:
# agg mode: (*initial_eval.shape)
# non-agg mode:
# (feature_perturbed * batch_size, *initial_eval.shape[1:])
modified_eval = self._strict_run_forward(
self.forward_func,
current_inputs,
current_target,
current_add_args,
)
if show_progress:
attr_progress.update()
# if perturbations_per_eval > 1, the output shape must grow with
# input and not be aggregated
if perturbations_per_eval > 1 and not self._is_output_shape_valid:
current_batch_size = current_inputs[0].shape[0]
                        # number of perturbations, which may be smaller than
                        # perturbations_per_eval when there are not enough features to perturb
n_perturb = current_batch_size / num_examples
current_output_shape = modified_eval.shape
# use initial_eval as the forward of perturbations_per_eval = 1
initial_output_shape = initial_eval.shape
assert (
# check if the output is not a scalar
current_output_shape
and initial_output_shape
                            # check if the output grows in the same ratio, i.e., not aggregated
and current_output_shape[0]
== n_perturb * initial_output_shape[0]
), (
"When perturbations_per_eval > 1, forward_func's output "
"should be a tensor whose 1st dim grow with the input "
f"batch size: when input batch size is {num_examples}, "
f"the output shape is {initial_output_shape}; "
f"when input batch size is {current_batch_size}, "
f"the output shape is {current_output_shape}"
)
self._is_output_shape_valid = True
# reshape the leading dim for n_feature_perturbed
# flatten each feature's eval outputs into 1D of (n_outputs)
modified_eval = modified_eval.reshape(-1, n_outputs)
# eval_diff in shape (n_feature_perturbed, n_outputs)
eval_diff = flattened_initial_eval - modified_eval
# append the shape of one input example
# to make it broadcastable to mask
eval_diff = eval_diff.reshape(
eval_diff.shape + (inputs[i].dim() - 1) * (1,)
)
eval_diff = eval_diff.to(total_attrib[i].device)
if self.use_weights:
weights[i] += current_mask.float().sum(dim=0)
total_attrib[i] += (eval_diff * current_mask.to(attrib_type)).sum(
dim=0
)
if show_progress:
attr_progress.close()
# Divide total attributions by counts and return formatted attributions
if self.use_weights:
attrib = tuple(
single_attrib.float() / weight
for single_attrib, weight in zip(total_attrib, weights)
)
else:
attrib = tuple(total_attrib)
_result = _format_output(is_inputs_tuple, attrib)
return _result
def _ith_input_ablation_generator(
self,
i,
inputs,
additional_args,
target,
baselines,
input_mask,
perturbations_per_eval,
**kwargs,
):
"""
This method returns a generator of ablation perturbations of the i-th input
Returns:
ablation_iter (Generator): yields each perturbation to be evaluated
as a tuple (inputs, additional_forward_args, targets, mask).
"""
extra_args = {}
for key, value in kwargs.items():
# For any tuple argument in kwargs, we choose index i of the tuple.
if isinstance(value, tuple):
extra_args[key] = value[i]
else:
extra_args[key] = value
input_mask = input_mask[i] if input_mask is not None else None
min_feature, num_features, input_mask = self._get_feature_range_and_mask(
inputs[i], input_mask, **extra_args
)
num_examples = inputs[0].shape[0]
perturbations_per_eval = min(perturbations_per_eval, num_features)
baseline = baselines[i] if isinstance(baselines, tuple) else baselines
if isinstance(baseline, torch.Tensor):
baseline = baseline.reshape((1,) + baseline.shape)
if perturbations_per_eval > 1:
# Repeat features and additional args for batch size.
all_features_repeated = [
torch.cat([inputs[j]] * perturbations_per_eval, dim=0)
for j in range(len(inputs))
]
additional_args_repeated = (
_expand_additional_forward_args(additional_args, perturbations_per_eval)
if additional_args is not None
else None
)
target_repeated = _expand_target(target, perturbations_per_eval)
else:
all_features_repeated = list(inputs)
additional_args_repeated = additional_args
target_repeated = target
num_features_processed = min_feature
while num_features_processed < num_features:
current_num_ablated_features = min(
perturbations_per_eval, num_features - num_features_processed
)
# Store appropriate inputs and additional args based on batch size.
if current_num_ablated_features != perturbations_per_eval:
current_features = [
feature_repeated[0 : current_num_ablated_features * num_examples]
for feature_repeated in all_features_repeated
]
current_additional_args = (
_expand_additional_forward_args(
additional_args, current_num_ablated_features
)
if additional_args is not None
else None
)
current_target = _expand_target(target, current_num_ablated_features)
else:
current_features = all_features_repeated
current_additional_args = additional_args_repeated
current_target = target_repeated
# Store existing tensor before modifying
original_tensor = current_features[i]
# Construct ablated batch for features in range num_features_processed
# to num_features_processed + current_num_ablated_features and return
# mask with same size as ablated batch. ablated_features has dimension
# (current_num_ablated_features, num_examples, inputs[i].shape[1:])
# Note that in the case of sparse tensors, the second dimension
            # may not necessarily be num_examples and will match the first
# dimension of this tensor.
current_reshaped = current_features[i].reshape(
(current_num_ablated_features, -1) + current_features[i].shape[1:]
)
ablated_features, current_mask = self._construct_ablated_input(
current_reshaped,
input_mask,
baseline,
num_features_processed,
num_features_processed + current_num_ablated_features,
**extra_args,
)
# current_features[i] has dimension
# (current_num_ablated_features * num_examples, inputs[i].shape[1:]),
# which can be provided to the model as input.
current_features[i] = ablated_features.reshape(
(-1,) + ablated_features.shape[2:]
)
yield tuple(
current_features
), current_additional_args, current_target, current_mask
# Replace existing tensor at index i.
current_features[i] = original_tensor
num_features_processed += current_num_ablated_features
def _construct_ablated_input(
self, expanded_input, input_mask, baseline, start_feature, end_feature, **kwargs
):
r"""
Ablates given expanded_input tensor with given feature mask, feature range,
and baselines. expanded_input shape is (`num_features`, `num_examples`, ...)
with remaining dimensions corresponding to remaining original tensor
dimensions and `num_features` = `end_feature` - `start_feature`.
input_mask has same number of dimensions as original input tensor (one less
than `expanded_input`), and can have first dimension either 1, applying same
feature mask to all examples, or `num_examples`. baseline is expected to
be broadcastable to match `expanded_input`.
This method returns the ablated input tensor, which has the same
dimensionality as `expanded_input` as well as the corresponding mask with
either the same dimensionality as `expanded_input` or second dimension
being 1. This mask contains 1s in locations which have been ablated (and
thus counted towards ablations for that feature) and 0s otherwise.
"""
current_mask = torch.stack(
[input_mask == j for j in range(start_feature, end_feature)], dim=0
).long()
ablated_tensor = (
expanded_input * (1 - current_mask).to(expanded_input.dtype)
) + (baseline * current_mask.to(expanded_input.dtype))
return ablated_tensor, current_mask
def _get_feature_range_and_mask(self, input, input_mask, **kwargs):
if input_mask is None:
# Obtain feature mask for selected input tensor, matches size of
# 1 input example, (1 x inputs[i].shape[1:])
input_mask = torch.reshape(
torch.arange(torch.numel(input[0]), device=input.device),
input[0:1].shape,
).long()
return (
torch.min(input_mask).item(),
torch.max(input_mask).item() + 1,
input_mask,
)
def _get_feature_counts(self, inputs, feature_mask, **kwargs):
"""return the numbers of input features"""
if not feature_mask:
return tuple(inp[0].numel() if inp.numel() else 0 for inp in inputs)
return tuple(
(mask.max() - mask.min()).item() + 1
if mask is not None
else (inp[0].numel() if inp.numel() else 0)
for inp, mask in zip(inputs, feature_mask)
)
def _strict_run_forward(self, *args, **kwargs) -> Tensor:
"""
A temp wrapper for global _run_forward util to force forward output
type assertion & conversion.
Remove after the strict logic is supported by all attr classes
"""
forward_output = _run_forward(*args, **kwargs)
if isinstance(forward_output, Tensor):
return forward_output
output_type = type(forward_output)
assert output_type is int or output_type is float, (
"the return of forward_func must be a tensor, int, or float,"
f" received: {forward_output}"
)
# using python built-in type as torch dtype
# int -> torch.int64, float -> torch.float64
# ref: https://github.com/pytorch/pytorch/pull/21215
return torch.tensor(forward_output, dtype=output_type)
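# Illustrative sketch (not part of the library): using FeatureAblation in
# "aggregation" mode, where forward_func returns a single scalar for the whole
# batch. The toy model, data, and loss below are assumptions for demonstration.
if __name__ == "__main__":
    import torch.nn as nn
    model = nn.Linear(4, 3)
    inputs = torch.randn(8, 4)
    labels = torch.randint(0, 3, (8,))
    def batch_loss(x: Tensor) -> Tensor:
        # one scalar for the full batch: the output does not grow with the
        # batch size, so FeatureAblation treats the model as an "aggregation"
        return nn.functional.cross_entropy(model(x), labels)
    ablator = FeatureAblation(batch_loss)
    # in aggregation mode perturbations_per_eval must stay at 1 and the feature
    # mask (here the default per-scalar mask) is shared across all examples,
    # so the returned attribution has first dimension 1
    attr = ablator.attribute(inputs, perturbations_per_eval=1)
    print(attr.shape)  # torch.Size([1, 4])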
|
#!/usr/bin/env python3
from typing import Any, Callable, Tuple, Union
import torch
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.feature_ablation import FeatureAblation
from captum.log import log_usage
from torch import Tensor
def _permute_feature(x: Tensor, feature_mask: Tensor) -> Tensor:
n = x.size(0)
assert n > 1, "cannot permute features with batch_size = 1"
perm = torch.randperm(n)
no_perm = torch.arange(n)
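    # re-draw until the permutation moves at least one example, so the permuted
    # batch is guaranteed to differ from the original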
while (perm == no_perm).all():
perm = torch.randperm(n)
return (x[perm] * feature_mask.to(dtype=x.dtype)) + (
x * feature_mask.bitwise_not().to(dtype=x.dtype)
)
class FeaturePermutation(FeatureAblation):
r"""
A perturbation based approach to compute attribution, which
takes each input feature, permutes the feature values within a batch,
and computes the difference between original and shuffled outputs for
the given batch. This difference signifies the feature importance
for the permuted feature.
Example pseudocode for the algorithm is as follows::
perm_feature_importance(batch):
importance = dict()
baseline_error = error_metric(model(batch), batch_labels)
for each feature:
permute this feature across the batch
error = error_metric(model(permuted_batch), batch_labels)
importance[feature] = baseline_error - error
"un-permute" the feature across the batch
return importance
    It should be noted that the `error_metric` must be computed inside the
    `forward_func`. An explicit error metric is not required, e.g. you
    could simply return the logits (the model output), but this may or may
    not provide a meaningful attribution. A runnable sketch that wraps an
    error metric into `forward_func` appears at the end of this module.
This method, unlike other attribution methods, requires a batch
of examples to compute attributions and cannot be performed on a single example.
By default, each scalar value within
each input tensor is taken as a feature and shuffled independently. Passing
    a feature mask allows grouping features to be shuffled together.
Each input scalar in the group will be given the same attribution value
equal to the change in target as a result of shuffling the entire feature
group.
The forward function can either return a scalar per example, or a single
scalar for the full batch. If a single scalar is returned for the batch,
`perturbations_per_eval` must be 1, and the returned attributions will have
first dimension 1, corresponding to feature importance across all
examples in the batch.
More information can be found in the permutation feature
importance algorithm description here:
https://christophm.github.io/interpretable-ml-book/feature-importance.html
"""
def __init__(
self, forward_func: Callable, perm_func: Callable = _permute_feature
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
perm_func (Callable, optional): A function that accepts a batch of
inputs and a feature mask, and "permutes" the feature using
feature mask across the batch. This defaults to a function
                which applies a random permutation; this argument only needs
to be provided if a custom permutation behavior is desired.
Default: `_permute_feature`
"""
FeatureAblation.__init__(self, forward_func=forward_func)
self.perm_func = perm_func
# suppressing error caused by the child class not having a matching
# signature to the parent
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
perturbations_per_eval: int = 1,
show_progress: bool = False,
**kwargs: Any,
) -> TensorOrTupleOfTensorsGeneric:
r"""
This function is almost equivalent to
:func:`FeatureAblation.attribute <captum.attr.FeatureAblation.attribute>`. The
main difference is the way ablated examples are generated. Specifically they
are generated through the ``perm_func``, as we set the baselines for
:func:`FeatureAblation.attribute <captum.attr.FeatureAblation.attribute>` to
``None``.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
permutation attributions are computed. If
forward_func takes a single tensor as input, a
single input tensor should be provided. If
forward_func takes multiple tensors as input, a
tuple of the input tensors should be provided. It is
assumed that for all given input tensors, dimension
0 corresponds to the number of examples (aka batch
size), and if multiple input tensors are provided,
the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which difference is computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which should be ablated together. feature_mask
should contain the same number of tensors as inputs.
Each tensor should be the same size as the
corresponding input or broadcastable to match the
input tensor. Each tensor should contain integers in
the range 0 to num_features - 1, and indices
corresponding to the same feature should have the
same value. Note that features within each input
tensor are ablated independently (not across
tensors).
                        The first dimension of each mask must be 1, as we require
                        the same grouping of features for each input sample.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature, which
is permuted independently.
Default: None
perturbations_per_eval (int, optional): Allows permutations
of multiple features to be processed simultaneously
in one call to forward_fn. Each forward pass will
contain a maximum of perturbations_per_eval * #examples
samples. For DataParallel models, each batch is
split among the available devices, so evaluations on
each available device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
**kwargs (Any, optional): Any additional arguments used by child
classes of :class:`.FeatureAblation` (such as
:class:`.Occlusion`) to construct ablations. These
arguments are ignored when using FeatureAblation directly.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If the forward function returns
a scalar value per example, attributions will be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If the forward function returns a scalar per batch, then
attribution tensor(s) will have first dimension 1 and
the remaining dimensions will match the input.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple of tensors is provided for inputs,
a tuple of corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 10 x 4 x 4
>>> input = torch.randn(10, 4, 4)
>>> # Defining FeaturePermutation interpreter
>>> feature_perm = FeaturePermutation(net)
>>> # Computes permutation attribution, shuffling each of the 16
>>> # scalar input independently.
>>> attr = feature_perm.attribute(input, target=1)
>>> # Alternatively, we may want to permute features in groups, e.g.
>>> # grouping each 2x2 square of the inputs and shuffling them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are shuffled
>>> # simultaneously, and the attribution for each input in the same
>>> # group (0, 1, 2, and 3) per example are the same.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> attr = feature_perm.attribute(input, target=1,
>>> feature_mask=feature_mask)
"""
return FeatureAblation.attribute.__wrapped__(
self,
inputs,
baselines=None,
target=target,
additional_forward_args=additional_forward_args,
feature_mask=feature_mask,
perturbations_per_eval=perturbations_per_eval,
show_progress=show_progress,
**kwargs,
)
def _construct_ablated_input(
self,
expanded_input: Tensor,
input_mask: Tensor,
baseline: Union[int, float, Tensor],
start_feature: int,
end_feature: int,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""
This function permutes the features of `expanded_input` with a given
feature mask and feature range. Permutation occurs via calling
`self.perm_func` across each batch within `expanded_input`. As with
`FeatureAblation._construct_ablated_input`:
- `expanded_input.shape = (num_features, num_examples, ...)`
- `num_features = end_feature - start_feature` (i.e. start and end is a
half-closed interval)
- `input_mask` is a tensor of the same shape as one input, which
describes the locations of each feature via their "index"
        Since `baselines` is set to None for `FeatureAblation.attribute`, this
        will be the zero tensor; however, it is not used.
"""
assert input_mask.shape[0] == 1, (
"input_mask.shape[0] != 1: pass in one mask in order to permute"
"the same features for each input"
)
current_mask = torch.stack(
[input_mask == j for j in range(start_feature, end_feature)], dim=0
).bool()
output = torch.stack(
[
self.perm_func(x, mask.squeeze(0))
for x, mask in zip(expanded_input, current_mask)
]
)
return output, current_mask
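# Illustrative sketch (not part of the library): following the pseudocode in the
# class docstring by wrapping an error metric into forward_func, so attributions
# reflect the change in per-example loss caused by shuffling each feature across
# the batch. The toy model, data, and metric below are assumptions for demonstration.
if __name__ == "__main__":
    import torch.nn as nn
    model = nn.Linear(4, 3)
    batch = torch.randn(16, 4)
    batch_labels = torch.randint(0, 3, (16,))
    def forward_with_metric(x: Tensor) -> Tensor:
        # per-example cross-entropy; reduction="none" keeps one value per example
        return nn.functional.cross_entropy(model(x), batch_labels, reduction="none")
    feature_perm = FeaturePermutation(forward_with_metric)
    attr = feature_perm.attribute(batch)
    print(attr.shape)  # torch.Size([16, 4])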
|
#!/usr/bin/env python3
from typing import Any, Callable, Tuple, Union
import numpy as np
import torch
from captum._utils.common import _format_tensor_into_tuples
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._utils.common import (
_format_and_verify_sliding_window_shapes,
_format_and_verify_strides,
)
from captum.log import log_usage
from torch import Tensor
class Occlusion(FeatureAblation):
r"""
A perturbation based approach to compute attribution, involving
replacing each contiguous rectangular region with a given baseline /
reference, and computing the difference in output. For features located
in multiple regions (hyperrectangles), the corresponding output differences
are averaged to compute the attribution for that feature.
The first patch is applied with the corner aligned with all indices 0,
and strides are applied until the entire dimension range is covered. Note
that this may cause the final patch applied in a direction to be cut-off
and thus smaller than the target occlusion shape.
More details regarding the occlusion (or grey-box / sliding window)
method can be found in the original paper and in the DeepExplain
implementation.
https://arxiv.org/abs/1311.2901
https://github.com/marcoancona/DeepExplain/blob/master/deepexplain\
/tensorflow/methods.py#L401
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
"""
FeatureAblation.__init__(self, forward_func)
self.use_weights = True
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
sliding_window_shapes: Union[Tuple[int, ...], Tuple[Tuple[int, ...], ...]],
strides: Union[
None, int, Tuple[int, ...], Tuple[Union[int, Tuple[int, ...]], ...]
] = None,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
perturbations_per_eval: int = 1,
show_progress: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which occlusion
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
sliding_window_shapes (tuple or tuple[tuple]): Shape of patch
(hyperrectangle) to occlude each input. For a single
input tensor, this must be a tuple of length equal to the
number of dimensions of the input tensor - 1, defining
the dimensions of the patch. If the input tensor is 1-d,
this should be an empty tuple. For multiple input tensors,
this must be a tuple containing one tuple for each input
tensor defining the dimensions of the patch for that
input tensor, as described for the single tensor case.
strides (int, tuple, tuple[int], or tuple[tuple], optional):
This defines the step by which the occlusion hyperrectangle
should be shifted by in each direction for each iteration.
For a single tensor input, this can be either a single
integer, which is used as the step size in each direction,
or a tuple of integers matching the number of dimensions
in the occlusion shape, defining the step size in the
corresponding dimension. For multiple tensor inputs, this
can be either a tuple of integers, one for each input
tensor (used for all dimensions of the corresponding
tensor), or a tuple of tuples, providing the stride per
dimension for each tensor.
To ensure that all inputs are covered by at least one
sliding window, the stride for any dimension must be
<= the corresponding sliding window dimension if the
sliding window dimension is less than the input
dimension.
If None is provided, a stride of 1 is used for each
dimension of each input tensor.
Default: None
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference value which replaces each
feature when occluded.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or
broadcastable to match the dimensions of inputs
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which difference is computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
perturbations_per_eval (int, optional): Allows multiple occlusions
to be included in one batch (one call to forward_fn).
By default, perturbations_per_eval is 1, so each occlusion
is processed individually.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # Defining Occlusion interpreter
>>> ablator = Occlusion(net)
>>> # Computes occlusion attribution, ablating each 3x3 patch,
>>> # shifting in each direction by the default of 1.
>>> attr = ablator.attribute(input, target=1, sliding_window_shapes=(3,3))
"""
formatted_inputs = _format_tensor_into_tuples(inputs)
# Formatting strides
strides = _format_and_verify_strides(strides, formatted_inputs)
# Formatting sliding window shapes
sliding_window_shapes = _format_and_verify_sliding_window_shapes(
sliding_window_shapes, formatted_inputs
)
# Construct tensors from sliding window shapes
sliding_window_tensors = tuple(
torch.ones(window_shape, device=formatted_inputs[i].device)
for i, window_shape in enumerate(sliding_window_shapes)
)
# Construct counts, defining number of steps to make of occlusion block in
# each dimension.
shift_counts = []
for i, inp in enumerate(formatted_inputs):
current_shape = np.subtract(inp.shape[1:], sliding_window_shapes[i])
# Verify sliding window doesn't exceed input dimensions.
assert (np.array(current_shape) >= 0).all(), (
"Sliding window dimensions {} cannot exceed input dimensions" "{}."
).format(sliding_window_shapes[i], tuple(inp.shape[1:]))
# Stride cannot be larger than sliding window for any dimension where
# the sliding window doesn't cover the entire input.
assert np.logical_or(
np.array(current_shape) == 0,
np.array(strides[i]) <= sliding_window_shapes[i],
).all(), (
"Stride dimension {} cannot be larger than sliding window "
"shape dimension {}."
).format(
strides[i], sliding_window_shapes[i]
)
shift_counts.append(
tuple(
np.add(np.ceil(np.divide(current_shape, strides[i])).astype(int), 1)
)
)
# Use ablation attribute method
return super().attribute.__wrapped__(
self,
inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
perturbations_per_eval=perturbations_per_eval,
sliding_window_tensors=sliding_window_tensors,
shift_counts=tuple(shift_counts),
strides=strides,
show_progress=show_progress,
)
def _construct_ablated_input(
self,
expanded_input: Tensor,
input_mask: Union[None, Tensor],
baseline: Union[Tensor, int, float],
start_feature: int,
end_feature: int,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""
Ablates given expanded_input tensor with given feature mask, feature range,
and baselines, and any additional arguments.
expanded_input shape is (num_features, num_examples, ...)
with remaining dimensions corresponding to remaining original tensor
dimensions and num_features = end_feature - start_feature.
input_mask is None for occlusion, and the mask is constructed
using sliding_window_tensors, strides, and shift counts, which are provided in
kwargs. baseline is expected to
be broadcastable to match expanded_input.
This method returns the ablated input tensor, which has the same
dimensionality as expanded_input as well as the corresponding mask with
either the same dimensionality as expanded_input or second dimension
being 1. This mask contains 1s in locations which have been ablated (and
thus counted towards ablations for that feature) and 0s otherwise.
"""
input_mask = torch.stack(
[
self._occlusion_mask(
expanded_input,
j,
kwargs["sliding_window_tensors"],
kwargs["strides"],
kwargs["shift_counts"],
)
for j in range(start_feature, end_feature)
],
dim=0,
).long()
ablated_tensor = (
expanded_input
* (
torch.ones(1, dtype=torch.long, device=expanded_input.device)
- input_mask
).to(expanded_input.dtype)
) + (baseline * input_mask.to(expanded_input.dtype))
return ablated_tensor, input_mask
def _occlusion_mask(
self,
expanded_input: Tensor,
ablated_feature_num: int,
sliding_window_tsr: Tensor,
strides: Union[int, Tuple[int, ...]],
shift_counts: Tuple[int, ...],
) -> Tensor:
"""
This constructs the current occlusion mask, which is the appropriate
shift of the sliding window tensor based on the ablated feature number.
The feature number ranges between 0 and the product of the shift counts
(# of times the sliding window should be shifted in each dimension).
First, the ablated feature number is converted to the number of steps in
each dimension from the origin, based on shift counts. This procedure
is similar to a base conversion, with the position values equal to shift
        counts. The feature number is first taken modulo shift_counts[0] to
        get the number of shifts in the first dimension (each shift moves the
        window by the corresponding stride), and is then integer-divided by
        shift_counts[0]. The procedure is repeated for each element of
        shift_counts. This
computes the total shift in each direction for the sliding window.
We then need to compute the padding required after the window in each
dimension, which is equal to the total input dimension minus the sliding
window dimension minus the (left) shift amount. We construct the
array pad_values which contains the left and right pad values for each
dimension, in reverse order of dimensions, starting from the last one.
Once these padding values are computed, we pad the sliding window tensor
of 1s with 0s appropriately, which is the corresponding mask,
and the result will match the input shape.
"""
remaining_total = ablated_feature_num
current_index = []
for i, shift_count in enumerate(shift_counts):
stride = strides[i] if isinstance(strides, tuple) else strides
current_index.append((remaining_total % shift_count) * stride)
remaining_total = remaining_total // shift_count
remaining_padding = np.subtract(
expanded_input.shape[2:], np.add(current_index, sliding_window_tsr.shape)
)
pad_values = [
val for pair in zip(remaining_padding, current_index) for val in pair
]
pad_values.reverse()
padded_tensor = torch.nn.functional.pad(
sliding_window_tsr, tuple(pad_values) # type: ignore
)
return padded_tensor.reshape((1,) + padded_tensor.shape)
def _get_feature_range_and_mask(
self, input: Tensor, input_mask: Tensor, **kwargs: Any
) -> Tuple[int, int, None]:
feature_max = np.prod(kwargs["shift_counts"])
return 0, feature_max, None
def _get_feature_counts(self, inputs, feature_mask, **kwargs):
"""return the numbers of possible input features"""
return tuple(np.prod(counts).astype(int) for counts in kwargs["shift_counts"])
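# Illustrative sketch (not part of the library): occluding an image-like input
# with an explicit stride; overlapping patches are averaged through the internal
# weights. The toy model below is an assumption for demonstration only.
if __name__ == "__main__":
    import torch.nn as nn
    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 5))
    inputs = torch.randn(2, 3, 8, 8)
    ablator = Occlusion(model)
    # occlude 3x4x4 patches (all channels at once), shifting by 2 pixels in
    # each spatial direction
    attr = ablator.attribute(
        inputs,
        sliding_window_shapes=(3, 4, 4),
        strides=(3, 2, 2),
        target=0,
    )
    print(attr.shape)  # torch.Size([2, 3, 8, 8])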
|
#!/usr/bin/env python3
import typing
from typing import Any, Callable, Tuple, Union
import numpy as np
import torch
from captum._utils.common import _is_tuple
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
Tensor,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._core.noise_tunnel import NoiseTunnel
from captum.attr._utils.attribution import GradientAttribution
from captum.attr._utils.common import (
_compute_conv_delta_and_format_attrs,
_format_callable_baseline,
_format_input_baseline,
)
from captum.log import log_usage
class GradientShap(GradientAttribution):
r"""
Implements gradient SHAP based on the implementation from SHAP's primary
author. For reference, please view the original
`implementation
<https://github.com/slundberg/shap#deep-learning-example-with-gradientexplainer-tensorflowkeraspytorch-models>`_
and the paper: `A Unified Approach to Interpreting Model Predictions
<https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions>`_
GradientShap approximates SHAP values by computing the expectations of
gradients by randomly sampling from the distribution of baselines/references.
It adds white noise to each input sample `n_samples` times, selects a
random baseline from baselines' distribution and a random point along the
path between the baseline and the input, and computes the gradient of outputs
with respect to those selected random points. The final SHAP values represent
the expected values of gradients * (inputs - baselines).
GradientShap makes an assumption that the input features are independent
and that the explanation model is linear, meaning that the explanations
are modeled through the additive composition of feature effects.
    Under those assumptions, SHAP values can be approximated as the expectation
    of gradients computed for `n_samples` randomly generated input samples,
    obtained by adding gaussian noise to each input, with baselines drawn
    from the baselines' distribution.
In some sense it can be viewed as an approximation of integrated gradients
by computing the expectations of gradients for different baselines.
Current implementation uses Smoothgrad from :class:`.NoiseTunnel` in order to
randomly draw samples from the distribution of baselines, add noise to input
samples and compute the expectation (smoothgrad).
"""
def __init__(self, forward_func: Callable, multiply_by_inputs: bool = True) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then this type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
                More details can be found here:
https://arxiv.org/abs/1711.06104
In case of gradient shap, if `multiply_by_inputs`
is set to True, the sensitivity scores of scaled inputs
are being multiplied by (inputs - baselines).
"""
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
n_samples: int = 5,
stdevs: Union[float, Tuple[float, ...]] = 0.0,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
n_samples: int = 5,
stdevs: Union[float, Tuple[float, ...]] = 0.0,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
) -> TensorOrTupleOfTensorsGeneric:
...
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
n_samples: int = 5,
stdevs: Union[float, Tuple[float, ...]] = 0.0,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which SHAP attribution
values are computed. If `forward_func` takes a single
tensor as input, a single input tensor should be provided.
If `forward_func` takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (Tensor, tuple[Tensor, ...], or Callable):
Baselines define the starting point from which expectation
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
the first dimension equal to the number of examples
in the baselines' distribution. The remaining dimensions
must match with input tensor's dimension starting from
the second dimension.
- a tuple of tensors, if inputs is a tuple of tensors,
with the first dimension of any tensor inside the tuple
equal to the number of examples in the baseline's
distribution. The remaining dimensions must match
the dimensions of the corresponding input tensor
starting from the second dimension.
- callable function, optionally takes `inputs` as an
argument and either returns a single tensor
or a tuple of those.
It is recommended that the number of samples in the baselines'
tensors is larger than one.
n_samples (int, optional): The number of randomly generated examples
per sample in the input batch. Random examples are
generated by adding gaussian random noise to each sample.
Default: `5` if `n_samples` is not provided.
stdevs (float or tuple of float, optional): The standard deviation
of gaussian noise with zero mean that is added to each
input in the batch. If `stdevs` is a single float value
then that same value is used for all inputs. If it is
a tuple, then it must have the same length as the inputs
tuple. In this case, each stdev value in the stdevs tuple
corresponds to the input with the same index in the inputs
tuple.
Default: 0.0
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It can contain a tuple of ND tensors or
any arbitrary python type of any shape.
In case of the ND tensor the first dimension of the
tensor must correspond to the batch size. It will be
                        repeated for each of the `n_samples` randomly generated
                        input samples.
Note that the gradients are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution score computed based on GradientSHAP with respect
to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
This is computed using the property that the total
sum of forward_func(inputs) - forward_func(baselines)
must be very close to the total sum of the attributions
based on GradientSHAP.
Delta is calculated for each example in the input after adding
`n_samples` times gaussian noise to each of them. Therefore,
the dimensionality of the deltas tensor is equal to the
`number of examples in the input` * `n_samples`
The deltas are ordered by each input example and `n_samples`
noisy samples generated for it.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> gradient_shap = GradientShap(net)
>>> input = torch.randn(3, 3, 32, 32, requires_grad=True)
>>> # choosing baselines randomly
>>> baselines = torch.randn(20, 3, 32, 32)
>>> # Computes gradient shap for the input
>>> # Attribution size matches input size: 3x3x32x32
>>> attribution = gradient_shap.attribute(input, baselines,
target=5)
"""
# since `baselines` is a distribution, we can generate it using a function
# rather than passing it as an input argument
baselines = _format_callable_baseline(baselines, inputs)
assert isinstance(baselines[0], torch.Tensor), (
"Baselines distribution has to be provided in a form "
"of a torch.Tensor {}.".format(baselines[0])
)
input_min_baseline_x_grad = InputBaselineXGradient(
self.forward_func, self.multiplies_by_inputs
)
input_min_baseline_x_grad.gradient_func = self.gradient_func
nt = NoiseTunnel(input_min_baseline_x_grad)
# NOTE: using attribute.__wrapped__ to not log
attributions = nt.attribute.__wrapped__(
nt, # self
inputs,
nt_type="smoothgrad",
nt_samples=n_samples,
stdevs=stdevs,
draw_baseline_from_distrib=True,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
return_convergence_delta=return_convergence_delta,
)
return attributions
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
class InputBaselineXGradient(GradientAttribution):
def __init__(self, forward_func: Callable, multiply_by_inputs=True) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then this type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
                More details can be found here:
https://arxiv.org/abs/1711.06104
In case of gradient shap, if `multiply_by_inputs`
is set to True, the sensitivity scores of scaled inputs
are being multiplied by (inputs - baselines).
"""
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
) -> TensorOrTupleOfTensorsGeneric:
...
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs, baselines = _format_input_baseline(inputs, baselines)
rand_coefficient = torch.tensor(
np.random.uniform(0.0, 1.0, inputs[0].shape[0]),
device=inputs[0].device,
dtype=inputs[0].dtype,
)
input_baseline_scaled = tuple(
_scale_input(input, baseline, rand_coefficient)
for input, baseline in zip(inputs, baselines)
)
grads = self.gradient_func(
self.forward_func, input_baseline_scaled, target, additional_forward_args
)
if self.multiplies_by_inputs:
input_baseline_diffs = tuple(
input - baseline for input, baseline in zip(inputs, baselines)
)
attributions = tuple(
input_baseline_diff * grad
for input_baseline_diff, grad in zip(input_baseline_diffs, grads)
)
else:
attributions = grads
return _compute_conv_delta_and_format_attrs(
self,
return_convergence_delta,
attributions,
baselines,
inputs,
additional_forward_args,
target,
is_inputs_tuple,
)
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
def _scale_input(
input: Tensor, baseline: Union[Tensor, int, float], rand_coefficient: Tensor
) -> Tensor:
# batch size
bsz = input.shape[0]
inp_shape_wo_bsz = input.shape[1:]
inp_shape = (bsz,) + tuple([1] * len(inp_shape_wo_bsz))
# expand and reshape the indices
rand_coefficient = rand_coefficient.view(inp_shape)
input_baseline_scaled = (
rand_coefficient * input + (1.0 - rand_coefficient) * baseline
).requires_grad_()
return input_baseline_scaled
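

# --- Illustrative sketch (editorial addition, not part of the library API) ---
# A minimal demonstration of the random interpolation performed by `_scale_input`
# above: each example in the batch is moved toward its baseline by a coefficient
# drawn uniformly from [0, 1], which is how GradientShap samples points between
# inputs and baselines. The function name and tensors below are hypothetical.
def _demo_scale_input_sketch() -> Tensor:
    inp = torch.randn(4, 3)        # batch of 4 examples with 3 features each
    baseline = torch.zeros(4, 3)   # zero baseline for every example
    coeff = torch.rand(4)          # one interpolation coefficient per example
    scaled = _scale_input(inp, baseline, coeff)
    # each row equals coeff * inp + (1 - coeff) * baseline and requires grad
    return scaled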
|
#!/usr/bin/env python3
import inspect
import math
import typing
import warnings
from typing import Any, Callable, cast, List, Optional, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_flatten_tensor_or_tuple,
_format_output,
_format_tensor_into_tuples,
_get_max_feature_index,
_is_tuple,
_reduce_list,
_run_forward,
)
from captum._utils.models.linear_model import SkLearnLasso
from captum._utils.models.model import Model
from captum._utils.progress import progress
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._utils.attribution import PerturbationAttribution
from captum.attr._utils.batching import _batch_example_iterator
from captum.attr._utils.common import (
_construct_default_feature_mask,
_format_input_baseline,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import CosineSimilarity
from torch.utils.data import DataLoader, TensorDataset
class LimeBase(PerturbationAttribution):
r"""
Lime is an interpretability method that trains an interpretable surrogate model
by sampling points around a specified input example and using model evaluations
at these points to train a simpler interpretable 'surrogate' model, such as a
linear model.
LimeBase provides a generic framework to train a surrogate interpretable model.
This differs from most other attribution methods, since the method returns a
representation of the interpretable model (e.g. coefficients of the linear model).
For a similar interface to other perturbation-based attribution methods, please use
the Lime child class, which defines specific transformations for the interpretable
model.
LimeBase allows sampling points in either the interpretable space or the original
input space to train the surrogate model. The interpretable space is a feature
vector used to train the surrogate interpretable model; this feature space is often
of smaller dimensionality than the original feature space in order for the surrogate
model to be more interpretable.
If sampling in the interpretable space, a transformation function must be provided
to define how a vector sampled in the interpretable space can be transformed into
an example in the original input space. If sampling in the original input space, a
transformation function must be provided to define how the input can be transformed
into its interpretable vector representation.
More details regarding LIME can be found in the original paper:
https://arxiv.org/abs/1602.04938
"""
def __init__(
self,
forward_func: Callable,
interpretable_model: Model,
similarity_func: Callable,
perturb_func: Callable,
perturb_interpretable_space: bool,
from_interp_rep_transform: Optional[Callable],
to_interp_rep_transform: Optional[Callable],
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it. If a batch is provided as input for
attribution, it is expected that forward_func returns a scalar
representing the entire batch.
interpretable_model (Model): Model object to train interpretable model.
A Model object provides a `fit` method to train the model,
given a dataloader, with batches containing three tensors:
- interpretable_inputs: Tensor
[2D num_samples x num_interp_features],
- expected_outputs: Tensor [1D num_samples],
- weights: Tensor [1D num_samples]
The model object must also provide a `representation` method to
access the appropriate coefficients or representation of the
interpretable model after fitting.
Some predefined interpretable linear models are provided in
captum._utils.models.linear_model including wrappers around
SkLearn linear models as well as SGD-based PyTorch linear
models.
                    Note that calling fit multiple times should retrain the
                    interpretable model, since each attribution call reuses
                    the same given interpretable model object.
similarity_func (Callable): Function which takes a single sample
along with its corresponding interpretable representation
and returns the weight of the interpretable sample for
training interpretable model. Weight is generally
determined based on similarity to the original input.
The original paper refers to this as a similarity kernel.
The expected signature of this callable is:
>>> similarity_func(
>>> original_input: Tensor or tuple[Tensor, ...],
>>> perturbed_input: Tensor or tuple[Tensor, ...],
>>> perturbed_interpretable_input:
>>> Tensor [2D 1 x num_interp_features],
>>> **kwargs: Any
>>> ) -> float or Tensor containing float scalar
perturbed_input and original_input will be the same type and
contain tensors of the same shape (regardless of whether or not
the sampling function returns inputs in the interpretable
space). original_input is the same as the input provided
when calling attribute.
All kwargs passed to the attribute method are
provided as keyword arguments (kwargs) to this callable.
perturb_func (Callable): Function which returns a single
sampled input, generally a perturbation of the original
input, which is used to train the interpretable surrogate
model. Function can return samples in either
the original input space (matching type and tensor shapes
of original input) or in the interpretable input space,
                    which is a vector containing the interpretable features.
Alternatively, this function can return a generator
yielding samples to train the interpretable surrogate
model, and n_samples perturbations will be sampled
from this generator.
The expected signature of this callable is:
>>> perturb_func(
>>> original_input: Tensor or tuple[Tensor, ...],
>>> **kwargs: Any
>>> ) -> Tensor, tuple[Tensor, ...], or
>>> generator yielding tensor or tuple[Tensor, ...]
All kwargs passed to the attribute method are
provided as keyword arguments (kwargs) to this callable.
Returned sampled input should match the input type (Tensor
or Tuple of Tensor and corresponding shapes) if
perturb_interpretable_space = False. If
perturb_interpretable_space = True, the return type should
be a single tensor of shape 1 x num_interp_features,
corresponding to the representation of the
sample to train the interpretable model.
perturb_interpretable_space (bool): Indicates whether
perturb_func returns a sample in the interpretable space
(tensor of shape 1 x num_interp_features) or a sample
in the original space, matching the format of the original
input. Once sampled, inputs can be converted to / from
the interpretable representation with either
to_interp_rep_transform or from_interp_rep_transform.
from_interp_rep_transform (Callable): Function which takes a
single sampled interpretable representation (tensor
of shape 1 x num_interp_features) and returns
the corresponding representation in the input space
(matching shapes of original input to attribute).
This argument is necessary if perturb_interpretable_space
is True, otherwise None can be provided for this argument.
The expected signature of this callable is:
>>> from_interp_rep_transform(
>>> curr_sample: Tensor [2D 1 x num_interp_features]
>>> original_input: Tensor or Tuple of Tensors,
>>> **kwargs: Any
>>> ) -> Tensor or tuple[Tensor, ...]
Returned sampled input should match the type of original_input
and corresponding tensor shapes.
All kwargs passed to the attribute method are
provided as keyword arguments (kwargs) to this callable.
to_interp_rep_transform (Callable): Function which takes a
sample in the original input space and converts to
its interpretable representation (tensor
of shape 1 x num_interp_features).
This argument is necessary if perturb_interpretable_space
is False, otherwise None can be provided for this argument.
The expected signature of this callable is:
>>> to_interp_rep_transform(
>>> curr_sample: Tensor or Tuple of Tensors,
>>> original_input: Tensor or Tuple of Tensors,
>>> **kwargs: Any
>>> ) -> Tensor [2D 1 x num_interp_features]
curr_sample will match the type of original_input
and corresponding tensor shapes.
All kwargs passed to the attribute method are
provided as keyword arguments (kwargs) to this callable.
"""
PerturbationAttribution.__init__(self, forward_func)
self.interpretable_model = interpretable_model
self.similarity_func = similarity_func
self.perturb_func = perturb_func
self.perturb_interpretable_space = perturb_interpretable_space
self.from_interp_rep_transform = from_interp_rep_transform
self.to_interp_rep_transform = to_interp_rep_transform
        if self.perturb_interpretable_space:
            assert self.from_interp_rep_transform is not None, (
                "Must provide transform from interpretable space to original"
                " input space when sampling from interpretable space."
            )
        else:
            assert self.to_interp_rep_transform is not None, (
                "Must provide transform from original input space to"
                " interpretable space."
            )
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
n_samples: int = 50,
perturbations_per_eval: int = 1,
show_progress: bool = False,
**kwargs,
) -> Tensor:
r"""
This method attributes the output of the model with given target index
(in case it is provided, otherwise it assumes that output is a
scalar) to the inputs of the model using the approach described above.
It trains an interpretable model and returns a representation of the
interpretable model.
It is recommended to only provide a single example as input (tensors
with first dimension or batch size = 1). This is because LIME is generally
used for sample-based interpretability, training a separate interpretable
model to explain a model's prediction on each individual example.
A batch of inputs can be provided as inputs only if forward_func
returns a single value per batch (e.g. loss).
The interpretable feature representation should still have shape
1 x num_interp_features, corresponding to the interpretable
representation for the full batch, and perturbations_per_eval
must be set to 1.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which LIME
is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which surrogate model is trained
(for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_samples (int, optional): The number of samples of the original
model used to train the surrogate interpretable model.
Default: `50` if `n_samples` is not provided.
perturbations_per_eval (int, optional): Allows multiple samples
to be processed simultaneously in one call to forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
**kwargs (Any, optional): Any additional arguments necessary for
sampling and transformation functions (provided to
constructor).
Default: None
Returns:
**interpretable model representation**:
- **interpretable model representation** (*Any*):
A representation of the interpretable model trained. The return
type matches the return type of train_interpretable_model_func.
For example, this could contain coefficients of a
linear surrogate model.
Examples::
>>> # SimpleClassifier takes a single input tensor of
>>> # float features with size N x 5,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>>
>>> # We will train an interpretable model with the same
>>> # features by simply sampling with added Gaussian noise
>>> # to the inputs and training a model to predict the
>>> # score of the target class.
>>>
>>> # For interpretable model training, we will use sklearn
>>> # linear model in this example. We have provided wrappers
>>> # around sklearn linear models to fit the Model interface.
>>> # Any arguments provided to the sklearn constructor can also
>>> # be provided to the wrapper, e.g.:
>>> # SkLearnLinearModel("linear_model.Ridge", alpha=2.0)
>>> from captum._utils.models.linear_model import SkLearnLinearModel
>>>
>>>
>>> # Define similarity kernel (exponential kernel based on L2 norm)
>>> def similarity_kernel(
>>> original_input: Tensor,
>>> perturbed_input: Tensor,
>>> perturbed_interpretable_input: Tensor,
>>> **kwargs)->Tensor:
>>> # kernel_width will be provided to attribute as a kwarg
>>> kernel_width = kwargs["kernel_width"]
>>> l2_dist = torch.norm(original_input - perturbed_input)
>>> return torch.exp(- (l2_dist**2) / (kernel_width**2))
>>>
>>>
>>> # Define sampling function
>>> # This function samples in original input space
>>> def perturb_func(
>>> original_input: Tensor,
>>> **kwargs)->Tensor:
>>> return original_input + torch.randn_like(original_input)
>>>
>>> # For this example, we are setting the interpretable input to
>>> # match the model input, so the to_interp_rep_transform
>>> # function simply returns the input. In most cases, the interpretable
>>> # input will be different and may have a smaller feature set, so
>>> # an appropriate transformation function should be provided.
>>>
>>> def to_interp_transform(curr_sample, original_inp,
>>> **kwargs):
>>> return curr_sample
>>>
>>> # Generating random input with size 1 x 5
>>> input = torch.randn(1, 5)
>>> # Defining LimeBase interpreter
>>> lime_attr = LimeBase(net,
SkLearnLinearModel("linear_model.Ridge"),
similarity_func=similarity_kernel,
perturb_func=perturb_func,
perturb_interpretable_space=False,
from_interp_rep_transform=None,
to_interp_rep_transform=to_interp_transform)
>>> # Computes interpretable model, returning coefficients of linear
>>> # model.
>>> attr_coefs = lime_attr.attribute(input, target=1, kernel_width=1.1)
"""
with torch.no_grad():
inp_tensor = (
cast(Tensor, inputs) if isinstance(inputs, Tensor) else inputs[0]
)
device = inp_tensor.device
interpretable_inps = []
similarities = []
outputs = []
curr_model_inputs = []
expanded_additional_args = None
expanded_target = None
perturb_generator = None
if inspect.isgeneratorfunction(self.perturb_func):
perturb_generator = self.perturb_func(inputs, **kwargs)
if show_progress:
attr_progress = progress(
total=math.ceil(n_samples / perturbations_per_eval),
desc=f"{self.get_name()} attribution",
)
attr_progress.update(0)
batch_count = 0
for _ in range(n_samples):
if perturb_generator:
try:
curr_sample = next(perturb_generator)
except StopIteration:
warnings.warn(
"Generator completed prior to given n_samples iterations!"
)
break
else:
curr_sample = self.perturb_func(inputs, **kwargs)
batch_count += 1
if self.perturb_interpretable_space:
interpretable_inps.append(curr_sample)
curr_model_inputs.append(
self.from_interp_rep_transform( # type: ignore
curr_sample, inputs, **kwargs
)
)
else:
curr_model_inputs.append(curr_sample)
interpretable_inps.append(
self.to_interp_rep_transform( # type: ignore
curr_sample, inputs, **kwargs
)
)
curr_sim = self.similarity_func(
inputs, curr_model_inputs[-1], interpretable_inps[-1], **kwargs
)
similarities.append(
curr_sim.flatten()
if isinstance(curr_sim, Tensor)
else torch.tensor([curr_sim], device=device)
)
if len(curr_model_inputs) == perturbations_per_eval:
if expanded_additional_args is None:
expanded_additional_args = _expand_additional_forward_args(
additional_forward_args, len(curr_model_inputs)
)
if expanded_target is None:
expanded_target = _expand_target(target, len(curr_model_inputs))
model_out = self._evaluate_batch(
curr_model_inputs,
expanded_target,
expanded_additional_args,
device,
)
if show_progress:
attr_progress.update()
outputs.append(model_out)
curr_model_inputs = []
if len(curr_model_inputs) > 0:
expanded_additional_args = _expand_additional_forward_args(
additional_forward_args, len(curr_model_inputs)
)
expanded_target = _expand_target(target, len(curr_model_inputs))
model_out = self._evaluate_batch(
curr_model_inputs,
expanded_target,
expanded_additional_args,
device,
)
if show_progress:
attr_progress.update()
outputs.append(model_out)
if show_progress:
attr_progress.close()
combined_interp_inps = torch.cat(interpretable_inps).float()
combined_outputs = (
torch.cat(outputs)
if len(outputs[0].shape) > 0
else torch.stack(outputs)
).float()
combined_sim = (
torch.cat(similarities)
if len(similarities[0].shape) > 0
else torch.stack(similarities)
).float()
dataset = TensorDataset(
combined_interp_inps, combined_outputs, combined_sim
)
self.interpretable_model.fit(DataLoader(dataset, batch_size=batch_count))
return self.interpretable_model.representation()
def _evaluate_batch(
self,
curr_model_inputs: List[TensorOrTupleOfTensorsGeneric],
expanded_target: TargetType,
expanded_additional_args: Any,
device: torch.device,
):
model_out = _run_forward(
self.forward_func,
_reduce_list(curr_model_inputs),
expanded_target,
expanded_additional_args,
)
if isinstance(model_out, Tensor):
assert model_out.numel() == len(curr_model_inputs), (
"Number of outputs is not appropriate, must return "
"one output per perturbed input"
)
if isinstance(model_out, Tensor):
return model_out.flatten()
return torch.tensor([model_out], device=device)
def has_convergence_delta(self) -> bool:
return False
@property
def multiplies_by_inputs(self):
return False
# Default transformations and methods
# for Lime child implementation.
def default_from_interp_rep_transform(curr_sample, original_inputs, **kwargs):
assert (
"feature_mask" in kwargs
), "Must provide feature_mask to use default interpretable representation transform"
assert (
"baselines" in kwargs
), "Must provide baselines to use default interpretable representation transform"
feature_mask = kwargs["feature_mask"]
if isinstance(feature_mask, Tensor):
binary_mask = curr_sample[0][feature_mask].bool()
return (
binary_mask.to(original_inputs.dtype) * original_inputs
+ (~binary_mask).to(original_inputs.dtype) * kwargs["baselines"]
)
else:
binary_mask = tuple(
curr_sample[0][feature_mask[j]].bool() for j in range(len(feature_mask))
)
return tuple(
binary_mask[j].to(original_inputs[j].dtype) * original_inputs[j]
+ (~binary_mask[j]).to(original_inputs[j].dtype) * kwargs["baselines"][j]
for j in range(len(feature_mask))
)
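

# --- Illustrative sketch (editorial addition, not part of the library API) ---
# Shows how `default_from_interp_rep_transform` maps a binary interpretable
# vector back to the input space: positions whose interpretable feature is 1
# keep the original value, positions whose feature is 0 are set to the baseline.
# The function name and the concrete tensors below are hypothetical.
def _demo_default_from_interp_rep_transform() -> Tensor:
    original = torch.arange(4.0).reshape(1, 4)   # input of shape 1 x 4
    feature_mask = torch.tensor([[0, 0, 1, 1]])  # two interpretable features
    curr_sample = torch.tensor([[1, 0]])         # feature 0 kept, feature 1 dropped
    # expected result: [[0., 1., 0., 0.]] with a scalar baseline of 0.0
    return default_from_interp_rep_transform(
        curr_sample, original, feature_mask=feature_mask, baselines=0.0
    )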
def get_exp_kernel_similarity_function(
distance_mode: str = "cosine", kernel_width: float = 1.0
) -> Callable:
r"""
    This method constructs an appropriate similarity function to compute
    weights for perturbed samples in LIME. The distance between the original
    and perturbed inputs is computed based on the provided distance mode,
and the distance is passed through an exponential kernel with given
kernel width to convert to a range between 0 and 1.
The callable returned can be provided as the similarity_fn for
Lime or LimeBase.
Args:
distance_mode (str, optional): Distance mode can be either "cosine" or
"euclidean" corresponding to either cosine distance
or Euclidean distance respectively. Distance is computed
by flattening the original inputs and perturbed inputs
(concatenating tuples of inputs if necessary) and computing
distances between the resulting vectors.
Default: "cosine"
kernel_width (float, optional):
Kernel width for exponential kernel applied to distance.
Default: 1.0
Returns:
*Callable*:
- **similarity_fn** (*Callable*):
Similarity function. This callable can be provided as the
similarity_fn for Lime or LimeBase.
"""
def default_exp_kernel(original_inp, perturbed_inp, __, **kwargs):
flattened_original_inp = _flatten_tensor_or_tuple(original_inp).float()
flattened_perturbed_inp = _flatten_tensor_or_tuple(perturbed_inp).float()
if distance_mode == "cosine":
cos_sim = CosineSimilarity(dim=0)
distance = 1 - cos_sim(flattened_original_inp, flattened_perturbed_inp)
elif distance_mode == "euclidean":
distance = torch.norm(flattened_original_inp - flattened_perturbed_inp)
else:
raise ValueError("distance_mode must be either cosine or euclidean.")
return math.exp(-1 * (distance**2) / (2 * (kernel_width**2)))
return default_exp_kernel
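

# --- Illustrative sketch (editorial addition, not part of the library API) ---
# Demonstrates constructing a similarity kernel with
# `get_exp_kernel_similarity_function` and evaluating the weight it assigns to a
# perturbed sample: the weight decays exponentially with the squared distance
# between the flattened original and perturbed inputs. The function name and
# tensors below are hypothetical.
def _demo_exp_kernel_similarity() -> float:
    sim_fn = get_exp_kernel_similarity_function("euclidean", kernel_width=1.0)
    original = torch.zeros(1, 3)
    perturbed = 0.1 * torch.ones(1, 3)
    # third positional argument (the interpretable representation) is unused here
    return sim_fn(original, perturbed, None)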
def default_perturb_func(original_inp, **kwargs):
assert (
"num_interp_features" in kwargs
), "Must provide num_interp_features to use default interpretable sampling function"
if isinstance(original_inp, Tensor):
device = original_inp.device
else:
device = original_inp[0].device
probs = torch.ones(1, kwargs["num_interp_features"]) * 0.5
return torch.bernoulli(probs).to(device=device).long()
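

# --- Illustrative sketch (editorial addition, not part of the library API) ---
# Demonstrates the default sampling used by Lime: each interpretable feature is
# kept (1) or dropped (0) independently with probability 0.5, producing a binary
# vector of shape 1 x num_interp_features. The function name is hypothetical.
def _demo_default_perturb_func() -> Tensor:
    inp = torch.randn(1, 6)  # a single example with 6 scalar features
    return default_perturb_func(inp, num_interp_features=6)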
def construct_feature_mask(feature_mask, formatted_inputs):
if feature_mask is None:
feature_mask, num_interp_features = _construct_default_feature_mask(
formatted_inputs
)
else:
feature_mask = _format_tensor_into_tuples(feature_mask)
min_interp_features = int(
min(
torch.min(single_mask).item()
for single_mask in feature_mask
if single_mask.numel()
)
)
if min_interp_features != 0:
warnings.warn(
"Minimum element in feature mask is not 0, shifting indices to"
" start at 0."
)
feature_mask = tuple(
single_mask - min_interp_features for single_mask in feature_mask
)
num_interp_features = _get_max_feature_index(feature_mask) + 1
return feature_mask, num_interp_features
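

# --- Illustrative sketch (editorial addition, not part of the library API) ---
# Demonstrates `construct_feature_mask`: mask indices that do not start at 0 are
# shifted so the minimum index becomes 0, and the number of interpretable
# features is inferred from the maximum index. The function name is hypothetical.
def _demo_construct_feature_mask() -> Tuple[Tuple[Tensor, ...], int]:
    inputs = (torch.randn(1, 4),)
    mask = (torch.tensor([[2, 2, 3, 3]]),)  # indices start at 2, triggers a warning
    shifted_mask, num_interp_features = construct_feature_mask(mask, inputs)
    # shifted_mask == (tensor([[0, 0, 1, 1]]),) and num_interp_features == 2
    return shifted_mask, num_interp_features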
class Lime(LimeBase):
r"""
Lime is an interpretability method that trains an interpretable surrogate model
by sampling points around a specified input example and using model evaluations
at these points to train a simpler interpretable 'surrogate' model, such as a
linear model.
Lime provides a more specific implementation than LimeBase in order to expose
a consistent API with other perturbation-based algorithms. For more general
use of the LIME framework, consider using the LimeBase class directly and
defining custom sampling and transformation to / from interpretable
representation functions.
Lime assumes that the interpretable representation is a binary vector,
corresponding to some elements in the input being set to their baseline value
if the corresponding binary interpretable feature value is 0 or being set
to the original input value if the corresponding binary interpretable
feature value is 1. Input values can be grouped to correspond to the same
binary interpretable feature using a feature mask provided when calling
attribute, similar to other perturbation-based attribution methods.
One example of this setting is when applying Lime to an image classifier.
Pixels in an image can be grouped into super-pixels or segments, which
correspond to interpretable features, provided as a feature_mask when
calling attribute. Sampled binary vectors convey whether a super-pixel
is on (retains the original input values) or off (set to the corresponding
baseline value, e.g. black image). An interpretable linear model is trained
with input being the binary vectors and outputs as the corresponding scores
of the image classifier with the appropriate super-pixels masked based on the
binary vector. Coefficients of the trained surrogate
linear model convey the importance of each super-pixel.
More details regarding LIME can be found in the original paper:
https://arxiv.org/abs/1602.04938
"""
def __init__(
self,
forward_func: Callable,
interpretable_model: Optional[Model] = None,
similarity_func: Optional[Callable] = None,
perturb_func: Optional[Callable] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
interpretable_model (Model, optional): Model object to train
interpretable model.
This argument is optional and defaults to SkLearnLasso(alpha=0.01),
which is a wrapper around the Lasso linear model in SkLearn.
This requires having sklearn version >= 0.23 available.
Other predefined interpretable linear models are provided in
captum._utils.models.linear_model.
Alternatively, a custom model object must provide a `fit` method to
train the model, given a dataloader, with batches containing
three tensors:
- interpretable_inputs: Tensor
[2D num_samples x num_interp_features],
- expected_outputs: Tensor [1D num_samples],
- weights: Tensor [1D num_samples]
The model object must also provide a `representation` method to
access the appropriate coefficients or representation of the
interpretable model after fitting.
                    Note that calling fit multiple times should retrain the
                    interpretable model, since each attribution call reuses
                    the same given interpretable model object.
similarity_func (Callable, optional): Function which takes a single sample
along with its corresponding interpretable representation
and returns the weight of the interpretable sample for
training the interpretable model.
This is often referred to as a similarity kernel.
This argument is optional and defaults to a function which
applies an exponential kernel to the cosine distance between
the original input and perturbed input, with a kernel width
of 1.0.
A similarity function applying an exponential
kernel to cosine / euclidean distances can be constructed
using the provided get_exp_kernel_similarity_function in
captum.attr._core.lime.
Alternately, a custom callable can also be provided.
The expected signature of this callable is:
>>> def similarity_func(
>>> original_input: Tensor or tuple[Tensor, ...],
>>> perturbed_input: Tensor or tuple[Tensor, ...],
>>> perturbed_interpretable_input:
>>> Tensor [2D 1 x num_interp_features],
>>> **kwargs: Any
>>> ) -> float or Tensor containing float scalar
perturbed_input and original_input will be the same type and
contain tensors of the same shape, with original_input
being the same as the input provided when calling attribute.
kwargs includes baselines, feature_mask, num_interp_features
(integer, determined from feature mask).
perturb_func (Callable, optional): Function which returns a single
sampled input, which is a binary vector of length
num_interp_features, or a generator of such tensors.
This function is optional, the default function returns
a binary vector where each element is selected
independently and uniformly at random. Custom
logic for selecting sampled binary vectors can
be implemented by providing a function with the
following expected signature:
>>> perturb_func(
>>> original_input: Tensor or tuple[Tensor, ...],
>>> **kwargs: Any
>>> ) -> Tensor [Binary 2D Tensor 1 x num_interp_features]
>>> or generator yielding such tensors
kwargs includes baselines, feature_mask, num_interp_features
(integer, determined from feature mask).
"""
if interpretable_model is None:
interpretable_model = SkLearnLasso(alpha=0.01)
if similarity_func is None:
similarity_func = get_exp_kernel_similarity_function()
if perturb_func is None:
perturb_func = default_perturb_func
LimeBase.__init__(
self,
forward_func,
interpretable_model,
similarity_func,
perturb_func,
True,
default_from_interp_rep_transform,
None,
)
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
n_samples: int = 25,
perturbations_per_eval: int = 1,
return_input_shape: bool = True,
show_progress: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
This method attributes the output of the model with given target index
(in case it is provided, otherwise it assumes that output is a
scalar) to the inputs of the model using the approach described above,
training an interpretable model and returning a representation of the
interpretable model.
It is recommended to only provide a single example as input (tensors
with first dimension or batch size = 1). This is because LIME is generally
used for sample-based interpretability, training a separate interpretable
model to explain a model's prediction on each individual example.
A batch of inputs can also be provided as inputs, similar to
other perturbation-based attribution methods. In this case, if forward_fn
returns a scalar per example, attributions will be computed for each
example independently, with a separate interpretable model trained for each
        example. Note that the provided similarity and perturbation functions will be
        given each example separately (first dimension = 1) in this case.
If forward_fn returns a scalar per batch (e.g. loss), attributions will
still be computed using a single interpretable model for the full batch.
In this case, similarity and perturbation functions will be provided the
same original input containing the full batch.
The number of interpretable features is determined from the provided
feature mask, or if none is provided, from the default feature mask,
which considers each scalar input as a separate feature. It is
generally recommended to provide a feature mask which groups features
into a small number of interpretable features / components (e.g.
superpixels in images).
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which LIME
is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference value which replaces each
feature when the corresponding interpretable feature
is set to 0.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which surrogate model is trained
(for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
                        For a tensor, the first dimension of the tensor must
                        correspond to the number of examples. For all other types,
                        the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which correspond to the same
interpretable feature. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Values across
all tensors should be integers in the range 0 to
num_interp_features - 1, and indices corresponding to the
same feature should have the same value.
Note that features are grouped across tensors
(unlike feature ablation and occlusion), so
if the same index is used in different tensors, those
features are still grouped and added simultaneously.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature.
Default: None
n_samples (int, optional): The number of samples of the original
model used to train the surrogate interpretable model.
                        Default: `25` if `n_samples` is not provided.
perturbations_per_eval (int, optional): Allows multiple samples
to be processed simultaneously in one call to forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
            return_input_shape (bool, optional): Determines whether the returned
                        tensor(s) only contain the coefficients for each
                        interpretable feature from the trained surrogate model, or
                        whether the returned attributions match the input shape.
                        When return_input_shape is True, the return type of attribute
                        matches the input shape, with each element containing the
                        coefficient of the corresponding interpretable feature.
                        All elements with the same value in the feature mask
                        will contain the same coefficient in the returned
                        attributions. If return_input_shape is False, a 1D
                        tensor is returned, containing only the coefficients
                        of the trained interpretable model, with length
                        num_interp_features.
                        Default: True
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fallback to
a simple output of progress.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If return_input_shape = True, attributions will be
the same size as the provided inputs, with each value
providing the coefficient of the corresponding
                interpretable feature.
If return_input_shape is False, a 1D
tensor is returned, containing only the coefficients
                of the trained interpretable model, with length
num_interp_features.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 1 x 4 x 4
>>> input = torch.randn(1, 4, 4)
>>> # Defining Lime interpreter
>>> lime = Lime(net)
>>> # Computes attribution, with each of the 4 x 4 = 16
>>> # features as a separate interpretable feature
>>> attr = lime.attribute(input, target=1, n_samples=200)
>>> # Alternatively, we can group each 2x2 square of the inputs
>>> # as one 'interpretable' feature and perturb them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are set to their
>>> # baseline value, when the corresponding binary interpretable
>>> # feature is set to 0.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> # Computes interpretable model and returning attributions
>>> # matching input shape.
>>> attr = lime.attribute(input, target=1, feature_mask=feature_mask)
"""
return self._attribute_kwargs(
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
feature_mask=feature_mask,
n_samples=n_samples,
perturbations_per_eval=perturbations_per_eval,
return_input_shape=return_input_shape,
show_progress=show_progress,
)
def _attribute_kwargs( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
n_samples: int = 25,
perturbations_per_eval: int = 1,
return_input_shape: bool = True,
show_progress: bool = False,
**kwargs,
) -> TensorOrTupleOfTensorsGeneric:
is_inputs_tuple = _is_tuple(inputs)
formatted_inputs, baselines = _format_input_baseline(inputs, baselines)
bsz = formatted_inputs[0].shape[0]
feature_mask, num_interp_features = construct_feature_mask(
feature_mask, formatted_inputs
)
if num_interp_features > 10000:
warnings.warn(
"Attempting to construct interpretable model with > 10000 features."
"This can be very slow or lead to OOM issues. Please provide a feature"
"mask which groups input features to reduce the number of interpretable"
"features. "
)
coefs: Tensor
if bsz > 1:
test_output = _run_forward(
self.forward_func, inputs, target, additional_forward_args
)
if isinstance(test_output, Tensor) and torch.numel(test_output) > 1:
if torch.numel(test_output) == bsz:
warnings.warn(
"You are providing multiple inputs for Lime / Kernel SHAP "
"attributions. This trains a separate interpretable model "
"for each example, which can be time consuming. It is "
"recommended to compute attributions for one example at a time."
)
output_list = []
for (
curr_inps,
curr_target,
curr_additional_args,
curr_baselines,
curr_feature_mask,
) in _batch_example_iterator(
bsz,
formatted_inputs,
target,
additional_forward_args,
baselines,
feature_mask,
):
coefs = super().attribute.__wrapped__(
self,
inputs=curr_inps if is_inputs_tuple else curr_inps[0],
target=curr_target,
additional_forward_args=curr_additional_args,
n_samples=n_samples,
perturbations_per_eval=perturbations_per_eval,
baselines=curr_baselines
if is_inputs_tuple
else curr_baselines[0],
feature_mask=curr_feature_mask
if is_inputs_tuple
else curr_feature_mask[0],
num_interp_features=num_interp_features,
show_progress=show_progress,
**kwargs,
)
if return_input_shape:
output_list.append(
self._convert_output_shape(
curr_inps,
curr_feature_mask,
coefs,
num_interp_features,
is_inputs_tuple,
)
)
else:
output_list.append(coefs.reshape(1, -1)) # type: ignore
return _reduce_list(output_list)
else:
raise AssertionError(
"Invalid number of outputs, forward function should return a"
"scalar per example or a scalar per input batch."
)
else:
assert perturbations_per_eval == 1, (
"Perturbations per eval must be 1 when forward function"
"returns single value per batch!"
)
coefs = super().attribute.__wrapped__(
self,
inputs=inputs,
target=target,
additional_forward_args=additional_forward_args,
n_samples=n_samples,
perturbations_per_eval=perturbations_per_eval,
baselines=baselines if is_inputs_tuple else baselines[0],
feature_mask=feature_mask if is_inputs_tuple else feature_mask[0],
num_interp_features=num_interp_features,
show_progress=show_progress,
**kwargs,
)
if return_input_shape:
return self._convert_output_shape(
formatted_inputs,
feature_mask,
coefs,
num_interp_features,
is_inputs_tuple,
)
else:
return coefs
@typing.overload
def _convert_output_shape(
self,
formatted_inp: Tuple[Tensor, ...],
feature_mask: Tuple[Tensor, ...],
coefs: Tensor,
num_interp_features: int,
is_inputs_tuple: Literal[True],
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _convert_output_shape(
self,
formatted_inp: Tuple[Tensor, ...],
feature_mask: Tuple[Tensor, ...],
coefs: Tensor,
num_interp_features: int,
is_inputs_tuple: Literal[False],
) -> Tensor:
...
def _convert_output_shape(
self,
formatted_inp: Tuple[Tensor, ...],
feature_mask: Tuple[Tensor, ...],
coefs: Tensor,
num_interp_features: int,
is_inputs_tuple: bool,
) -> Union[Tensor, Tuple[Tensor, ...]]:
coefs = coefs.flatten()
attr = [
torch.zeros_like(single_inp, dtype=torch.float)
for single_inp in formatted_inp
]
for tensor_ind in range(len(formatted_inp)):
for single_feature in range(num_interp_features):
attr[tensor_ind] += (
coefs[single_feature].item()
* (feature_mask[tensor_ind] == single_feature).float()
)
return _format_output(is_inputs_tuple, tuple(attr))
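

# --- Illustrative sketch (editorial addition, not part of the library API) ---
# Conceptual version of `Lime._convert_output_shape`: each position of the input
# receives the surrogate-model coefficient of the interpretable feature it is
# assigned to by the feature mask. The function name and tensors are hypothetical.
def _demo_convert_output_shape_concept() -> Tensor:
    coefs = torch.tensor([0.7, -0.2])            # one coefficient per feature
    feature_mask = torch.tensor([[0, 0, 1, 1]])  # grouping of the 4 input positions
    attr = torch.zeros(feature_mask.shape, dtype=torch.float)
    for feature_idx in range(coefs.numel()):
        attr += coefs[feature_idx] * (feature_mask == feature_idx).float()
    return attr  # -> [[0.7, 0.7, -0.2, -0.2]]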
|
#!/usr/bin/env python3
from enum import Enum
from typing import Any, cast, List, Tuple, Union
import torch
from captum._utils.common import (
_expand_and_update_additional_forward_args,
_expand_and_update_baselines,
_expand_and_update_feature_mask,
_expand_and_update_target,
_format_output,
_format_tensor_into_tuples,
_is_tuple,
)
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import Attribution, GradientAttribution
from captum.attr._utils.common import _validate_noise_tunnel_type
from captum.log import log_usage
from torch import Tensor
class NoiseTunnelType(Enum):
smoothgrad = 1
smoothgrad_sq = 2
vargrad = 3
SUPPORTED_NOISE_TUNNEL_TYPES = list(NoiseTunnelType.__members__.keys())
class NoiseTunnel(Attribution):
r"""
Adds gaussian noise to each input in the batch `nt_samples` times
and applies the given attribution algorithm to each of the samples.
The attributions of the samples are combined based on the given noise
tunnel type (nt_type):
If nt_type is `smoothgrad`, the mean of the sampled attributions is
returned. This approximates smoothing the given attribution method
with a Gaussian Kernel.
If nt_type is `smoothgrad_sq`, the mean of the squared sample attributions
is returned.
If nt_type is `vargrad`, the variance of the sample attributions is
returned.
More details about adding noise can be found in the following papers:
* https://arxiv.org/abs/1810.03292
* https://arxiv.org/abs/1810.03307
* https://arxiv.org/abs/1706.03825
* https://arxiv.org/abs/1806.10758
    This method currently also supports batches of multiple example inputs;
    however, it can be computationally expensive depending on the model,
the dimensionality of the data and execution environment.
It is assumed that the batch size is the first dimension of input tensors.
"""
def __init__(self, attribution_method: Attribution) -> None:
r"""
Args:
attribution_method (Attribution): An instance of any attribution algorithm
of type `Attribution`. E.g. Integrated Gradients,
Conductance or Saliency.
"""
self.attribution_method = attribution_method
self.is_delta_supported = self.attribution_method.has_convergence_delta()
self._multiply_by_inputs = self.attribution_method.multiplies_by_inputs
self.is_gradient_method = isinstance(
self.attribution_method, GradientAttribution
)
Attribution.__init__(self, self.attribution_method.forward_func)
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
nt_type: str = "smoothgrad",
nt_samples: int = 5,
        nt_samples_batch_size: Union[None, int] = None,
stdevs: Union[float, Tuple[float, ...]] = 1.0,
draw_baseline_from_distrib: bool = False,
**kwargs: Any,
) -> Union[
Union[
Tensor,
Tuple[Tensor, Tensor],
Tuple[Tensor, ...],
Tuple[Tuple[Tensor, ...], Tensor],
]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which integrated
gradients are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
nt_type (str, optional): Smoothing type of the attributions.
                        `smoothgrad`, `smoothgrad_sq` or `vargrad`.
                        Default: `smoothgrad` if `nt_type` is not provided.
nt_samples (int, optional): The number of randomly generated examples
per sample in the input batch. Random examples are
generated by adding gaussian random noise to each sample.
Default: `5` if `nt_samples` is not provided.
            nt_samples_batch_size (int, optional): The number of `nt_samples`
                        that will be processed together. With the help
                        of this parameter we can avoid out-of-memory situations and
                        reduce the number of randomly generated examples per sample
                        in each batch.
Default: None if `nt_samples_batch_size` is not provided. In
this case all `nt_samples` will be processed together.
stdevs (float or tuple of float, optional): The standard deviation
of gaussian noise with zero mean that is added to each
input in the batch. If `stdevs` is a single float value
then that same value is used for all inputs. If it is
a tuple, then it must have the same length as the inputs
tuple. In this case, each stdev value in the stdevs tuple
corresponds to the input with the same index in the inputs
tuple.
Default: `1.0` if `stdevs` is not provided.
draw_baseline_from_distrib (bool, optional): Indicates whether to
randomly draw baseline samples from the `baselines`
distribution provided as an input tensor.
Default: False
**kwargs (Any, optional): Contains a list of arguments that are passed
to `attribution_method` attribution algorithm.
Any additional arguments that should be used for the
chosen attribution method should be included here.
For instance, such arguments include
`additional_forward_args` and `baselines`.
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution with
respect to each input feature. attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*float*, returned if return_convergence_delta=True):
Approximation error computed by the
attribution algorithm. Not all attribution algorithms
return delta value. It is computed only for some
algorithms, e.g. integrated gradients.
Delta is computed for each input in the batch
and represents the arithmetic mean
across all `nt_samples` perturbed tensors for that input.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> ig = IntegratedGradients(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Creates noise tunnel
>>> nt = NoiseTunnel(ig)
>>> # Generates 10 perturbed input tensors per image.
>>> # Computes integrated gradients for class 3 for each generated
>>> # input and averages attributions across all 10
>>> # perturbed inputs per image
>>> attribution = nt.attribute(input, nt_type='smoothgrad',
>>> nt_samples=10, target=3)
"""
def add_noise_to_inputs(nt_samples_partition: int) -> Tuple[Tensor, ...]:
if isinstance(stdevs, tuple):
assert len(stdevs) == len(inputs), (
"The number of input tensors "
"in {} must be equal to the number of stdevs values {}".format(
len(inputs), len(stdevs)
)
)
else:
assert isinstance(
stdevs, float
), "stdevs must be type float. " "Given: {}".format(type(stdevs))
stdevs_ = (stdevs,) * len(inputs)
return tuple(
add_noise_to_input(input, stdev, nt_samples_partition).requires_grad_()
if self.is_gradient_method
else add_noise_to_input(input, stdev, nt_samples_partition)
for (input, stdev) in zip(inputs, stdevs_)
)
def add_noise_to_input(
input: Tensor, stdev: float, nt_samples_partition: int
) -> Tensor:
# batch size
bsz = input.shape[0]
# expand input size by the number of drawn samples
input_expanded_size = (bsz * nt_samples_partition,) + input.shape[1:]
# expand stdev for the shape of the input and number of drawn samples
stdev_expanded = torch.tensor(stdev, device=input.device).repeat(
input_expanded_size
)
# draws `np.prod(input_expanded_size)` samples from normal distribution
# with given input parametrization
            # FIXME: it looks like it is very difficult to make torch.normal
            # deterministic; this needs further investigation
noise = torch.normal(0, stdev_expanded)
return input.repeat_interleave(nt_samples_partition, dim=0) + noise
def update_sum_attribution_and_sq(
sum_attribution: List[Tensor],
sum_attribution_sq: List[Tensor],
attribution: Tensor,
i: int,
nt_samples_batch_size_inter: int,
) -> None:
bsz = attribution.shape[0] // nt_samples_batch_size_inter
attribution_shape = cast(
Tuple[int, ...], (bsz, nt_samples_batch_size_inter)
)
if len(attribution.shape) > 1:
attribution_shape += cast(Tuple[int, ...], tuple(attribution.shape[1:]))
attribution = attribution.view(attribution_shape)
current_attribution_sum = attribution.sum(dim=1, keepdim=False)
current_attribution_sq = torch.sum(attribution**2, dim=1, keepdim=False)
sum_attribution[i] = (
current_attribution_sum
if not isinstance(sum_attribution[i], torch.Tensor)
else sum_attribution[i] + current_attribution_sum
)
sum_attribution_sq[i] = (
current_attribution_sq
if not isinstance(sum_attribution_sq[i], torch.Tensor)
else sum_attribution_sq[i] + current_attribution_sq
)
def compute_partial_attribution(
inputs_with_noise_partition: Tuple[Tensor, ...], kwargs_partition: Any
) -> Tuple[Tuple[Tensor, ...], bool, Union[None, Tensor]]:
# smoothgrad_Attr(x) = 1 / n * sum(Attr(x + N(0, sigma^2))
# NOTE: using __wrapped__ such that it does not log the inner logs
attributions = attr_func.__wrapped__( # type: ignore
self.attribution_method, # self
inputs_with_noise_partition
if is_inputs_tuple
else inputs_with_noise_partition[0],
**kwargs_partition,
)
delta = None
if self.is_delta_supported and return_convergence_delta:
attributions, delta = attributions
is_attrib_tuple = _is_tuple(attributions)
attributions = _format_tensor_into_tuples(attributions)
return (
cast(Tuple[Tensor, ...], attributions),
cast(bool, is_attrib_tuple),
delta,
)
def expand_partial(nt_samples_partition: int, kwargs_partial: dict) -> None:
# if the algorithm supports targets, baselines and/or
# additional_forward_args they will be expanded based
# on the nt_samples_partition and corresponding kwargs
# variables will be updated accordingly
_expand_and_update_additional_forward_args(
nt_samples_partition, kwargs_partial
)
_expand_and_update_target(nt_samples_partition, kwargs_partial)
_expand_and_update_baselines(
cast(Tuple[Tensor, ...], inputs),
nt_samples_partition,
kwargs_partial,
draw_baseline_from_distrib=draw_baseline_from_distrib,
)
_expand_and_update_feature_mask(nt_samples_partition, kwargs_partial)
def compute_smoothing(
expected_attributions: Tuple[Union[Tensor], ...],
expected_attributions_sq: Tuple[Union[Tensor], ...],
) -> Tuple[Tensor, ...]:
if NoiseTunnelType[nt_type] == NoiseTunnelType.smoothgrad:
return expected_attributions
if NoiseTunnelType[nt_type] == NoiseTunnelType.smoothgrad_sq:
return expected_attributions_sq
vargrad = tuple(
expected_attribution_sq - expected_attribution * expected_attribution
for expected_attribution, expected_attribution_sq in zip(
expected_attributions, expected_attributions_sq
)
)
return cast(Tuple[Tensor, ...], vargrad)
def update_partial_attribution_and_delta(
attributions_partial: Tuple[Tensor, ...],
delta_partial: Tensor,
sum_attributions: List[Tensor],
sum_attributions_sq: List[Tensor],
delta_partial_list: List[Tensor],
nt_samples_partial: int,
) -> None:
for i, attribution_partial in enumerate(attributions_partial):
update_sum_attribution_and_sq(
sum_attributions,
sum_attributions_sq,
attribution_partial,
i,
nt_samples_partial,
)
if self.is_delta_supported and return_convergence_delta:
delta_partial_list.append(delta_partial)
return_convergence_delta: bool
return_convergence_delta = (
"return_convergence_delta" in kwargs and kwargs["return_convergence_delta"]
)
with torch.no_grad():
nt_samples_batch_size = (
nt_samples
if nt_samples_batch_size is None
else min(nt_samples, nt_samples_batch_size)
)
nt_samples_partition = nt_samples // nt_samples_batch_size
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = isinstance(inputs, tuple)
inputs = _format_tensor_into_tuples(inputs) # type: ignore
_validate_noise_tunnel_type(nt_type, SUPPORTED_NOISE_TUNNEL_TYPES)
kwargs_copy = kwargs.copy()
expand_partial(nt_samples_batch_size, kwargs_copy)
attr_func = self.attribution_method.attribute
sum_attributions: List[Union[None, Tensor]] = []
sum_attributions_sq: List[Union[None, Tensor]] = []
delta_partial_list: List[Tensor] = []
for _ in range(nt_samples_partition):
inputs_with_noise = add_noise_to_inputs(nt_samples_batch_size)
(
attributions_partial,
is_attrib_tuple,
delta_partial,
) = compute_partial_attribution(inputs_with_noise, kwargs_copy)
if len(sum_attributions) == 0:
sum_attributions = [None] * len(attributions_partial)
sum_attributions_sq = [None] * len(attributions_partial)
update_partial_attribution_and_delta(
cast(Tuple[Tensor, ...], attributions_partial),
cast(Tensor, delta_partial),
cast(List[Tensor], sum_attributions),
cast(List[Tensor], sum_attributions_sq),
delta_partial_list,
nt_samples_batch_size,
)
nt_samples_remaining = (
nt_samples - nt_samples_partition * nt_samples_batch_size
)
if nt_samples_remaining > 0:
inputs_with_noise = add_noise_to_inputs(nt_samples_remaining)
expand_partial(nt_samples_remaining, kwargs)
(
attributions_partial,
is_attrib_tuple,
delta_partial,
) = compute_partial_attribution(inputs_with_noise, kwargs)
update_partial_attribution_and_delta(
cast(Tuple[Tensor, ...], attributions_partial),
cast(Tensor, delta_partial),
cast(List[Tensor], sum_attributions),
cast(List[Tensor], sum_attributions_sq),
delta_partial_list,
nt_samples_remaining,
)
expected_attributions = tuple(
[
cast(Tensor, sum_attribution) * 1 / nt_samples
for sum_attribution in sum_attributions
]
)
expected_attributions_sq = tuple(
[
cast(Tensor, sum_attribution_sq) * 1 / nt_samples
for sum_attribution_sq in sum_attributions_sq
]
)
attributions = compute_smoothing(
cast(Tuple[Tensor, ...], expected_attributions),
cast(Tuple[Tensor, ...], expected_attributions_sq),
)
delta = None
if self.is_delta_supported and return_convergence_delta:
delta = torch.cat(delta_partial_list, dim=0)
return self._apply_checks_and_return_attributions(
attributions, is_attrib_tuple, return_convergence_delta, delta
)
def _apply_checks_and_return_attributions(
self,
attributions: Tuple[Tensor, ...],
is_attrib_tuple: bool,
return_convergence_delta: bool,
delta: Union[None, Tensor],
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
attributions = _format_output(is_attrib_tuple, attributions)
ret = (
(attributions, cast(Tensor, delta))
if self.is_delta_supported and return_convergence_delta
else attributions
)
ret = cast(
Union[
TensorOrTupleOfTensorsGeneric,
Tuple[TensorOrTupleOfTensorsGeneric, Tensor],
],
ret,
)
return ret
def has_convergence_delta(self) -> bool:
return self.is_delta_supported
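# Illustrative usage sketch (hypothetical model, input and target; not part of the
# library): NoiseTunnel wraps another attribution method and aggregates its
# attributions over noisy copies of the input.
#
#   from captum.attr import IntegratedGradients, NoiseTunnel
#
#   nt = NoiseTunnel(IntegratedGradients(model))
#   attributions = nt.attribute(
#       inputs,
#       nt_type="smoothgrad",       # or "smoothgrad_sq" / "vargrad"
#       nt_samples=25,
#       nt_samples_batch_size=5,    # process noisy samples in chunks of 5
#       stdevs=0.1,
#       target=3,
#   )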
|
#!/usr/bin/env python3
from collections import defaultdict
from copy import copy
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
from captum._utils.common import (
_format_baseline,
_format_feature_mask,
_format_output,
_format_tensor_into_tuples,
_get_max_feature_index,
_run_forward,
)
from captum._utils.typing import BaselineType
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._utils.attribution import Attribution
from torch import Tensor
class InputRole:
need_attr = 0
need_forward = 1
no_forward = 2
SUPPORTED_METHODS = {FeatureAblation}
# default reducer when reduce is None; simply concatenates the outputs along the batch dimension
def _concat_tensors(accum, cur_output, _):
return cur_output if accum is None else torch.cat([accum, cur_output])
def _create_perturbation_mask(
perturbed_feature_indices: Tensor, # 1D tensor of one-hot feature indices
feature_mask: Tuple[Tensor, ...],
feature_idx_to_mask_idx: Dict[int, List[int]],
) -> Tuple[Union[Tensor, None], ...]:
"""
Create binary mask for inputs based on perturbed one-hot feature indices
Use None if no perturbation is needed for the corresponding input
"""
# a set of input/mask indices that need perturbation
perturbation_mask_indices = set()
for i, v in enumerate(perturbed_feature_indices.tolist()):
# value 0 means the feature has been perturbed
if not v:
perturbation_mask_indices |= set(feature_idx_to_mask_idx[i])
# create binary mask for inputs & set it to None if no perturbation is needed
perturbation_mask = tuple(
perturbed_feature_indices[mask_elem] if i in perturbation_mask_indices else None
for i, mask_elem in enumerate(feature_mask)
)
return perturbation_mask
def _perturb_inputs(
inputs: Iterable[Any],
input_roles: Tuple[int],
baselines: Tuple[Union[int, float, Tensor], ...],
perturbation_mask: Tuple[Union[Tensor, None], ...],
) -> Tuple[Any, ...]:
"""
Perturb inputs based on perturbation mask and baselines
"""
perturbed_inputs = []
attr_inp_count = 0
for inp, role in zip(inputs, input_roles):
if role != InputRole.need_attr:
perturbed_inputs.append(inp)
continue
pert_mask = perturbation_mask[attr_inp_count]
# no perturbation is needed for this input
if pert_mask is None:
perturbed_inputs.append(inp)
else:
baseline = baselines[attr_inp_count]
perturbed_inp = inp * pert_mask + baseline * (1 - pert_mask)
perturbed_inputs.append(perturbed_inp)
attr_inp_count += 1
perturbed_inputs = tuple(perturbed_inputs)
return perturbed_inputs
def _convert_output_shape(
unique_attr: Tensor,
attr_inputs: Tuple[Tensor, ...],
feature_mask: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
"""
    Convert the shape of a single tensor of unique feature attributions
    to match the shape of the inputs returned by the dataloader
"""
# unique_attr in shape(*output_dims, n_features)
output_dims = unique_attr.shape[:-1]
n_features = unique_attr.shape[-1]
attr = []
for inp, mask in zip(attr_inputs, feature_mask):
# input in shape(batch_size, *inp_feature_dims)
# attribute in shape(*output_dims, *inp_feature_dims)
attr_shape = (*output_dims, *inp.shape[1:])
expanded_feature_indices = mask.expand(attr_shape)
if len(inp.shape) > 2:
            # exclude the batch dim and the last dim that holds the actual values
extra_inp_dims = list(inp.shape[1:-1])
            # unsqueeze unique_attr to have the same number of dims as inp
# (*output_dims, 1..., 1, n_features)
# then broadcast to (*output_dims, *inp.shape[1:-1], n_features)
n_extra_dims = len(extra_inp_dims)
unsqueezed_shape = (*output_dims, *(1,) * n_extra_dims, n_features)
expanded_shape = (*output_dims, *extra_inp_dims, n_features)
            expanded_unique_attr = unique_attr.reshape(unsqueezed_shape).expand(
                expanded_shape
            )
else:
            expanded_unique_attr = unique_attr
# gather from (*output_dims, *inp.shape[1:-1], n_features)
        inp_attr = torch.gather(expanded_unique_attr, -1, expanded_feature_indices)
attr.append(inp_attr)
return tuple(attr)
class DataLoaderAttribution(Attribution):
r"""
Decorate a perturbation-based attribution algorthm to make it work with dataloaders.
The decorated instance will calculate attribution in the
same way as configured in the original attribution instance, but it will provide a
new "attribute" function which accepts a pytorch "dataloader" instance as the input
instead of a single batched "tensor" and supports customizing a "reduce" function to
determine how the forward return of each iteration of the dataloader should be
aggregated to single metric tensor to attribute. This would
be specially useful to attribute against some corpus-wise metrics,
e.g., Precision & Recall.
"""
def __init__(self, attr_method: Attribution) -> None:
r"""
Args:
attr_method (Attribution): An instance of any attribution algorithm
of type `Attribution`. E.g. Integrated Gradients,
Conductance or Saliency.
"""
assert (
type(attr_method) in SUPPORTED_METHODS
), f"DataloaderAttribution does not support {type(attr_method)}"
super().__init__(attr_method.forward_func)
# shallow copy is enough to avoid modifying original instance
self.attr_method = copy(attr_method)
self.attr_method.forward_func = self._forward_with_dataloader
def _forward_with_dataloader(
self,
batched_perturbed_feature_indices: Tensor,
dataloader: torch.utils.data.DataLoader,
input_roles: Tuple[int],
baselines: Tuple[Union[int, float, Tensor], ...],
feature_mask: Tuple[Tensor, ...],
reduce: Callable,
to_metric: Optional[Callable],
show_progress: bool,
feature_idx_to_mask_idx: Dict[int, List[int]],
):
"""
Wrapper of the original given forward_func to be used in the attribution method
It iterates over the dataloader with the given forward_func
"""
# batched_perturbed_feature_indices in shape(n_perturb, n_features)
        # n_perturb is not always the same as perturbations_per_pass if there are
        # not enough perturbations left
perturbation_mask_list: List[Tuple[Union[Tensor, None], ...]] = [
_create_perturbation_mask(
perturbed_feature_indices,
feature_mask,
feature_idx_to_mask_idx,
)
for perturbed_feature_indices in batched_perturbed_feature_indices
]
# each perturbation needs an accum state
accum_states = [None for _ in range(len(perturbation_mask_list))]
        # traverse the dataloader
for inputs in dataloader:
# for each batch read from the dataloader,
# apply every perturbation based on perturbations_per_pass
for i, perturbation_mask in enumerate(perturbation_mask_list):
perturbed_inputs = _perturb_inputs(
inputs, input_roles, baselines, perturbation_mask
)
# due to explicitly defined roles
# we can keep inputs in their original order
                # regardless of whether they need attribution
# instead of using additional_forward_inputs
forward_inputs = tuple(
_
for _, role in zip(perturbed_inputs, input_roles)
if role != InputRole.no_forward
)
output = _run_forward(
self.forward_func,
forward_inputs,
)
accum_states[i] = reduce(accum_states[i], output, perturbed_inputs)
accum_results = [
to_metric(accum) if to_metric else accum for accum in accum_states
]
assert all(type(r) is Tensor for r in accum_results), (
"Accumulated metrics for attribution must be a Tensor,"
f"received: {next(r for r in accum_results if type(r) is not Tensor)}"
)
# shape(n_perturb * output_dims[0], *output_dims[1:])
# the underneath attr method needs to support forward_func output's
# 1st dim to grow with perturb_per_eval
batched_accum = torch.stack(accum_results, dim=0)
return batched_accum
def attribute(
self,
dataloader: torch.utils.data.DataLoader,
input_roles: Optional[Tuple[int, ...]] = None,
baselines: BaselineType = None,
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
reduce: Optional[Callable] = None,
to_metric: Optional[Callable] = None,
perturbations_per_pass: int = 1,
show_progress: bool = False,
return_input_shape: bool = True,
) -> Union[Tensor, Tuple[Tensor, ...]]:
r"""
Args:
            dataloader (torch.utils.data.DataLoader): the dataloader to attribute,
                    which should return a tuple of consistent size for every iteration
input_roles (tuple[int, ...], optional): a tuple of integers to define the
role of each element returned from the dataloader. It should
have the same size as the return of the dataloader.
The available roles are:
0: the element is passed to forward_func and needs attribution.
It must be a tensor.
                    1: the element is passed to forward_func but does not need
                    attribution. Like additional_forward_args
                    2: the element is excluded for forward_func. A typical example
                    is the label.
baselines (Union[Tensor, tuple[Tensor, ...]], optional): same as the
baseline in attribute. The same baseline will be
applied to the entire dataloader. The first dimension is
assumed to be batch size and it must be 1. Baselines should only
                    be specified for the dataloader's returns that need
attribution (role = 0)
feature_mask (Union[Tensor, tuple[Tensor, ...]], optional): same as the
feature_mask in attribute. The same feature_mask will be
applied to the entire dataloader. The first dimension is
assumed to be batch size and it must be 1. Mask should only
                    be specified for the dataloader's returns that need
attribution (role = 0)
reduce (Callable, optional): a function to accumulate the forward output of
each iteration of the dataloader. The function signature is:
``reduce(accum, current_output, current_inputs) -> accum``,
where:
accum (Any): accumulated states, can be any type
current_output (Tensor): current output tensor from forward_func
current_inputs (tuple[Any,...]): current inputs from dataloader
to_metric (Callable, optional): an optional function to further convert
                    accumulated results through "reduce" after traversing the whole
dataloader to a single tensor of metrics to calculate
attribution against. The function signature is:
``to_metric(accum) -> metric``, where:
accum (Any): accumulated state from reduce function
metric (Tensor): final result to be attributed, must be a Tensor
If None, will directly attribute w.r.t the reduced ``accum``
            perturbations_per_pass (int, optional): the number of perturbations to execute
concurrently in each traverse of the dataloader. The number of
traverses needed is
ceil(n_perturbations / perturbations_per_pass).
                    This argument offers control of the trade-off between memory
                    and efficiency. If the dataloader involves slow operations like
                    remote requests or file I/O, multiple traversals can be
                    inefficient. On the other hand, each perturbation needs to
                    store its accumulated outputs of the reduce
                    function until the end of the data traversal.
return_input_shape (bool, optional): if True, returns the attribution
following the input shapes given by the dataloader.
Otherwise, returns a single tensor for the attributions of
all the features, where the last dimension
is the number of features.
Returns:
**attributions** :
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution with respect to each input feature.
if return_input_shape is True, attributions will be
the same size as the given dataloader's returns that need
attribution (role = 0), with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
If return_input_shape is False, a single tensor is returned
where each index of the last dimension represents a feature
"""
inputs = next(iter(dataloader))
is_inputs_tuple = True
if type(inputs) is list:
# support list as it is a common return type for dataloader in torch
inputs = tuple(inputs)
elif type(inputs) is not tuple:
is_inputs_tuple = False
inputs = _format_tensor_into_tuples(inputs)
if input_roles:
            assert len(input_roles) == len(inputs), (
                "input_roles must have the same size as the return of the dataloader, "
                f"length of input_roles is {len(input_roles)} "
                f"whereas the length of dataloader return is {len(inputs)}"
            )
            assert any(role == InputRole.need_attr for role in input_roles), (
                "input_roles must contain at least one element that needs attribution "
                f"({InputRole.need_attr}), received input_roles: {input_roles}"
            )
else:
# by default, assume every element in the dataloader needs attribution
input_roles = tuple(InputRole.need_attr for _ in inputs)
attr_inputs = tuple(
inp for role, inp in zip(input_roles, inputs) if role == InputRole.need_attr
)
baselines = _format_baseline(baselines, attr_inputs)
        assert len(attr_inputs) == len(baselines), (
            "Baselines must have the same size as the returns of the dataloader "
            "that need attribution, "
            f"length of baseline is {len(baselines)} "
            f'whereas the length of dataloader return with role "0" is {len(attr_inputs)}'
        )
for i, baseline in enumerate(baselines):
if isinstance(baseline, Tensor):
                assert baseline.size(0) == 1, (
                    "If the baseline is a tensor, "
                    "its 1st dim must be 1 so it can be broadcasted to "
                    "any batch of the dataloader: "
                    f"baselines[{i}].shape = {baseline.shape}"
                )
feature_mask = _format_feature_mask(feature_mask, attr_inputs)
        assert len(attr_inputs) == len(feature_mask), (
            "Feature mask must have the same size as the returns of the dataloader "
            "that need attribution, "
            f"length of feature_mask is {len(feature_mask)} "
            f'whereas the length of dataloader return with role "0" is {len(attr_inputs)}'
        )
for i, each_mask in enumerate(feature_mask):
            assert each_mask.size(0) == 1, (
                "The 1st dim of feature_mask must be 1 so it can be broadcasted to "
                "any batch of the dataloader: "
                f"feature_mask[{i}].shape = {each_mask.shape}"
            )
        # map to retrieve the masks that contain a given feature index
feature_idx_to_mask_idx = defaultdict(list)
for i, mask in enumerate(feature_mask):
            unique_feature_indices = torch.unique(mask).tolist()
            for feature_idx in unique_feature_indices:
feature_idx_to_mask_idx[feature_idx].append(i)
max_feature_idx = _get_max_feature_index(feature_mask)
n_features = max_feature_idx + 1
if reduce is None:
reduce = _concat_tensors
        # all-ones tensor over the feature indices; the wrapped attribution method
        # perturbs (zeroes out) its entries to select which features to ablate
feature_indices = torch.ones((1, n_features), device=attr_inputs[0].device)
# unique_attr in shape(*output_dims, n_features)
unique_attr = self.attr_method.attribute(
feature_indices,
perturbations_per_eval=perturbations_per_pass,
additional_forward_args=(
dataloader,
input_roles,
baselines,
feature_mask,
reduce,
to_metric,
show_progress,
feature_idx_to_mask_idx,
),
)
if not return_input_shape:
return unique_attr
else:
attr = _convert_output_shape(
unique_attr,
attr_inputs,
feature_mask,
)
return _format_output(is_inputs_tuple, attr)
|
#!/usr/bin/env python3
import warnings
from typing import Any, List, Tuple, Union
import torch
import torch.nn.functional as F
from captum._utils.common import (
_format_output,
_format_tensor_into_tuples,
_is_tuple,
_register_backward_hook,
)
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
class ModifiedReluGradientAttribution(GradientAttribution):
def __init__(self, model: Module, use_relu_grad_output: bool = False) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
"""
GradientAttribution.__init__(self, model)
self.model = model
self.backward_hooks: List[RemovableHandle] = []
self.use_relu_grad_output = use_relu_grad_output
assert isinstance(self.model, torch.nn.Module), (
"Given model must be an instance of torch.nn.Module to properly hook"
" ReLU layers."
)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
        Computes attribution by overriding relu gradients. Based on the constructor
        flag use_relu_grad_output, performs either Guided Backpropagation (if False)
        or Deconvolution (if True). This class is the parent class of both these
methods, more information on usage can be found in the docstrings for each
implementing class.
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
# set hooks for overriding ReLU gradients
warnings.warn(
"Setting backward hooks on ReLU activations."
"The hooks will be removed after the attribution is finished"
)
try:
self.model.apply(self._register_hooks)
gradients = self.gradient_func(
self.forward_func, inputs, target, additional_forward_args
)
finally:
self._remove_hooks()
undo_gradient_requirements(inputs, gradient_mask)
return _format_output(is_inputs_tuple, gradients)
def _register_hooks(self, module: Module):
if isinstance(module, torch.nn.ReLU):
hooks = _register_backward_hook(module, self._backward_hook, self)
self.backward_hooks.extend(hooks)
def _backward_hook(
self,
module: Module,
grad_input: Union[Tensor, Tuple[Tensor, ...]],
grad_output: Union[Tensor, Tuple[Tensor, ...]],
):
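        # grad_input is the gradient w.r.t. the ReLU's input, grad_output w.r.t. its
        # output; clamping grad_input (use_relu_grad_output=False) gives guided
        # backpropagation, clamping grad_output (True) gives deconvolution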
to_override_grads = grad_output if self.use_relu_grad_output else grad_input
if isinstance(to_override_grads, tuple):
return tuple(
F.relu(to_override_grad) for to_override_grad in to_override_grads
)
else:
return F.relu(to_override_grads)
def _remove_hooks(self):
for hook in self.backward_hooks:
hook.remove()
class GuidedBackprop(ModifiedReluGradientAttribution):
r"""
Computes attribution using guided backpropagation. Guided backpropagation
computes the gradient of the target output with respect to the input,
but gradients of ReLU functions are overridden so that only
non-negative gradients are backpropagated.
More details regarding the guided backpropagation algorithm can be found
in the original paper here:
https://arxiv.org/abs/1412.6806
Warning: Ensure that all ReLU operations in the forward function of the
    given model are performed using a module (nn.ReLU).
    If nn.functional.relu is used, gradients are not overridden appropriately.
"""
def __init__(self, model: Module) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
"""
ModifiedReluGradientAttribution.__init__(
self, model, use_relu_grad_output=False
)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The guided backprop gradients with respect to each
input feature. Attributions will always
be the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> gbp = GuidedBackprop(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes Guided Backprop attribution scores for class 3.
>>> attribution = gbp.attribute(input, target=3)
"""
return super().attribute.__wrapped__(
self, inputs, target, additional_forward_args
)
class Deconvolution(ModifiedReluGradientAttribution):
r"""
Computes attribution using deconvolution. Deconvolution
computes the gradient of the target output with respect to the input,
but gradients of ReLU functions are overridden so that the gradient
    of the ReLU input is simply computed by taking the ReLU of the output gradient,
essentially only propagating non-negative gradients (without
dependence on the sign of the ReLU input).
More details regarding the deconvolution algorithm can be found
in these papers:
https://arxiv.org/abs/1311.2901
https://link.springer.com/chapter/10.1007/978-3-319-46466-4_8
Warning: Ensure that all ReLU operations in the forward function of the
    given model are performed using a module (nn.ReLU).
    If nn.functional.relu is used, gradients are not overridden appropriately.
"""
def __init__(self, model: Module) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
"""
ModifiedReluGradientAttribution.__init__(self, model, use_relu_grad_output=True)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The deconvolution attributions with respect to each
input feature. Attributions will always
be the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> deconv = Deconvolution(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes Deconvolution attribution scores for class 3.
>>> attribution = deconv.attribute(input, target=3)
"""
return super().attribute.__wrapped__(
self, inputs, target, additional_forward_args
)
|
#!/usr/bin/env python3
import typing
from collections import defaultdict
from typing import Any, cast, List, Tuple, Union
import torch.nn as nn
from captum._utils.common import (
_format_output,
_format_tensor_into_tuples,
_is_tuple,
_register_backward_hook,
_run_forward,
)
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import Literal, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution
from captum.attr._utils.common import _sum_rows
from captum.attr._utils.custom_modules import Addition_Module
from captum.attr._utils.lrp_rules import EpsilonRule, PropagationRule
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
class LRP(GradientAttribution):
r"""
Layer-wise relevance propagation is based on a backward propagation
mechanism applied sequentially to all layers of the model. Here, the
model output score represents the initial relevance which is decomposed
into values for each neuron of the underlying layers. The decomposition
is defined by rules that are chosen for each layer, involving its weights
and activations. Details on the model can be found in the original paper
[https://doi.org/10.1371/journal.pone.0130140]. The implementation is
inspired by the tutorial of the same group
[https://doi.org/10.1016/j.dsp.2017.10.011] and the publication by
Ancona et al. [https://openreview.net/forum?id=Sy21R9JAW].
"""
def __init__(self, model: Module) -> None:
r"""
Args:
model (Module): The forward function of the model or any modification of
it. Custom rules for a given layer need to be defined as attribute
`module.rule` and need to be of type PropagationRule. If no rule is
specified for a layer, a pre-defined default rule for the module type
is used.
"""
GradientAttribution.__init__(self, model)
self.model = model
self._check_rules()
@property
def multiplies_by_inputs(self) -> bool:
return True
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
verbose: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
verbose: bool = False,
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
verbose: bool = False,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which relevance is
propagated. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
verbose (bool, optional): Indicates whether information on application
of rules is printed during propagation.
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**
or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The propagated relevance values with respect to each
input feature. The values are normalized by the output score
value (sum(relevance)=1). To obtain values comparable to other
methods or implementations these values need to be multiplied
by the output score. Attributions will always
be the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
                        corresponding sized tensors is returned. The sum of attributions
                        is one and does not correspond to the prediction score as in
                        other implementations.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
Delta is calculated per example, meaning that the number of
                        elements in the returned delta tensor is equal to the number of
                        examples in the inputs.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities. It has one
>>> # Conv2D and a ReLU layer.
>>> net = ImageClassifier()
>>> lrp = LRP(net)
>>> input = torch.randn(3, 3, 32, 32)
>>> # Attribution size matches input size: 3x3x32x32
>>> attribution = lrp.attribute(input, target=5)
"""
self.verbose = verbose
self._original_state_dict = self.model.state_dict()
self.layers: List[Module] = []
self._get_layers(self.model)
self._check_and_attach_rules()
self.backward_handles: List[RemovableHandle] = []
self.forward_handles: List[RemovableHandle] = []
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
try:
# 1. Forward pass: Change weights of layers according to selected rules.
output = self._compute_output_and_change_weights(
inputs, target, additional_forward_args
)
# 2. Forward pass + backward pass: Register hooks to configure relevance
# propagation and execute back-propagation.
self._register_forward_hooks()
normalized_relevances = self.gradient_func(
self._forward_fn_wrapper, inputs, target, additional_forward_args
)
relevances = tuple(
normalized_relevance
* output.reshape((-1,) + (1,) * (normalized_relevance.dim() - 1))
for normalized_relevance in normalized_relevances
)
finally:
self._restore_model()
undo_gradient_requirements(inputs, gradient_mask)
if return_convergence_delta:
return (
_format_output(is_inputs_tuple, relevances),
self.compute_convergence_delta(relevances, output),
)
else:
return _format_output(is_inputs_tuple, relevances) # type: ignore
def has_convergence_delta(self) -> bool:
return True
def compute_convergence_delta(
self, attributions: Union[Tensor, Tuple[Tensor, ...]], output: Tensor
) -> Tensor:
"""
Here, we use the completeness property of LRP: The relevance is conserved
during the propagation through the models' layers. Therefore, the difference
between the sum of attribution (relevance) values and model output is taken as
the convergence delta. It should be zero for functional attribution. However,
when rules with an epsilon value are used for stability reasons, relevance is
absorbed during propagation and the convergence delta is non-zero.
Args:
attributions (Tensor or tuple[Tensor, ...]): Attribution scores that
are precomputed by an attribution algorithm.
Attributions can be provided in form of a single tensor
or a tuple of those. It is assumed that attribution
tensor's dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
output (Tensor): The output value with respect to which
the attribution values are computed. This value corresponds to
the target score of a classification model. The given tensor
should only have a single element.
Returns:
*Tensor*:
- **delta** Difference of relevance in output layer and input layer.
"""
        if isinstance(attributions, tuple):
            summed_attr = cast(
                Tensor, sum(_sum_rows(attr) for attr in attributions)
            )
else:
summed_attr = _sum_rows(attributions)
return output.flatten() - summed_attr.flatten()
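    # Hedged usage sketch of the completeness check described above (model, input and
    # target are placeholders):
    #
    #   attrs, delta = lrp.attribute(inputs, target=3, return_convergence_delta=True)
    #   # delta has one entry per example; values far from zero indicate relevance
    #   # absorbed by epsilon stabilization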
def _get_layers(self, model: Module) -> None:
for layer in model.children():
if len(list(layer.children())) == 0:
self.layers.append(layer)
else:
self._get_layers(layer)
def _check_and_attach_rules(self) -> None:
for layer in self.layers:
if hasattr(layer, "rule"):
layer.activations = {} # type: ignore
layer.rule.relevance_input = defaultdict(list) # type: ignore
layer.rule.relevance_output = {} # type: ignore
elif type(layer) in SUPPORTED_LAYERS_WITH_RULES.keys():
layer.activations = {} # type: ignore
layer.rule = SUPPORTED_LAYERS_WITH_RULES[type(layer)]() # type: ignore
layer.rule.relevance_input = defaultdict(list) # type: ignore
layer.rule.relevance_output = {} # type: ignore
elif type(layer) in SUPPORTED_NON_LINEAR_LAYERS:
layer.rule = None # type: ignore
else:
raise TypeError(
(
f"Module of type {type(layer)} has no rule defined and no"
"default rule exists for this module type. Please, set a rule"
"explicitly for this module and assure that it is appropriate"
"for this type of layer."
)
)
def _check_rules(self) -> None:
for module in self.model.modules():
if hasattr(module, "rule"):
if (
not isinstance(module.rule, PropagationRule)
and module.rule is not None
):
raise TypeError(
(
f"Please select propagation rules inherited from class "
f"PropagationRule for module: {module}"
)
)
def _register_forward_hooks(self) -> None:
for layer in self.layers:
if type(layer) in SUPPORTED_NON_LINEAR_LAYERS:
backward_handles = _register_backward_hook(
layer, PropagationRule.backward_hook_activation, self
)
self.backward_handles.extend(backward_handles)
else:
forward_handle = layer.register_forward_hook(
layer.rule.forward_hook # type: ignore
)
self.forward_handles.append(forward_handle)
if self.verbose:
print(f"Applied {layer.rule} on layer {layer}")
def _register_weight_hooks(self) -> None:
for layer in self.layers:
if layer.rule is not None:
forward_handle = layer.register_forward_hook(
layer.rule.forward_hook_weights # type: ignore
)
self.forward_handles.append(forward_handle)
def _register_pre_hooks(self) -> None:
for layer in self.layers:
if layer.rule is not None:
forward_handle = layer.register_forward_pre_hook(
layer.rule.forward_pre_hook_activations # type: ignore
)
self.forward_handles.append(forward_handle)
def _compute_output_and_change_weights(
self,
inputs: Tuple[Tensor, ...],
target: TargetType,
additional_forward_args: Any,
) -> Tensor:
try:
self._register_weight_hooks()
output = _run_forward(self.model, inputs, target, additional_forward_args)
finally:
self._remove_forward_hooks()
# Register pre_hooks that pass the initial activations from before weight
# adjustments as inputs to the layers with adjusted weights. This procedure
# is important for graph generation in the 2nd forward pass.
self._register_pre_hooks()
return output
def _remove_forward_hooks(self) -> None:
for forward_handle in self.forward_handles:
forward_handle.remove()
def _remove_backward_hooks(self) -> None:
for backward_handle in self.backward_handles:
backward_handle.remove()
for layer in self.layers:
if hasattr(layer.rule, "_handle_input_hooks"):
for handle in layer.rule._handle_input_hooks: # type: ignore
handle.remove()
if hasattr(layer.rule, "_handle_output_hook"):
layer.rule._handle_output_hook.remove() # type: ignore
def _remove_rules(self) -> None:
for layer in self.layers:
if hasattr(layer, "rule"):
del layer.rule
def _clear_properties(self) -> None:
for layer in self.layers:
if hasattr(layer, "activation"):
del layer.activation
def _restore_state(self) -> None:
self.model.load_state_dict(self._original_state_dict) # type: ignore
def _restore_model(self) -> None:
self._restore_state()
self._remove_backward_hooks()
self._remove_forward_hooks()
self._remove_rules()
self._clear_properties()
def _forward_fn_wrapper(self, *inputs: Tensor) -> Tensor:
"""
Wraps a forward function with addition of zero as a workaround to
https://github.com/pytorch/pytorch/issues/35802 discussed in
https://github.com/pytorch/captum/issues/143#issuecomment-611750044
#TODO: Remove when bugs are fixed
"""
adjusted_inputs = tuple(
input + 0 if input is not None else input for input in inputs
)
return self.model(*adjusted_inputs)
SUPPORTED_LAYERS_WITH_RULES = {
nn.MaxPool1d: EpsilonRule,
nn.MaxPool2d: EpsilonRule,
nn.MaxPool3d: EpsilonRule,
nn.Conv2d: EpsilonRule,
nn.AvgPool2d: EpsilonRule,
nn.AdaptiveAvgPool2d: EpsilonRule,
nn.Linear: EpsilonRule,
nn.BatchNorm2d: EpsilonRule,
Addition_Module: EpsilonRule,
}
SUPPORTED_NON_LINEAR_LAYERS = [nn.ReLU, nn.Dropout, nn.Tanh]
|
#!/usr/bin/env python3
import warnings
from typing import Any, List, Union
import torch
from captum._utils.common import _format_output, _format_tensor_into_tuples, _is_tuple
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.guided_backprop_deconvnet import GuidedBackprop
from captum.attr._core.layer.grad_cam import LayerGradCam
from captum.attr._utils.attribution import GradientAttribution, LayerAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class GuidedGradCam(GradientAttribution):
r"""
Computes element-wise product of guided backpropagation attributions
with upsampled (non-negative) GradCAM attributions.
GradCAM attributions are computed with respect to the layer
provided in the constructor, and attributions
are upsampled to match the input size. GradCAM is designed for
convolutional neural networks, and is usually applied to the last
convolutional layer.
Note that if multiple input tensors are provided, attributions for
each input tensor are computed by upsampling the GradCAM
attributions to match that input's dimensions. If interpolation is
not possible for the input tensor dimensions and interpolation mode,
then an empty tensor is returned in the attributions for the
corresponding position of that input tensor. This can occur if the
input tensor does not have the same number of dimensions as the chosen
layer's output or is not either 3D, 4D or 5D.
Note that attributions are only meaningful for input tensors
    which are spatially aligned with the chosen layer, e.g. an input
image tensor for a convolutional layer.
More details regarding GuidedGradCAM can be found in the original
GradCAM paper here:
https://arxiv.org/abs/1610.02391
Warning: Ensure that all ReLU operations in the forward function of the
    given model are performed using a module (nn.ReLU).
    If nn.functional.relu is used, gradients are not overridden appropriately.
"""
def __init__(
self, model: Module, layer: Module, device_ids: Union[None, List[int]] = None
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (torch.nn.Module): Layer for which GradCAM attributions are computed.
Currently, only layers with a single tensor output are
supported.
device_ids (list[int]): Device ID list, necessary only if model
is a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If model is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
GradientAttribution.__init__(self, model)
self.grad_cam = LayerGradCam(model, layer, device_ids)
self.guided_backprop = GuidedBackprop(model)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
interpolate_mode: str = "nearest",
attribute_to_layer_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which attributions
are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to model in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
interpolate_mode (str, optional): Method for interpolation, which
                        must be a valid input interpolation mode for
                        torch.nn.functional.interpolate. These methods are
"nearest", "area", "linear" (3D-only), "bilinear"
(4D-only), "bicubic" (4D-only), "trilinear" (5D-only)
based on the number of dimensions of the chosen layer
output (which must also match the number of
dimensions for the input tensor). Note that
the original GradCAM paper uses "bilinear"
interpolation, but we default to "nearest" for
applicability to any of 3D, 4D or 5D tensors.
Default: "nearest"
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output in `LayerGradCam`.
If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer inputs, otherwise it will be computed with respect
to layer outputs.
Note that currently it is assumed that either the input
or the output of internal layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* of **attributions**:
- **attributions** (*Tensor*):
Element-wise product of (upsampled) GradCAM
and Guided Backprop attributions.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Attributions will be the same size as the provided inputs,
with each value providing the attribution of the
corresponding input index.
                        If the GradCAM attributions cannot be upsampled to the shape
                        of a given input tensor, an empty tensor is returned in the
                        corresponding index position.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv4, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx50x8x8.
>>> # It is the last convolution layer, which is the recommended
>>> # use case for GuidedGradCAM.
>>> net = ImageClassifier()
>>> guided_gc = GuidedGradCam(net, net.conv4)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes guided GradCAM attributions for class 3.
>>> # attribution size matches input size, Nx3x32x32
>>> attribution = guided_gc.attribute(input, 3)
"""
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
grad_cam_attr = self.grad_cam.attribute.__wrapped__(
self.grad_cam, # self
inputs=inputs,
target=target,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
relu_attributions=True,
)
if isinstance(grad_cam_attr, tuple):
assert len(grad_cam_attr) == 1, (
"GuidedGradCAM attributions for layer with multiple inputs / "
"outputs is not supported."
)
grad_cam_attr = grad_cam_attr[0]
guided_backprop_attr = self.guided_backprop.attribute.__wrapped__(
self.guided_backprop, # self
inputs=inputs,
target=target,
additional_forward_args=additional_forward_args,
)
output_attr: List[Tensor] = []
for i in range(len(inputs)):
try:
output_attr.append(
guided_backprop_attr[i]
* LayerAttribution.interpolate(
grad_cam_attr,
inputs[i].shape[2:],
interpolate_mode=interpolate_mode,
)
)
except Exception:
warnings.warn(
"Couldn't appropriately interpolate GradCAM attributions for some "
"input tensors, returning empty tensor for corresponding "
"attributions."
)
output_attr.append(torch.empty(0))
return _format_output(is_inputs_tuple, tuple(output_attr))
|
#!/usr/bin/env python3
import itertools
import math
import warnings
from typing import Any, Callable, Iterable, Sequence, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_feature_mask,
_format_output,
_format_tensor_into_tuples,
_get_max_feature_index,
_is_tuple,
_run_forward,
)
from captum._utils.progress import progress
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import PerturbationAttribution
from captum.attr._utils.common import (
_find_output_mode_and_verify,
_format_input_baseline,
_tensorize_baseline,
)
from captum.log import log_usage
from torch import Tensor
def _all_perm_generator(num_features: int, num_samples: int) -> Iterable[Sequence[int]]:
for perm in itertools.permutations(range(num_features)):
yield perm
def _perm_generator(num_features: int, num_samples: int) -> Iterable[Sequence[int]]:
for _ in range(num_samples):
yield torch.randperm(num_features).tolist()
class ShapleyValueSampling(PerturbationAttribution):
"""
A perturbation based approach to compute attribution, based on the concept
of Shapley Values from cooperative game theory. This method involves taking
a random permutation of the input features and adding them one-by-one to the
given baseline. The output difference after adding each feature corresponds
    to its attribution, and these differences are averaged when repeating this
process n_samples times, each time choosing a new random permutation of
the input features.
    By default, each scalar value within
    the input tensors is taken as a feature and added independently. Passing
    a feature mask allows grouping features to be added together. This can
be used in cases such as images, where an entire segment or region
can be grouped together, measuring the importance of the segment
(feature group). Each input scalar in the group will be given the same
attribution value equal to the change in output as a result of adding back
the entire feature group.
More details regarding Shapley Value sampling can be found in these papers:
https://www.sciencedirect.com/science/article/pii/S0305054808000804
https://pdfs.semanticscholar.org/7715/bb1070691455d1fcfc6346ff458dbca77b2c.pdf
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it. The forward function can either
return a scalar per example, or a single scalar for the
full batch. If a single scalar is returned for the batch,
`perturbations_per_eval` must be 1, and the returned
attributions will have first dimension 1, corresponding to
feature importance across all examples in the batch.
"""
PerturbationAttribution.__init__(self, forward_func)
self.permutation_generator = _perm_generator
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
n_samples: int = 25,
perturbations_per_eval: int = 1,
show_progress: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
NOTE: The feature_mask argument differs from other perturbation based
methods, since feature indices can overlap across tensors. See the
description of the feature_mask argument below for more details.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which Shapley value
sampling attributions are computed. If forward_func takes
a single tensor as input, a single input tensor should
be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference value which replaces each
feature when ablated.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which difference is computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which should be added together. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Values across
all tensors should be integers in the range 0 to
num_features - 1, and indices corresponding to the same
feature should have the same value.
Note that features are grouped across tensors
(unlike feature ablation and occlusion), so
if the same index is used in different tensors, those
features are still grouped and added simultaneously.
If the forward function returns a single scalar per batch,
we enforce that the first dimension of each mask must be 1,
since attributions are returned batch-wise rather than per
example, so the attributions must correspond to the
same features (indices) in each input example.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature
Default: None
n_samples (int, optional): The number of feature permutations
tested.
Default: `25` if `n_samples` is not provided.
perturbations_per_eval (int, optional): Allows multiple ablations
to be processed simultaneously in one call to forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fall back to
a simple output of progress.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If the forward function returns
a scalar value per example, attributions will be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If the forward function returns a scalar per batch, then
attribution tensor(s) will have first dimension 1 and
the remaining dimensions will match the input.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # Defining ShapleyValueSampling interpreter
>>> svs = ShapleyValueSampling(net)
>>> # Computes attribution, taking random orderings
>>> # of the 16 features and computing the output change when adding
>>> # each feature. We average over 200 trials (random permutations).
>>> attr = svs.attribute(input, target=1, n_samples=200)
>>> # Alternatively, we may want to add features in groups, e.g.
>>> # grouping each 2x2 square of the inputs and adding them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are added
>>> # together, and the attribution for each input in the same
>>> # group (0, 1, 2, and 3) per example are the same.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> attr = svs.attribute(input, target=1, feature_mask=feature_mask)
"""
# Keeps track of whether the original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs, baselines = _format_input_baseline(inputs, baselines)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
feature_mask = _format_feature_mask(feature_mask, inputs)
assert (
isinstance(perturbations_per_eval, int) and perturbations_per_eval >= 1
), "Ablations per evaluation must be at least 1."
with torch.no_grad():
baselines = _tensorize_baseline(inputs, baselines)
num_examples = inputs[0].shape[0]
total_features = _get_max_feature_index(feature_mask) + 1
if show_progress:
attr_progress = progress(
desc=f"{self.get_name()} attribution",
total=self._get_n_evaluations(
total_features, n_samples, perturbations_per_eval
)
+ 1, # add 1 for the initial eval
)
attr_progress.update(0)
initial_eval = _run_forward(
self.forward_func, baselines, target, additional_forward_args
)
if show_progress:
attr_progress.update()
agg_output_mode = _find_output_mode_and_verify(
initial_eval, num_examples, perturbations_per_eval, feature_mask
)
# Initialize attribution totals and counts
total_attrib = [
torch.zeros_like(
input[0:1] if agg_output_mode else input, dtype=torch.float
)
for input in inputs
]
iter_count = 0
# Iterate for the number of samples, generate a permutation of the features
# and evaluate the incremental increase for each feature.
for feature_permutation in self.permutation_generator(
total_features, n_samples
):
iter_count += 1
prev_results = initial_eval
for (
current_inputs,
current_add_args,
current_target,
current_masks,
) in self._perturbation_generator(
inputs,
additional_forward_args,
target,
baselines,
feature_mask,
feature_permutation,
perturbations_per_eval,
):
if sum(torch.sum(mask).item() for mask in current_masks) == 0:
warnings.warn(
"Feature mask is missing some integers between 0 and "
"num_features, for optimal performance, make sure each"
" consecutive integer corresponds to a feature."
)
# modified_eval dimensions: 1D tensor with length
# equal to #num_examples * #features in batch
modified_eval = _run_forward(
self.forward_func,
current_inputs,
current_target,
current_add_args,
)
if show_progress:
attr_progress.update()
if agg_output_mode:
eval_diff = modified_eval - prev_results
prev_results = modified_eval
else:
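# Concatenating the previous and current evaluations lets us take, per
# perturbation step, the marginal contribution of the newly added feature:
# each consecutive block of num_examples rows in eval_diff is the output
# change caused by adding one feature.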
all_eval = torch.cat((prev_results, modified_eval), dim=0)
eval_diff = all_eval[num_examples:] - all_eval[:-num_examples]
prev_results = all_eval[-num_examples:]
for j in range(len(total_attrib)):
current_eval_diff = eval_diff
if not agg_output_mode:
# current_eval_diff dimensions:
# (#features in batch, #num_examples, 1,.. 1)
# (contains 1 more dimension than inputs). This adds extra
# dimensions of 1 to make the tensor broadcastable with the
# inputs tensor.
current_eval_diff = current_eval_diff.reshape(
(-1, num_examples) + (len(inputs[j].shape) - 1) * (1,)
)
total_attrib[j] += (
current_eval_diff * current_masks[j].float()
).sum(dim=0)
if show_progress:
attr_progress.close()
# Divide total attributions by number of random permutations and return
# formatted attributions.
attrib = tuple(
tensor_attrib_total / iter_count for tensor_attrib_total in total_attrib
)
formatted_attr = _format_output(is_inputs_tuple, attrib)
return formatted_attr
def _perturbation_generator(
self,
inputs: Tuple[Tensor, ...],
additional_args: Any,
target: TargetType,
baselines: Tuple[Tensor, ...],
input_masks: TensorOrTupleOfTensorsGeneric,
feature_permutation: Sequence[int],
perturbations_per_eval: int,
) -> Iterable[Tuple[Tuple[Tensor, ...], Any, TargetType, Tuple[Tensor, ...]]]:
"""
This method is a generator which yields each perturbation to be evaluated
including inputs, additional_forward_args, targets, and mask.
"""
# current_tensors starts at baselines and includes each additional feature as
# added based on the permutation order.
current_tensors = baselines
current_tensors_list = []
current_mask_list = []
# Compute repeated additional args and targets
additional_args_repeated = (
_expand_additional_forward_args(additional_args, perturbations_per_eval)
if additional_args is not None
else None
)
target_repeated = _expand_target(target, perturbations_per_eval)
for i in range(len(feature_permutation)):
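# Replace the baseline values with the actual input values at every position
# whose mask id equals the feature added at this permutation step; features
# added at earlier steps keep their input values from previous iterations.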
current_tensors = tuple(
current * (~(mask == feature_permutation[i])).to(current.dtype)
+ input * (mask == feature_permutation[i]).to(input.dtype)
for input, current, mask in zip(inputs, current_tensors, input_masks)
)
current_tensors_list.append(current_tensors)
current_mask_list.append(
tuple(mask == feature_permutation[i] for mask in input_masks)
)
if len(current_tensors_list) == perturbations_per_eval:
combined_inputs = tuple(
torch.cat(aligned_tensors, dim=0)
for aligned_tensors in zip(*current_tensors_list)
)
combined_masks = tuple(
torch.stack(aligned_masks, dim=0)
for aligned_masks in zip(*current_mask_list)
)
yield (
combined_inputs,
additional_args_repeated,
target_repeated,
combined_masks,
)
current_tensors_list = []
current_mask_list = []
# Create a batch with the remaining evaluations; it may not be a complete
# batch (i.e. fewer than perturbations_per_eval evaluations).
if len(current_tensors_list) != 0:
additional_args_repeated = (
_expand_additional_forward_args(
additional_args, len(current_tensors_list)
)
if additional_args is not None
else None
)
target_repeated = _expand_target(target, len(current_tensors_list))
combined_inputs = tuple(
torch.cat(aligned_tensors, dim=0)
for aligned_tensors in zip(*current_tensors_list)
)
combined_masks = tuple(
torch.stack(aligned_masks, dim=0)
for aligned_masks in zip(*current_mask_list)
)
yield (
combined_inputs,
additional_args_repeated,
target_repeated,
combined_masks,
)
def _get_n_evaluations(self, total_features, n_samples, perturbations_per_eval):
"""return the total number of forward evaluations needed"""
return math.ceil(total_features / perturbations_per_eval) * n_samples
class ShapleyValues(ShapleyValueSampling):
"""
A perturbation based approach to compute attribution, based on the concept
of Shapley Values from cooperative game theory. This method involves taking
each permutation of the input features and adding them one-by-one to the
given baseline. The output difference after adding each feature corresponds
to its attribution, and these differences are averaged over all possible
permutations of the input features.
By default, each scalar value within
the input tensors is taken as a feature and added independently. Passing
a feature mask allows grouping features to be added together. This can
be used in cases such as images, where an entire segment or region
can be grouped together, measuring the importance of the segment
(feature group). Each input scalar in the group will be given the same
attribution value equal to the change in output as a result of adding back
the entire feature group.
More details regarding Shapley Values can be found in these papers:
https://apps.dtic.mil/dtic/tr/fulltext/u2/604084.pdf
https://www.sciencedirect.com/science/article/pii/S0305054808000804
https://pdfs.semanticscholar.org/7715/bb1070691455d1fcfc6346ff458dbca77b2c.pdf
NOTE: The method implemented here is very computationally intensive, and
should only be used with a very small number of features (e.g. < 7).
This implementation simply extends ShapleyValueSampling and
evaluates all permutations, leading to a total of n * n! evaluations for n
features. Shapley values can alternatively be computed with only 2^n
evaluations, and we plan to add this approach in the future.
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it. The forward function can either
return a scalar per example, or a single scalar for the
full batch. If a single scalar is returned for the batch,
`perturbations_per_eval` must be 1, and the returned
attributions will have first dimension 1, corresponding to
feature importance across all examples in the batch.
"""
ShapleyValueSampling.__init__(self, forward_func)
self.permutation_generator = _all_perm_generator
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
perturbations_per_eval: int = 1,
show_progress: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
NOTE: The feature_mask argument differs from other perturbation based
methods, since feature indices can overlap across tensors. See the
description of the feature_mask argument below for more details.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which Shapley value
sampling attributions are computed. If forward_func takes
a single tensor as input, a single input tensor should
be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the reference value which replaces each
feature when it is ablated.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In cases when `baselines` is not provided, we internally
use a zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which difference is computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. For all other types,
the given argument is used for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which should be added together. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Values across
all tensors should be integers in the range 0 to
num_features - 1, and indices corresponding to the same
feature should have the same value.
Note that features are grouped across tensors
(unlike feature ablation and occlusion), so
if the same index is used in different tensors, those
features are still grouped and added simultaneously.
If the forward function returns a single scalar per batch,
we enforce that the first dimension of each mask must be 1,
since attributions are returned batch-wise rather than per
example, so the attributions must correspond to the
same features (indices) in each input example.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature
Default: None
perturbations_per_eval (int, optional): Allows multiple ablations
to be processed simultaneously in one call to forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
(e.g. time estimation). Otherwise, it will fall back to
a simple output of progress.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If the forward function returns
a scalar value per example, attributions will be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If the forward function returns a scalar per batch, then
attribution tensor(s) will have first dimension 1 and
the remaining dimensions will match the input.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # We may want to add features in groups, e.g.
>>> # grouping each 2x2 square of the inputs and adding them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are added
>>> # together, and the attribution for each input in the same
>>> # group (0, 1, 2, and 3) per example are the same.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> # With only 4 features, it is feasible to compute exact
>>> # Shapley Values. These can be computed as follows:
>>> sv = ShapleyValues(net)
>>> attr = sv.attribute(input, target=1, feature_mask=feature_mask)
"""
if feature_mask is None:
total_features = sum(
torch.numel(inp[0]) for inp in _format_tensor_into_tuples(inputs)
)
else:
total_features = (
int(max(torch.max(single_mask).item() for single_mask in feature_mask))
+ 1
)
if total_features >= 10:
warnings.warn(
"You are attempting to compute Shapley Values with at least 10 "
"features, which will likely be very computationally expensive."
"Consider using Shapley Value Sampling instead."
)
return super().attribute.__wrapped__(
self,
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
feature_mask=feature_mask,
perturbations_per_eval=perturbations_per_eval,
show_progress=show_progress,
)
def _get_n_evaluations(self, total_features, n_samples, perturbations_per_eval):
"""return the total number of forward evaluations needed"""
return math.ceil(total_features / perturbations_per_eval) * math.factorial(
total_features
)
|
#!/usr/bin/env python3
import typing
import warnings
from typing import Any, Callable, cast, List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_baseline,
_format_output,
_format_tensor_into_tuples,
_is_tuple,
_register_backward_hook,
_run_forward,
_select_targets,
ExpansionTypes,
)
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._utils.attribution import GradientAttribution
from captum.attr._utils.common import (
_call_custom_attribution_func,
_compute_conv_delta_and_format_attrs,
_format_callable_baseline,
_tensorize_baseline,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
class DeepLift(GradientAttribution):
r"""
Implements DeepLIFT algorithm based on the following paper:
Learning Important Features Through Propagating Activation Differences,
Avanti Shrikumar, et. al.
https://arxiv.org/abs/1704.02685
and the gradient formulation proposed in:
Towards better understanding of gradient-based attribution methods for
deep neural networks, Marco Ancona, et.al.
https://openreview.net/pdf?id=Sy21R9JAW
This implementation supports only Rescale rule. RevealCancel rule will
be supported in later releases.
In addition to that, in order to keep the implementation cleaner, DeepLIFT
for internal neurons and layers extends current implementation and is
implemented separately in LayerDeepLift and NeuronDeepLift.
Although DeepLIFT's (Rescale rule) attribution quality is comparable with
Integrated Gradients, it runs significantly faster and is preferred
for large datasets.
Currently we only support a limited number of non-linear activations
but the plan is to expand the list in the future.
Note: Currently we cannot access the building blocks of PyTorch's
built-in LSTMs, RNNs and GRUs, such as Tanh and Sigmoid.
Nonetheless, it is possible to build custom LSTMs, RNNs and GRUs
with performance similar to the built-in ones using TorchScript.
More details on how to build custom RNNs can be found here:
https://pytorch.org/blog/optimizing-cuda-rnn-with-torchscript/
"""
def __init__(
self,
model: Module,
multiply_by_inputs: bool = True,
eps: float = 1e-10,
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of DeepLift, if `multiply_by_inputs`
is set to True, final sensitivity scores
are being multiplied by (inputs - baselines).
This flag applies only if `custom_attribution_func` is
set to None.
eps (float, optional): A value at which to consider output/input change
significant when computing the gradients for non-linear layers.
This is useful to adjust, depending on your model's bit depth,
to avoid numerical issues during the gradient computation.
Default: 1e-10
"""
GradientAttribution.__init__(self, model)
self.model = model
self.eps = eps
self.forward_handles: List[RemovableHandle] = []
self.backward_handles: List[RemovableHandle] = []
self._multiply_by_inputs = multiply_by_inputs
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> TensorOrTupleOfTensorsGeneric:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In cases when `baselines` is not provided, we internally
use a zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
custom_attribution_func (Callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided, we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same
length. `custom_attribution_func` returns a tuple of
attribution tensors that have the same length as the
`inputs`.
Default: None
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution score computed based on DeepLift rescale rule with respect
to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
This is computed using the property that
the total sum of model(inputs) - model(baselines)
must equal the total sum of the attributions computed
based on DeepLift's rescale rule.
Delta is calculated per example, meaning that the number of
elements in returned delta tensor is equal to the number of
examples in input.
Note that the logic described for deltas is guaranteed when the
default logic for attribution computations is used, meaning that the
`custom_attribution_func=None`, otherwise it is not guaranteed and
depends on the specifics of the `custom_attribution_func`.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> dl = DeepLift(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes deeplift attribution scores for class 3.
>>> attribution = dl.attribute(input, target=3)
"""
# Keeps track of whether the original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
baselines = _format_baseline(baselines, inputs)
gradient_mask = apply_gradient_requirements(inputs)
_validate_input(inputs, baselines)
# set hooks for baselines
warnings.warn(
"""Setting forward, backward hooks and attributes on non-linear
activations. The hooks and attributes will be removed
after the attribution is finished"""
)
baselines = _tensorize_baseline(inputs, baselines)
main_model_hooks = []
try:
main_model_hooks = self._hook_main_model()
self.model.apply(self._register_hooks)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
expanded_target = _expand_target(
target, 2, expansion_type=ExpansionTypes.repeat
)
wrapped_forward_func = self._construct_forward_func(
self.model,
(inputs, baselines),
expanded_target,
additional_forward_args,
)
gradients = self.gradient_func(wrapped_forward_func, inputs)
if custom_attribution_func is None:
if self.multiplies_by_inputs:
attributions = tuple(
(input - baseline) * gradient
for input, baseline, gradient in zip(
inputs, baselines, gradients
)
)
else:
attributions = gradients
else:
attributions = _call_custom_attribution_func(
custom_attribution_func, gradients, inputs, baselines
)
finally:
# Even if any error is raised, remove all hooks before raising
self._remove_hooks(main_model_hooks)
undo_gradient_requirements(inputs, gradient_mask)
return _compute_conv_delta_and_format_attrs(
self,
return_convergence_delta,
attributions,
baselines,
inputs,
additional_forward_args,
target,
is_inputs_tuple,
)
def _construct_forward_func(
self,
forward_func: Callable,
inputs: Tuple,
target: TargetType = None,
additional_forward_args: Any = None,
) -> Callable:
def forward_fn():
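# The main-model hooks (see _hook_main_model) run inputs and baselines
# through the model in one forward pass and stack the two outputs along
# dim 1; here that stacked output is flattened back into a 2N-row batch
# (input outputs first, then baseline outputs) before selecting targets.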
model_out = _run_forward(
forward_func, inputs, None, additional_forward_args
)
return _select_targets(
torch.cat((model_out[:, 0], model_out[:, 1])), target
)
if hasattr(forward_func, "device_ids"):
forward_fn.device_ids = forward_func.device_ids # type: ignore
return forward_fn
def _is_non_linear(self, module: Module) -> bool:
return type(module) in SUPPORTED_NON_LINEAR.keys()
def _forward_pre_hook_ref(
self, module: Module, inputs: Union[Tensor, Tuple[Tensor, ...]]
) -> None:
inputs = _format_tensor_into_tuples(inputs)
module.input_ref = tuple( # type: ignore
input.clone().detach() for input in inputs
)
def _forward_pre_hook(
self, module: Module, inputs: Union[Tensor, Tuple[Tensor, ...]]
) -> None:
"""
For the modules that perform in-place operations such as ReLUs, we cannot
use inputs from forward hooks. This is because in that case inputs
and outputs are the same. We need to access the inputs in pre-hooks and
set the necessary hooks on the inputs there.
"""
inputs = _format_tensor_into_tuples(inputs)
module.input = inputs[0].clone().detach()
def _forward_hook(
self,
module: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
outputs: Union[Tensor, Tuple[Tensor, ...]],
) -> None:
r"""
We need a forward hook to access and detach the inputs and
outputs of a module.
"""
outputs = _format_tensor_into_tuples(outputs)
module.output = outputs[0].clone().detach()
def _backward_hook(
self,
module: Module,
grad_input: Tensor,
grad_output: Tensor,
) -> Tensor:
r"""
`grad_input` is the gradient of the loss with respect to the module's input.
`grad_output` is the gradient of the loss with respect to the module's output.
We override `grad_input` according to the chain rule as
`grad_output` * delta_out / delta_in.
"""
# Before accessing the attributes from the module, we want to ensure that
# the properties exist; if not, it is likely that the module is being reused.
attr_criteria = self.satisfies_attribute_criteria(module)
if not attr_criteria:
raise RuntimeError(
"A Module {} was detected that does not contain some of "
"the input/output attributes that are required for DeepLift "
"computations. This can occur, for example, if "
"your module is being used more than once in the network."
"Please, ensure that module is being used only once in the "
"network.".format(module)
)
multipliers = SUPPORTED_NON_LINEAR[type(module)](
module,
module.input,
module.output,
grad_input,
grad_output,
eps=self.eps,
)
# remove all the properties that we set for the inputs and outputs
del module.input
del module.output
return multipliers
def satisfies_attribute_criteria(self, module: Module) -> bool:
return hasattr(module, "input") and hasattr(module, "output")
def _can_register_hook(self, module: Module) -> bool:
# TODO find a better way of checking if a module is a container or not
module_fullname = str(type(module))
has_already_hooks = len(module._backward_hooks) > 0 # type: ignore
return not (
"nn.modules.container" in module_fullname
or has_already_hooks
or not self._is_non_linear(module)
)
def _register_hooks(
self, module: Module, attribute_to_layer_input: bool = True
) -> None:
if not self._can_register_hook(module) or (
not attribute_to_layer_input and module is self.layer # type: ignore
):
return
# adds forward hook to leaf nodes that are non-linear
forward_handle = module.register_forward_hook(self._forward_hook)
pre_forward_handle = module.register_forward_pre_hook(self._forward_pre_hook)
backward_handles = _register_backward_hook(module, self._backward_hook, self)
self.forward_handles.append(forward_handle)
self.forward_handles.append(pre_forward_handle)
self.backward_handles.extend(backward_handles)
def _remove_hooks(self, extra_hooks_to_remove: List[RemovableHandle]) -> None:
for handle in extra_hooks_to_remove:
handle.remove()
for forward_handle in self.forward_handles:
forward_handle.remove()
for backward_handle in self.backward_handles:
backward_handle.remove()
def _hook_main_model(self) -> List[RemovableHandle]:
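# pre_hook concatenates each input tensor with its baseline along the batch
# dimension so a single forward pass evaluates both; forward_hook chunks the
# doubled output apart again and stacks the halves along dim 1, pairing each
# example's output with its baseline's output.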
def pre_hook(module: Module, baseline_inputs_add_args: Tuple) -> Tuple:
inputs = baseline_inputs_add_args[0]
baselines = baseline_inputs_add_args[1]
additional_args = None
if len(baseline_inputs_add_args) > 2:
additional_args = baseline_inputs_add_args[2:]
baseline_input_tsr = tuple(
torch.cat([input, baseline])
for input, baseline in zip(inputs, baselines)
)
if additional_args is not None:
expanded_additional_args = cast(
Tuple,
_expand_additional_forward_args(
additional_args, 2, ExpansionTypes.repeat
),
)
return (*baseline_input_tsr, *expanded_additional_args)
return baseline_input_tsr
def forward_hook(module: Module, inputs: Tuple, outputs: Tensor):
return torch.stack(torch.chunk(outputs, 2), dim=1)
if isinstance(
self.model, (nn.DataParallel, nn.parallel.DistributedDataParallel)
):
return [
self.model.module.register_forward_pre_hook(pre_hook), # type: ignore
self.model.module.register_forward_hook(forward_hook),
] # type: ignore
else:
return [
self.model.register_forward_pre_hook(pre_hook), # type: ignore
self.model.register_forward_hook(forward_hook),
] # type: ignore
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
class DeepLiftShap(DeepLift):
r"""
Extends DeepLift algorithm and approximates SHAP values using Deeplift.
For each input sample it computes DeepLift attribution with respect to
each baseline and averages resulting attributions.
More details about the algorithm can be found here:
https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf
Note that the explanation model:
1. Assumes that input features are independent of one another
2. Is linear, meaning that the explanations are modeled through
the additive composition of feature effects.
Although it assumes a linear model for each explanation, the overall
model across multiple explanations can be complex and non-linear.
"""
def __init__(self, model: Module, multiply_by_inputs: bool = True) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of DeepLiftShap, if `multiply_by_inputs`
is set to True, final sensitivity scores
are being multiplied by (inputs - baselines).
This flag applies only if `custom_attribution_func` is
set to None.
"""
DeepLift.__init__(self, model, multiply_by_inputs=multiply_by_inputs)
# There's a mismatch between the signatures of DeepLift.attribute and
# DeepLiftShap.attribute, so we ignore typing here
@typing.overload # type: ignore
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> TensorOrTupleOfTensorsGeneric:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
baselines (Tensor, tuple[Tensor, ...], or Callable):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references. Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
the first dimension equal to the number of examples
in the baselines' distribution. The remaining dimensions
must match with input tensor's dimension starting from
the second dimension.
- a tuple of tensors, if inputs is a tuple of tensors,
with the first dimension of any tensor inside the tuple
equal to the number of examples in the baseline's
distribution. The remaining dimensions must match
the dimensions of the corresponding input tensor
starting from the second dimension.
- callable function, optionally takes `inputs` as an
argument and either returns a single tensor
or a tuple of those.
It is recommended that the number of samples in the baselines'
tensors is larger than one.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
custom_attribution_func (Callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same
length. `custom_attribution_func` returns a tuple of
attribution tensors that have the same length as the
`inputs`.
Default: None
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution score computed based on DeepLift rescale rule with
respect to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
This is computed using the property that the
total sum of model(inputs) - model(baselines)
must be very close to the total sum of attributions
computed based on approximated SHAP values using
Deeplift's rescale rule.
Delta is calculated for each example input and baseline pair,
meaning that the number of elements in returned delta tensor
is equal to the
`number of examples in input` * `number of examples
in baseline`. The deltas are ordered first by input example
and then by baseline.
Note that the logic described for deltas is guaranteed
when the default logic for attribution computations is used,
meaning that the `custom_attribution_func=None`, otherwise
it is not guaranteed and depends on the specifics of the
`custom_attribution_func`.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> dl = DeepLiftShap(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # DeepLiftShap requires a distribution of baselines; here we use
>>> # 20 randomly generated reference images.
>>> baselines = torch.randn(20, 3, 32, 32)
>>> # Computes shap values using deeplift for class 3.
>>> attribution = dl.attribute(input, baselines, target=3)
"""
baselines = _format_callable_baseline(baselines, inputs)
assert isinstance(baselines[0], torch.Tensor) and baselines[0].shape[0] > 1, (
"Baselines distribution has to be provided in form of a torch.Tensor"
" with more than one example but found: {}."
" If baselines are provided in shape of scalars or with a single"
" baseline example, `DeepLift`"
" approach can be used instead.".format(baselines[0])
)
# Keeps track of whether the original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
# batch sizes
inp_bsz = inputs[0].shape[0]
base_bsz = baselines[0].shape[0]
(
exp_inp,
exp_base,
exp_tgt,
exp_addit_args,
) = self._expand_inputs_baselines_targets(
baselines, inputs, target, additional_forward_args
)
attributions = super().attribute.__wrapped__( # type: ignore
self,
exp_inp,
exp_base,
target=exp_tgt,
additional_forward_args=exp_addit_args,
return_convergence_delta=cast(
Literal[True, False], return_convergence_delta
),
custom_attribution_func=custom_attribution_func,
)
if return_convergence_delta:
attributions, delta = cast(Tuple[Tuple[Tensor, ...], Tensor], attributions)
attributions = tuple(
self._compute_mean_across_baselines(
inp_bsz, base_bsz, cast(Tensor, attribution)
)
for attribution in attributions
)
if return_convergence_delta:
return _format_output(is_inputs_tuple, attributions), delta
else:
return _format_output(is_inputs_tuple, attributions)
def _expand_inputs_baselines_targets(
self,
baselines: Tuple[Tensor, ...],
inputs: Tuple[Tensor, ...],
target: TargetType,
additional_forward_args: Any,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], TargetType, Any]:
inp_bsz = inputs[0].shape[0]
base_bsz = baselines[0].shape[0]
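# Each input example is repeated base_bsz times (repeat_interleave) while
# the baseline batch is tiled inp_bsz times, so expanded row k pairs input
# k // base_bsz with baseline k % base_bsz.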
expanded_inputs = tuple(
[
input.repeat_interleave(base_bsz, dim=0).requires_grad_()
for input in inputs
]
)
expanded_baselines = tuple(
[
baseline.repeat(
(inp_bsz,) + tuple([1] * (len(baseline.shape) - 1))
).requires_grad_()
for baseline in baselines
]
)
expanded_target = _expand_target(
target, base_bsz, expansion_type=ExpansionTypes.repeat_interleave
)
input_additional_args = (
_expand_additional_forward_args(
additional_forward_args,
base_bsz,
expansion_type=ExpansionTypes.repeat_interleave,
)
if additional_forward_args is not None
else None
)
return (
expanded_inputs,
expanded_baselines,
expanded_target,
input_additional_args,
)
def _compute_mean_across_baselines(
self, inp_bsz: int, base_bsz: int, attribution: Tensor
) -> Tensor:
# Average for multiple references
attr_shape: Tuple = (inp_bsz, base_bsz)
if len(attribution.shape) > 1:
attr_shape += attribution.shape[1:]
return torch.mean(attribution.view(attr_shape), dim=1, keepdim=False)
def nonlinear(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
) -> Tensor:
r"""
grad_input: (dLoss / dprev_layer_out, dLoss / wij, dLoss / bij)
grad_output: (dLoss / dlayer_out)
https://github.com/pytorch/pytorch/issues/12331
"""
delta_in, delta_out = _compute_diffs(inputs, outputs)
new_grad_inp = torch.where(
abs(delta_in) < eps, grad_input, grad_output * delta_out / delta_in
)
return new_grad_inp
def softmax(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
):
delta_in, delta_out = _compute_diffs(inputs, outputs)
grad_input_unnorm = torch.where(
abs(delta_in) < eps, grad_input, grad_output * delta_out / delta_in
)
# Normalizing: subtract the mean of the unnormalized multipliers so they sum to zero.
n = grad_input.numel()
# updating only the first half
new_grad_inp = grad_input_unnorm - grad_input_unnorm.sum() * 1 / n
return new_grad_inp
def maxpool1d(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
):
return maxpool(
module,
F.max_pool1d,
F.max_unpool1d,
inputs,
outputs,
grad_input,
grad_output,
eps=eps,
)
def maxpool2d(
module: Module,
inputs: Tensor,
outputs: Tensor,
grad_input: Tensor,
grad_output: Tensor,
eps: float = 1e-10,
):
return maxpool(
module,
F.max_pool2d,
F.max_unpool2d,
inputs,
outputs,
grad_input,
grad_output,
eps=eps,
)
def maxpool3d(
module: Module, inputs, outputs, grad_input, grad_output, eps: float = 1e-10
):
return maxpool(
module,
F.max_pool3d,
F.max_unpool3d,
inputs,
outputs,
grad_input,
grad_output,
eps=eps,
)
def maxpool(
module: Module,
pool_func: Callable,
unpool_func: Callable,
inputs,
outputs,
grad_input,
grad_output,
eps: float = 1e-10,
):
with torch.no_grad():
input, input_ref = inputs.chunk(2)
output, output_ref = outputs.chunk(2)
delta_in = input - input_ref
delta_in = torch.cat(2 * [delta_in])
# Extracts the cross maximum between the outputs of maxpool for the
# actual inputs and their corresponding references. In case the delta outputs
# for the references are larger, the method relies on the references and
# corresponding gradients to compute the multipliers and contributions.
delta_out_xmax = torch.max(output, output_ref)
delta_out = torch.cat([delta_out_xmax - output_ref, output - delta_out_xmax])
_, indices = pool_func(
module.input,
module.kernel_size,
module.stride,
module.padding,
module.dilation,
module.ceil_mode,
True,
)
grad_output_updated = grad_output
unpool_grad_out_delta, unpool_grad_out_ref_delta = torch.chunk(
unpool_func(
grad_output_updated * delta_out,
indices,
module.kernel_size,
module.stride,
module.padding,
list(cast(torch.Size, module.input.shape)),
),
2,
)
unpool_grad_out_delta = unpool_grad_out_delta + unpool_grad_out_ref_delta
unpool_grad_out_delta = torch.cat(2 * [unpool_grad_out_delta])
if grad_input.shape != inputs.shape:
raise AssertionError(
"A problem occurred during maxpool modul's backward pass. "
"The gradients with respect to inputs include only a "
"subset of inputs. More details about this issue can "
"be found here: "
"https://pytorch.org/docs/stable/"
"nn.html#torch.nn.Module.register_backward_hook "
"This can happen for example if you attribute to the outputs of a "
"MaxPool. As a workaround, please, attribute to the inputs of "
"the following layer."
)
new_grad_inp = torch.where(
abs(delta_in) < eps, grad_input[0], unpool_grad_out_delta / delta_in
)
return new_grad_inp
def _compute_diffs(inputs: Tensor, outputs: Tensor) -> Tuple[Tensor, Tensor]:
input, input_ref = inputs.chunk(2)
# If the model is a single non-linear module and we apply the Rescale rule to
# it, we might not be able to perform chunking, because the output of the
# module is usually replaced by the model output.
output, output_ref = outputs.chunk(2)
delta_in = input - input_ref
delta_out = output - output_ref
return torch.cat(2 * [delta_in]), torch.cat(2 * [delta_out])
SUPPORTED_NON_LINEAR = {
nn.ReLU: nonlinear,
nn.ELU: nonlinear,
nn.LeakyReLU: nonlinear,
nn.Sigmoid: nonlinear,
nn.Tanh: nonlinear,
nn.Softplus: nonlinear,
nn.MaxPool1d: maxpool1d,
nn.MaxPool2d: maxpool2d,
nn.MaxPool3d: maxpool3d,
nn.Softmax: softmax,
}
|
#!/usr/bin/env python3
from typing import Any, Callable
import torch
from captum._utils.common import _format_output, _format_tensor_into_tuples, _is_tuple
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution
from captum.log import log_usage
class Saliency(GradientAttribution):
r"""
A baseline approach for computing input attribution. It returns
the gradients with respect to inputs. If `abs` is set to True, which is
the default, the absolute value of the gradients is returned.
More details about the approach can be found in the following paper:
https://arxiv.org/abs/1312.6034
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
"""
GradientAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
abs: bool = True,
additional_forward_args: Any = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which saliency
is computed. If forward_func takes a single tensor
as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
abs (bool, optional): Returns the absolute value of the gradients if set
to True, otherwise the (signed) gradients are returned.
Default: True
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The gradients with respect to each input feature.
Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> # Generating random input with size 2 x 3 x 32 x 32
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Defining Saliency interpreter
>>> saliency = Saliency(net)
>>> # Computes saliency maps for class 3.
>>> attribution = saliency.attribute(input, target=3)
"""
# Keeps track of whether the original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
# No need to format additional_forward_args here.
        # They are being formatted in the `_run_forward` function in `common.py`
gradients = self.gradient_func(
self.forward_func, inputs, target, additional_forward_args
)
if abs:
attributions = tuple(torch.abs(gradient) for gradient in gradients)
else:
attributions = gradients
undo_gradient_requirements(inputs, gradient_mask)
return _format_output(is_inputs_tuple, attributions)
|
#!/usr/bin/env python3
from typing import Any, Callable, Generator, Tuple, Union
import torch
from captum._utils.models.linear_model import SkLearnLinearRegression
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.lime import construct_feature_mask, Lime
from captum.attr._utils.common import _format_input_baseline
from captum.log import log_usage
from torch import Tensor
from torch.distributions.categorical import Categorical
class KernelShap(Lime):
r"""
Kernel SHAP is a method that uses the LIME framework to compute
Shapley Values. Setting the loss function, weighting kernel and
regularization terms appropriately in the LIME framework allows
theoretically obtaining Shapley Values more efficiently than
directly computing Shapley Values.
More information regarding this method and proof of equivalence
can be found in the original paper here:
https://arxiv.org/abs/1705.07874
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or
any modification of it.
"""
Lime.__init__(
self,
forward_func,
interpretable_model=SkLearnLinearRegression(),
similarity_func=self.kernel_shap_similarity_kernel,
perturb_func=self.kernel_shap_perturb_generator,
)
self.inf_weight = 1000000.0
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
n_samples: int = 25,
perturbations_per_eval: int = 1,
return_input_shape: bool = True,
show_progress: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
This method attributes the output of the model with given target index
(in case it is provided, otherwise it assumes that output is a
scalar) to the inputs of the model using the approach described above,
training an interpretable model based on KernelSHAP and returning a
representation of the interpretable model.
It is recommended to only provide a single example as input (tensors
with first dimension or batch size = 1). This is because LIME / KernelShap
is generally used for sample-based interpretability, training a separate
interpretable model to explain a model's prediction on each individual example.
A batch of inputs can also be provided as inputs, similar to
other perturbation-based attribution methods. In this case, if forward_fn
returns a scalar per example, attributions will be computed for each
example independently, with a separate interpretable model trained for each
        example. Note that the provided similarity and perturbation functions will be
        given each example separately (first dimension = 1) in this case.
If forward_fn returns a scalar per batch (e.g. loss), attributions will
still be computed using a single interpretable model for the full batch.
In this case, similarity and perturbation functions will be provided the
same original input containing the full batch.
The number of interpretable features is determined from the provided
feature mask, or if none is provided, from the default feature mask,
which considers each scalar input as a separate feature. It is
generally recommended to provide a feature mask which groups features
into a small number of interpretable features / components (e.g.
superpixels in images).
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which KernelShap
is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the reference value which replaces each
feature when the corresponding interpretable feature
is set to 0.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which surrogate model is trained
(for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
                        For a tensor, the first dimension of the tensor must
                        correspond to the number of examples. For all other
                        types, the given argument is used for all forward
                        evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which correspond to the same
interpretable feature. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Values across
all tensors should be integers in the range 0 to
num_interp_features - 1, and indices corresponding to the
same feature should have the same value.
Note that features are grouped across tensors
(unlike feature ablation and occlusion), so
if the same index is used in different tensors, those
features are still grouped and added simultaneously.
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature.
Default: None
n_samples (int, optional): The number of samples of the original
model used to train the surrogate interpretable model.
                        Default: `25` if `n_samples` is not provided.
perturbations_per_eval (int, optional): Allows multiple samples
to be processed simultaneously in one call to forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
If the forward function returns a single scalar per batch,
perturbations_per_eval must be set to 1.
Default: 1
return_input_shape (bool, optional): Determines whether the returned
                        tensor(s) only contain the coefficients for each
                        interpretable feature from the trained surrogate model, or
whether the returned attributions match the input shape.
When return_input_shape is True, the return type of attribute
matches the input shape, with each element containing the
coefficient of the corresponding interpretable feature.
All elements with the same value in the feature mask
will contain the same coefficient in the returned
attributions. If return_input_shape is False, a 1D
tensor is returned, containing only the coefficients
of the trained interpretable model, with length
                        num_interp_features.
                        Default: True
show_progress (bool, optional): Displays the progress of computation.
It will try to use tqdm if available for advanced features
                        (e.g. time estimation). Otherwise, it will fall back to
a simple output of progress.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The attributions with respect to each input feature.
If return_input_shape = True, attributions will be
the same size as the provided inputs, with each value
providing the coefficient of the corresponding
                        interpretable feature.
If return_input_shape is False, a 1D
tensor is returned, containing only the coefficients
                        of the trained interpretable model, with length
num_interp_features.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> net = SimpleClassifier()
>>> # Generating random input with size 1 x 4 x 4
>>> input = torch.randn(1, 4, 4)
>>> # Defining KernelShap interpreter
>>> ks = KernelShap(net)
>>> # Computes attribution, with each of the 4 x 4 = 16
>>> # features as a separate interpretable feature
>>> attr = ks.attribute(input, target=1, n_samples=200)
>>> # Alternatively, we can group each 2x2 square of the inputs
>>> # as one 'interpretable' feature and perturb them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are set to their
>>> # baseline value, when the corresponding binary interpretable
>>> # feature is set to 0.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> # Computes KernelSHAP attributions with feature mask.
>>> attr = ks.attribute(input, target=1, feature_mask=feature_mask)
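            >>> # Additional sketch (not part of the original example): with
            >>> # return_input_shape=False, only the surrogate model's
            >>> # coefficients are returned as a 1D tensor of length
            >>> # num_interp_features (4 for the mask above).
            >>> coefs = ks.attribute(input, target=1,
            >>>                      feature_mask=feature_mask,
            >>>                      return_input_shape=False)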
"""
formatted_inputs, baselines = _format_input_baseline(inputs, baselines)
feature_mask, num_interp_features = construct_feature_mask(
feature_mask, formatted_inputs
)
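        # Sampling distribution over the number of selected features k,
        # proportional to the Shapley-kernel term (M - 1) / (k * (M - k)),
        # where M = num_interp_features. Index 0 (k = 0) gets probability 0,
        # and k = M lies outside the arange; the all-zeros and all-ones
        # vectors are yielded deterministically by kernel_shap_perturb_generator.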
num_features_list = torch.arange(num_interp_features, dtype=torch.float)
denom = num_features_list * (num_interp_features - num_features_list)
probs = (num_interp_features - 1) / denom
probs[0] = 0.0
return self._attribute_kwargs(
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
feature_mask=feature_mask,
n_samples=n_samples,
perturbations_per_eval=perturbations_per_eval,
return_input_shape=return_input_shape,
num_select_distribution=Categorical(probs),
show_progress=show_progress,
)
def kernel_shap_similarity_kernel(
self, _, __, interpretable_sample: Tensor, **kwargs
) -> Tensor:
assert (
"num_interp_features" in kwargs
), "Must provide num_interp_features to use default similarity kernel"
num_selected_features = int(interpretable_sample.sum(dim=1).item())
num_features = kwargs["num_interp_features"]
if num_selected_features == 0 or num_selected_features == num_features:
# weight should be theoretically infinite when
# num_selected_features = 0 or num_features
# enforcing that trained linear model must satisfy
# end-point criteria. In practice, it is sufficient to
# make this weight substantially larger so setting this
# weight to 1000000 (all other weights are 1).
similarities = self.inf_weight
else:
similarities = 1.0
return torch.tensor([similarities])
def kernel_shap_perturb_generator(
self, original_inp: Union[Tensor, Tuple[Tensor, ...]], **kwargs
) -> Generator[Tensor, None, None]:
r"""
Perturbations are sampled by the following process:
- Choose k (number of selected features), based on the distribution
p(k) = (M - 1) / (k * (M - k))
where M is the total number of features in the interpretable space
- Randomly select a binary vector with k ones, each sample is equally
likely. This is done by generating a random vector of normal
values and thresholding based on the top k elements.
Since there are M choose k vectors with k ones, this weighted sampling
is equivalent to applying the Shapley kernel for the sample weight,
defined as:
k(M, k) = (M - 1) / (k * (M - k) * (M choose k))
"""
assert (
"num_select_distribution" in kwargs and "num_interp_features" in kwargs
), (
"num_select_distribution and num_interp_features are necessary"
" to use kernel_shap_perturb_func"
)
if isinstance(original_inp, Tensor):
device = original_inp.device
else:
device = original_inp[0].device
num_features = kwargs["num_interp_features"]
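        # Yield the all-ones and all-zeros feature vectors first; their
        # theoretically infinite Shapley-kernel weight is approximated by the
        # large finite weight in kernel_shap_similarity_kernel.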
yield torch.ones(1, num_features, device=device, dtype=torch.long)
yield torch.zeros(1, num_features, device=device, dtype=torch.long)
while True:
num_selected_features = kwargs["num_select_distribution"].sample()
rand_vals = torch.randn(1, num_features)
threshold = torch.kthvalue(
rand_vals, num_features - num_selected_features
).values.item()
yield (rand_vals > threshold).to(device=device).long()
|
#!/usr/bin/env python3
from typing import Any, Callable
from captum._utils.common import _format_output, _format_tensor_into_tuples, _is_tuple
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution
from captum.log import log_usage
class InputXGradient(GradientAttribution):
r"""
A baseline approach for computing the attribution. It multiplies input with
the gradient with respect to input.
https://arxiv.org/abs/1605.01713
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
"""
GradientAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
forward_func in order following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The input x gradient with
respect to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
            >>> # Generating random input with size 2x3x32x32
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Defining InputXGradient interpreter
>>> input_x_gradient = InputXGradient(net)
>>> # Computes inputXgradient for class 4.
>>> attribution = input_x_gradient.attribute(input, target=4)
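            >>> # Additional sketch (assumes the same net and input): targets can
            >>> # also be given per example as a list with one entry per example
            >>> # in the batch, e.g. classes 0 and 7 for the two inputs.
            >>> attribution_per_example = input_x_gradient.attribute(input, target=[0, 7])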
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
gradients = self.gradient_func(
self.forward_func, inputs, target, additional_forward_args
)
attributions = tuple(
input * gradient for input, gradient in zip(inputs, gradients)
)
undo_gradient_requirements(inputs, gradient_mask)
return _format_output(is_inputs_tuple, attributions)
@property
def multiplies_by_inputs(self):
return True
|
#!/usr/bin/env python3
import typing
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_output,
_is_tuple,
)
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._utils.approximation_methods import approximation_parameters
from captum.attr._utils.attribution import GradientAttribution
from captum.attr._utils.batching import _batch_attribution
from captum.attr._utils.common import (
_format_input_baseline,
_reshape_and_sum,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
class IntegratedGradients(GradientAttribution):
r"""
Integrated Gradients is an axiomatic model interpretability algorithm that
assigns an importance score to each input feature by approximating the
integral of gradients of the model's output with respect to the inputs
along the path (straight line) from given baselines / references to inputs.
Baselines can be provided as input arguments to attribute method.
To approximate the integral we can choose to use either a variant of
Riemann sum or Gauss-Legendre quadrature rule.
More details regarding the integrated gradients method can be found in the
original paper:
https://arxiv.org/abs/1703.01365
"""
def __init__(
self,
forward_func: Callable,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in,
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
                    More details can be found here:
https://arxiv.org/abs/1711.06104
In case of integrated gradients, if `multiply_by_inputs`
is set to True, final sensitivity scores are being multiplied by
(inputs - baselines).
"""
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
# The following overloaded method signatures correspond to the case where
# return_convergence_delta is False, then only attributions are returned,
# and when return_convergence_delta is True, the return type is
# a tuple with both attributions and deltas.
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
return_convergence_delta: Literal[False] = False,
) -> TensorOrTupleOfTensorsGeneric:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
*,
return_convergence_delta: Literal[True],
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@log_usage()
def attribute( # type: ignore
self,
inputs: TensorOrTupleOfTensorsGeneric,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
return_convergence_delta: bool = False,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
This method attributes the output of the model with given target index
(in case it is provided, otherwise it assumes that output is a
scalar) to the inputs of the model using the approach described above.
In addition to that it also returns, if `return_convergence_delta` is
set to True, integral approximation delta based on the completeness
property of integrated gradients.
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which integrated
gradients are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the starting point from which integral
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (str, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `gausslegendre` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size at most internal_batch_size,
which are computed (forward / backward passes)
sequentially. internal_batch_size must be at least equal to
#examples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations are
processed in one batch.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Integrated gradients with respect to each input feature.
attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
The difference between the total approximated and true
integrated gradients. This is computed using the property
that the total sum of forward_func(inputs) -
forward_func(baselines) must equal the total sum of the
integrated gradient.
Delta is calculated per example, meaning that the number of
elements in returned delta tensor is equal to the number of
examples in inputs.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> ig = IntegratedGradients(net)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes integrated gradients for class 3.
>>> attribution = ig.attribute(input, target=3)
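            >>> # Additional sketch (assumes the same net and input): requesting
            >>> # the convergence delta as well, which reports the per-example
            >>> # completeness-axiom error of the integral approximation.
            >>> attribution, delta = ig.attribute(
            >>>     input, target=3, return_convergence_delta=True)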
"""
# Keeps track whether original input is a tuple or not before
# converting it into a tuple.
is_inputs_tuple = _is_tuple(inputs)
inputs, baselines = _format_input_baseline(inputs, baselines)
_validate_input(inputs, baselines, n_steps, method)
if internal_batch_size is not None:
num_examples = inputs[0].shape[0]
attributions = _batch_attribution(
self,
num_examples,
internal_batch_size,
n_steps,
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
method=method,
)
else:
attributions = self._attribute(
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
n_steps=n_steps,
method=method,
)
if return_convergence_delta:
start_point, end_point = baselines, inputs
# computes approximation error based on the completeness axiom
delta = self.compute_convergence_delta(
attributions,
start_point,
end_point,
additional_forward_args=additional_forward_args,
target=target,
)
return _format_output(is_inputs_tuple, attributions), delta
return _format_output(is_inputs_tuple, attributions)
def _attribute(
self,
inputs: Tuple[Tensor, ...],
baselines: Tuple[Union[Tensor, int, float], ...],
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
step_sizes_and_alphas: Union[None, Tuple[List[float], List[float]]] = None,
) -> Tuple[Tensor, ...]:
if step_sizes_and_alphas is None:
# retrieve step size and scaling factor for specified
# approximation method
step_sizes_func, alphas_func = approximation_parameters(method)
step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)
else:
step_sizes, alphas = step_sizes_and_alphas
# scale features and compute gradients. (batch size is abbreviated as bsz)
# scaled_features' dim -> (bsz * #steps x inputs[0].shape[1:], ...)
scaled_features_tpl = tuple(
torch.cat(
[baseline + alpha * (input - baseline) for alpha in alphas], dim=0
).requires_grad_()
for input, baseline in zip(inputs, baselines)
)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# apply number of steps to additional forward args
# currently, number of steps is applied only to additional forward arguments
# that are nd-tensors. It is assumed that the first dimension is
# the number of batches.
# dim -> (bsz * #steps x additional_forward_args[0].shape[1:], ...)
input_additional_args = (
_expand_additional_forward_args(additional_forward_args, n_steps)
if additional_forward_args is not None
else None
)
expanded_target = _expand_target(target, n_steps)
# grads: dim -> (bsz * #steps x inputs[0].shape[1:], ...)
grads = self.gradient_func(
forward_fn=self.forward_func,
inputs=scaled_features_tpl,
target_ind=expanded_target,
additional_forward_args=input_additional_args,
)
        # flattening grads so that we can multiply them with the step sizes
        # calling contiguous to avoid `memory hole` problems
scaled_grads = [
grad.contiguous().view(n_steps, -1)
* torch.tensor(step_sizes).view(n_steps, 1).to(grad.device)
for grad in grads
]
# aggregates across all steps for each tensor in the input tuple
# total_grads has the same dimensionality as inputs
total_grads = tuple(
_reshape_and_sum(
scaled_grad, n_steps, grad.shape[0] // n_steps, grad.shape[1:]
)
for (scaled_grad, grad) in zip(scaled_grads, grads)
)
# computes attribution for each tensor in input tuple
# attributions has the same dimensionality as inputs
if not self.multiplies_by_inputs:
attributions = total_grads
else:
attributions = tuple(
total_grad * (input - baseline)
for total_grad, input, baseline in zip(total_grads, inputs, baselines)
)
return attributions
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
from captum._utils.common import (
_format_additional_forward_args,
_format_output,
_format_tensor_into_tuples,
_is_tuple,
)
from captum._utils.gradient import (
_forward_layer_eval_with_neuron_grads,
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.log import log_usage
from torch.nn import Module
class NeuronGradient(NeuronAttribution, GradientAttribution):
r"""
Computes the gradient of the output of a particular neuron with
respect to the inputs of the network.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which neuron
gradients are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
                        range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neurons, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Gradients of particular neuron with respect to each input
feature. Attributions will always be the same size as the
provided inputs, with each value providing the attribution
of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> neuron_ig = NeuronGradient(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
>>> # which indexes a particular neuron in the layer output.
>>> # For this example, we choose the index (4,1,2).
>>> # Computes neuron gradient for neuron with
>>> # index (4,1,2).
>>> attribution = neuron_ig.attribute(input, (4,1,2))
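            >>> # Additional sketch (assumes the same net and input): a slice in
            >>> # the selector aggregates a range of neurons, attributing the
            >>> # sum of the selected neurons rather than a single one.
            >>> attribution_sum = neuron_ig.attribute(input, (4, 1, slice(0, 32)))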
"""
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
gradient_mask = apply_gradient_requirements(inputs)
_, input_grads = _forward_layer_eval_with_neuron_grads(
self.forward_func,
inputs,
self.layer,
additional_forward_args,
gradient_neuron_selector=neuron_selector,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
undo_gradient_requirements(inputs, gradient_mask)
return _format_output(is_inputs_tuple, input_grads)
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
from captum._utils.gradient import construct_neuron_grad_fn
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.guided_backprop_deconvnet import Deconvolution, GuidedBackprop
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.log import log_usage
from torch.nn import Module
class NeuronDeconvolution(NeuronAttribution, GradientAttribution):
r"""
Computes attribution of the given neuron using deconvolution.
Deconvolution computes the gradient of the target output with
respect to the input, but gradients of ReLU functions are overridden so
that the gradient of the ReLU input is simply computed taking ReLU of
the output gradient, essentially only propagating non-negative gradients
(without dependence on the sign of the ReLU input).
More details regarding the deconvolution algorithm can be found
in these papers:
https://arxiv.org/abs/1311.2901
https://link.springer.com/chapter/10.1007/978-3-319-46466-4_8
Warning: Ensure that all ReLU operations in the forward function of the
    given model are performed using a module (nn.ReLU).
If nn.functional.ReLU is used, gradients are not overridden appropriately.
"""
def __init__(
self, model: Module, layer: Module, device_ids: Union[None, List[int]] = None
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if model
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If model is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
NeuronAttribution.__init__(self, model, layer, device_ids)
GradientAttribution.__init__(self, model)
self.deconv = Deconvolution(model)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
                        range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Deconvolution attribution of
particular neuron with respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> neuron_deconv = NeuronDeconvolution(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
>>> # which indexes a particular neuron in the layer output.
>>> # For this example, we choose the index (4,1,2).
>>> # Computes neuron deconvolution for neuron with
>>> # index (4,1,2).
>>> attribution = neuron_deconv.attribute(input, (4,1,2))
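            >>> # Additional sketch (assumes the same net and index): attributing
            >>> # with respect to the neuron's input rather than its output via
            >>> # attribute_to_neuron_input=True.
            >>> attribution_in = neuron_deconv.attribute(
            >>>     input, (4,1,2), attribute_to_neuron_input=True)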
"""
self.deconv.gradient_func = construct_neuron_grad_fn(
self.layer, neuron_selector, self.device_ids, attribute_to_neuron_input
)
# NOTE: using __wrapped__ to not log
return self.deconv.attribute.__wrapped__(
self.deconv, inputs, None, additional_forward_args
)
class NeuronGuidedBackprop(NeuronAttribution, GradientAttribution):
r"""
Computes attribution of the given neuron using guided backpropagation.
Guided backpropagation computes the gradient of the target neuron
with respect to the input, but gradients of ReLU functions are overridden
so that only non-negative gradients are backpropagated.
More details regarding the guided backpropagation algorithm can be found
in the original paper here:
https://arxiv.org/abs/1412.6806
Warning: Ensure that all ReLU operations in the forward function of the
    given model are performed using a module (nn.ReLU).
If nn.functional.ReLU is used, gradients are not overridden appropriately.
"""
def __init__(
self, model: Module, layer: Module, device_ids: Union[None, List[int]] = None
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (Module): Layer for which neuron attributions are computed.
Attributions for a particular neuron in the output of
this layer are computed using the argument neuron_selector
in the attribute method.
Currently, only layers with a single tensor output are
supported.
device_ids (list[int]): Device ID list, necessary only if model
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If model is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
NeuronAttribution.__init__(self, model, layer, device_ids)
GradientAttribution.__init__(self, model)
self.guided_backprop = GuidedBackprop(model)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which
attributions are computed. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples (aka batch size), and if
multiple input tensors are provided, the examples must
be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
                        range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neurons, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Guided backprop attribution of
particular neuron with respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> neuron_gb = NeuronGuidedBackprop(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
>>> # which indexes a particular neuron in the layer output.
>>> # For this example, we choose the index (4,1,2).
>>> # Computes neuron guided backpropagation for neuron with
>>> # index (4,1,2).
>>> attribution = neuron_gb.attribute(input, (4,1,2))
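            >>> # Additional sketch (assumes the same net and input): a callable
            >>> # selector can aggregate neurons; here it sums the layer output
            >>> # per example, yielding one scalar per input example.
            >>> attribution_agg = neuron_gb.attribute(
            >>>     input, lambda layer_out: layer_out.sum(dim=(1, 2, 3)))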
"""
self.guided_backprop.gradient_func = construct_neuron_grad_fn(
self.layer, neuron_selector, self.device_ids, attribute_to_neuron_input
)
# NOTE: using __wrapped__ to not log
return self.guided_backprop.attribute.__wrapped__(
self.guided_backprop, inputs, None, additional_forward_args
)
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.common import _verify_select_neuron
from captum._utils.gradient import _forward_layer_eval
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.feature_ablation import FeatureAblation
from captum.attr._utils.attribution import NeuronAttribution, PerturbationAttribution
from captum.log import log_usage
from torch.nn import Module
class NeuronFeatureAblation(NeuronAttribution, PerturbationAttribution):
r"""
A perturbation based approach to computing neuron attribution,
involving replacing each input feature with a given baseline /
reference, and computing the difference in the neuron's input / output.
By default, each scalar value within
each input tensor is taken as a feature and replaced independently. Passing
a feature mask, allows grouping features to be ablated together. This can
be used in cases such as images, where an entire segment or region
can be ablated, measuring the importance of the segment (feature group).
Each input scalar in the group will be given the same attribution value
equal to the change in target as a result of ablating the entire feature
group.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Attributions for a particular neuron in the input or output
of this layer are computed using the argument neuron_selector
in the attribute method.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
PerturbationAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
baselines: BaselineType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
attribute_to_neuron_input: bool = False,
perturbations_per_eval: int = 1,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which neuron
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
                        range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference value which replaces each
feature when ablated.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or
broadcastable to match the dimensions of inputs
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (Tensor or tuple[Tensor, ...], optional):
feature_mask defines a mask for the input, grouping
features which should be ablated together. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Each tensor
should contain integers in the range 0 to num_features
- 1, and indices corresponding to the same feature should
have the same value.
Note that features within each input tensor are ablated
independently (not across tensors).
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature, which
is ablated independently.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neurons, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
perturbations_per_eval (int, optional): Allows ablation of multiple
features to be processed simultaneously in one call to
forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
Default: 1
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attributions of particular neuron with respect to each input
feature. Attributions will always be the same size as the
provided inputs, with each value providing the attribution
of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.Conv2d,
>>> # and the output of this layer has dimensions Nx12x3x3.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # Defining NeuronFeatureAblation interpreter
>>> ablator = NeuronFeatureAblation(net, net.conv1)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x3x3, we need a tuple in the form (0..11,0..2,0..2)
>>> # which indexes a particular neuron in the layer output.
>>> # For this example, we choose the index (4,1,2).
>>> # Computes ablation attribution for this neuron, ablating each of
>>> # the 16 scalar inputs independently.
>>> attr = ablator.attribute(input, neuron_selector=(4,1,2))
>>> # Alternatively, we may want to ablate features in groups, e.g.
>>> # grouping each 2x2 square of the inputs and ablating them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are ablated
>>> # simultaneously, and the attribution for each input in the same
>>> # group (0, 1, 2, and 3) per example are the same.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> attr = ablator.attribute(input, neuron_selector=(4,1,2),
>>> feature_mask=feature_mask)
"""
def neuron_forward_func(*args: Any):
with torch.no_grad():
layer_eval = _forward_layer_eval(
self.forward_func,
args,
self.layer,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
return _verify_select_neuron(layer_eval, neuron_selector)
ablator = FeatureAblation(neuron_forward_func)
# NOTE: using __wrapped__ to not log
return ablator.attribute.__wrapped__(
ablator, # self
inputs,
baselines=baselines,
additional_forward_args=additional_forward_args,
feature_mask=feature_mask,
perturbations_per_eval=perturbations_per_eval,
)
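# A minimal usage sketch (not part of the library API): TinyConvNet below is a
# hypothetical model used only to illustrate grouping input pixels with a
# feature mask, mirroring the docstring example above.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    class TinyConvNet(nn.Module):
        # Hypothetical model: 1x4x4 input -> 12x3x3 feature map.
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(1, 12, kernel_size=2)

        def forward(self, x):
            return self.conv1(x)

    net = TinyConvNet()
    inp = torch.randn(2, 1, 4, 4)
    ablator = NeuronFeatureAblation(net, net.conv1)
    # Group each 2x2 square of the 4x4 input into a single feature.
    mask = torch.tensor([[[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]]])
    attr = ablator.attribute(inp, neuron_selector=(4, 1, 2), feature_mask=mask)
    print(attr.shape)  # matches the input shape: torch.Size([2, 1, 4, 4])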
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
from captum._utils.gradient import construct_neuron_grad_fn
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.gradient_shap import GradientShap
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.log import log_usage
from torch.nn import Module
class NeuronGradientShap(NeuronAttribution, GradientAttribution):
r"""
Implements gradient SHAP for a neuron in a hidden layer based on the
implementation from SHAP's primary author. For reference, please view:
https://github.com/slundberg/shap\
#deep-learning-example-with-gradientexplainer-tensorflowkeraspytorch-models
A Unified Approach to Interpreting Model Predictions
https://papers.nips.cc/paper\
7062-a-unified-approach-to-interpreting-model-predictions
GradientShap approximates SHAP values by computing the expectations of
gradients by randomly sampling from the distribution of baselines/references.
It adds white noise to each input sample `n_samples` times, selects a
random baseline from baselines' distribution and a random point along the
path between the baseline and the input, and computes the gradient of the
neuron with index `neuron_selector` with respect to those selected random
points. The final SHAP values represent the expected values of
`gradients * (inputs - baselines)`.
GradientShap makes an assumption that the input features are independent
and that the explanation model is linear, meaning that the explanations
are modeled through the additive composition of feature effects.
Under those assumptions, SHAP value can be approximated as the expectation
of gradients that are computed for randomly generated `n_samples` input
samples after adding gaussian noise `n_samples` times to each input for
different baselines/references.
In some sense it can be viewed as an approximation of integrated gradients
by computing the expectations of gradients for different baselines.
Current implementation uses Smoothgrad from :class:`.NoiseTunnel` in order to
randomly draw samples from the distribution of baselines, add noise to input
samples and compute the expectation (smoothgrad).
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which neuron attributions are computed.
The output size of the attribute method matches the
dimensions of the inputs or outputs of the neuron with
index `neuron_selector` in this layer, depending on whether
we attribute to the inputs or outputs of the neuron.
Currently, it is assumed that the inputs or the outputs
of the neurons in this layer, depending on which one is
used for attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of Neuron Gradient SHAP,
if `multiply_by_inputs` is set to True, the
sensitivity scores for scaled inputs are
being multiplied by (inputs - baselines).
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
n_samples: int = 5,
stdevs: float = 0.0,
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which SHAP attribution
values are computed. If `forward_func` takes a single
tensor as input, a single input tensor should be provided.
If `forward_func` takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
baselines (Tensor, tuple[Tensor, ...], or Callable):
Baselines define the starting point from which expectation
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
the first dimension equal to the number of examples
in the baselines' distribution. The remaining dimensions
must match with input tensor's dimension starting from
the second dimension.
- a tuple of tensors, if inputs is a tuple of tensors,
with the first dimension of any tensor inside the tuple
equal to the number of examples in the baseline's
distribution. The remaining dimensions must match
the dimensions of the corresponding input tensor
starting from the second dimension.
- callable function, optionally takes `inputs` as an
argument and either returns a single tensor
or a tuple of those.
It is recommended that the number of samples in the baselines'
tensors is larger than one.
n_samples (int, optional): The number of randomly generated examples
per sample in the input batch. Random examples are
generated by adding gaussian random noise to each sample.
Default: `5` if `n_samples` is not provided.
stdevs (float or tuple of float, optional): The standard deviation
of gaussian noise with zero mean that is added to each
input in the batch. If `stdevs` is a single float value
then that same value is used for all inputs. If it is
a tuple, then it must have the same length as the inputs
tuple. In this case, each stdev value in the stdevs tuple
corresponds to the input with the same index in the inputs
tuple.
Default: 0.0
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It can contain a tuple of ND tensors or
any arbitrary python type of any shape.
In case of the ND tensor the first dimension of the
tensor must correspond to the batch size. It will be
repeated `n_samples` times, once for each randomly generated
input sample.
Note that the gradients are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Attribution score computed based on GradientSHAP with respect
to each input feature. Attributions will always be
the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> neuron_grad_shap = NeuronGradientShap(net, net.linear2)
>>> input = torch.randn(3, 3, 32, 32, requires_grad=True)
>>> # choosing baselines randomly
>>> baselines = torch.randn(20, 3, 32, 32)
>>> # Computes gradient SHAP of first neuron in linear2 layer
>>> # with respect to the inputs of the network.
>>> # Attribution size matches input size: 3x3x32x32
>>> attribution = neuron_grad_shap.attribute(input, neuron_selector=0,
>>>                                          baselines=baselines)
"""
gs = GradientShap(self.forward_func, self.multiplies_by_inputs)
gs.gradient_func = construct_neuron_grad_fn(
self.layer,
neuron_selector,
self.device_ids,
attribute_to_neuron_input=attribute_to_neuron_input,
)
# NOTE: using __wrapped__ to not log
return gs.attribute.__wrapped__( # type: ignore
gs, # self
inputs,
baselines,
n_samples=n_samples,
stdevs=stdevs,
additional_forward_args=additional_forward_args,
)
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
|
#!/usr/bin/env python3
from typing import Any, Callable, cast, Tuple, Union
from captum._utils.gradient import construct_neuron_grad_fn
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.deep_lift import DeepLift, DeepLiftShap
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class NeuronDeepLift(NeuronAttribution, GradientAttribution):
r"""
Implements DeepLIFT algorithm for the neuron based on the following paper:
Learning Important Features Through Propagating Activation Differences,
Avanti Shrikumar, et al.
https://arxiv.org/abs/1704.02685
and the gradient formulation proposed in:
Towards better understanding of gradient-based attribution methods for
deep neural networks, Marco Ancona, et al.
https://openreview.net/pdf?id=Sy21R9JAW
This implementation supports only Rescale rule. RevealCancel rule will
be supported in later releases.
Although DeepLIFT's (Rescale rule) attribution quality is comparable with
Integrated Gradients, it runs significantly faster than Integrated
Gradients and is preferred for large datasets.
Currently we only support a limited number of non-linear activations
but the plan is to expand the list in the future.
Note: Currently we cannot access the building blocks of PyTorch's
built-in LSTMs, RNNs and GRUs, such as Tanh and Sigmoid.
Nonetheless, it is possible to build custom LSTMs, RNNs and GRUs
with performance similar to built-in ones using TorchScript.
More details on how to build custom RNNs can be found here:
https://pytorch.org/blog/optimizing-cuda-rnn-with-torchscript/
"""
def __init__(
self, model: Module, layer: Module, multiply_by_inputs: bool = True
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (torch.nn.Module): Layer for which neuron attributions are computed.
Attributions for a particular neuron for the input or output
of this layer are computed using the argument neuron_selector
in the attribute method.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of Neuron DeepLift, if `multiply_by_inputs`
is set to True, final sensitivity scores
are being multiplied by (inputs - baselines).
This flag applies only if `custom_attribution_func` is
set to None.
"""
NeuronAttribution.__init__(self, model, layer)
GradientAttribution.__init__(self, model)
self._multiply_by_inputs = multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
baselines: BaselineType = None,
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which layer
attributions are computed. If model takes a
single tensor as input, a single input tensor should be
provided. If model takes multiple tensors as input,
a tuple of the input tensors should be provided. It is
assumed that for all given input tensors, dimension 0
corresponds to the number of examples (aka batch size),
and if multiple input tensors are provided, the examples
must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided
to model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
custom_attribution_func (Callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided, we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same
length. `custom_attribution_func` returns a tuple of
attribution tensors that have the same length as the
`inputs`.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Computes attributions using Deeplift's rescale rule for
particular neuron with respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> # creates an instance of NeuronDeepLift to interpret neurons
>>> # in the conv4 layer.
>>> dl = NeuronDeepLift(net, net.conv4)
>>> input = torch.randn(1, 3, 32, 32, requires_grad=True)
>>> # Computes deeplift attribution scores for conv4 layer and neuron
>>> # index (4,1,2).
>>> attribution = dl.attribute(input, (4,1,2))
"""
dl = DeepLift(cast(Module, self.forward_func), self.multiplies_by_inputs)
dl.gradient_func = construct_neuron_grad_fn(
self.layer,
neuron_selector,
attribute_to_neuron_input=attribute_to_neuron_input,
)
# NOTE: using __wrapped__ to not log
return dl.attribute.__wrapped__( # type: ignore
dl, # self
inputs,
baselines,
additional_forward_args=additional_forward_args,
custom_attribution_func=custom_attribution_func,
)
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
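# Illustrative sketch (a hypothetical helper, not part of the library): a
# custom_attribution_func with the signature documented above that simply
# reproduces the default rule, multipliers * (inputs - baselines).
def _example_custom_attribution_func(
    multipliers: Tuple[Tensor, ...],
    inputs: Tuple[Tensor, ...],
    baselines: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
    # Returns one attribution tensor per input tensor, with matching shapes.
    return tuple(
        mult * (inp - base)
        for mult, inp, base in zip(multipliers, inputs, baselines)
    )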
class NeuronDeepLiftShap(NeuronAttribution, GradientAttribution):
r"""
Extends NeuronAttribution and uses LayerDeepLiftShap algorithms and
approximates SHAP values for the given `layer` and `neuron_selector`.
For each input sample - baseline pair it computes DeepLift attributions
with respect to the inputs or outputs of the given `layer` and
`neuron_selector`, and averages the resulting attributions across
baselines. Whether to compute the
attributions with respect to the inputs or outputs of the layer is defined
by the input flag `attribute_to_layer_input`.
More details about the algorithm can be found here:
https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf
Note that the explanation model:
1. Assumes that input features are independent of one another
2. Is linear, meaning that the explanations are modeled through
the additive composition of feature effects.
Although it assumes a linear model for each explanation, the overall
model across multiple explanations can be complex and non-linear.
"""
def __init__(
self, model: Module, layer: Module, multiply_by_inputs: bool = True
) -> None:
r"""
Args:
model (nn.Module): The reference to PyTorch model instance.
layer (torch.nn.Module): Layer for which neuron attributions are computed.
Attributions for a particular neuron for the input or output
of this layer are computed using the argument neuron_selector
in the attribute method.
Currently, only layers with a single tensor input and output
are supported.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of Neuron DeepLift Shap, if `multiply_by_inputs`
is set to True, final sensitivity scores
are being multiplied by (inputs - baselines).
This flag applies only if `custom_attribution_func` is
set to None.
"""
NeuronAttribution.__init__(self, model, layer)
GradientAttribution.__init__(self, model)
self._multiply_by_inputs = multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
baselines: Union[
TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric]
],
additional_forward_args: Any = None,
attribute_to_neuron_input: bool = False,
custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which layer
attributions are computed. If model takes a
single tensor as input, a single input tensor should be
provided. If model takes multiple tensors as input,
a tuple of the input tensors should be provided. It is
assumed that for all given input tensors, dimension 0
corresponds to the number of examples (aka batch size),
and if multiple input tensors are provided, the examples
must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
baselines (Tensor, tuple[Tensor, ...], or Callable):
Baselines define reference samples that are compared with
the inputs. In order to assign attribution scores DeepLift
computes the differences between the inputs/outputs and
corresponding references. Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
the first dimension equal to the number of examples
in the baselines' distribution. The remaining dimensions
must match with input tensor's dimension starting from
the second dimension.
- a tuple of tensors, if inputs is a tuple of tensors,
with the first dimension of any tensor inside the tuple
equal to the number of examples in the baseline's
distribution. The remaining dimensions must match
the dimensions of the corresponding input tensor
starting from the second dimension.
- callable function, optionally takes `inputs` as an
argument and either returns a single tensor
or a tuple of those.
It is recommended that the number of samples in the baselines'
tensors is larger than one.
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided
to model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
custom_attribution_func (Callable, optional): A custom function for
computing final attribution scores. This function can take
at least one and at most three arguments with the
following signature:
- custom_attribution_func(multipliers)
- custom_attribution_func(multipliers, inputs)
- custom_attribution_func(multipliers, inputs, baselines)
In case this function is not provided, we use the default
logic defined as: multipliers * (inputs - baselines)
It is assumed that all input arguments, `multipliers`,
`inputs` and `baselines` are provided in tuples of same
length. `custom_attribution_func` returns a tuple of
attribution tensors that have the same length as the
`inputs`.
Default: None
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Computes attributions using Deeplift's rescale rule for
particular neuron with respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> net = ImageClassifier()
>>> # creates an instance of NeuronDeepLiftShap to interpret neurons
>>> # in the conv4 layer.
>>> dl = NeuronDeepLiftShap(net, net.conv4)
>>> input = torch.randn(1, 3, 32, 32, requires_grad=True)
>>> # Computes deeplift attribution scores for conv4 layer and neuron
>>> # index (4,1,2).
>>> attribution = dl.attribute(input, (4,1,2))
"""
dl = DeepLiftShap(cast(Module, self.forward_func), self.multiplies_by_inputs)
dl.gradient_func = construct_neuron_grad_fn(
self.layer,
neuron_selector,
attribute_to_neuron_input=attribute_to_neuron_input,
)
# NOTE: using __wrapped__ to not log
return dl.attribute.__wrapped__( # type: ignore
dl, # self
inputs,
baselines,
additional_forward_args=additional_forward_args,
custom_attribution_func=custom_attribution_func,
)
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
from captum._utils.gradient import construct_neuron_grad_fn
from captum._utils.typing import TensorOrTupleOfTensorsGeneric
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class NeuronIntegratedGradients(NeuronAttribution, GradientAttribution):
r"""
Approximates the integral of gradients for a particular neuron
along the path from a baseline input to the given input.
If no baseline is provided, the default baseline is the zero tensor.
More details regarding the integrated gradient method can be found in the
original paper here:
https://arxiv.org/abs/1703.01365
Note that this method is equivalent to applying integrated gradients
where the output is the output of the identified neuron.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of Neuron Integrated Gradients,
if `multiply_by_inputs` is set to True, final
sensitivity scores are being multiplied
by (inputs - baselines).
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
baselines: Union[None, Tensor, Tuple[Tensor, ...]] = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which neuron integrated
gradients are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_selector (int, Callable, tuple[int], or slice):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers or slice objects. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
The elements of the tuple can be either integers or
slice objects (slice object allows indexing a
range of neurons rather than individual ones).
If any of the tuple elements is a slice object, the
indexed output tensor is used for attribution. Note
that specifying a slice of a tensor would amount to
computing the attribution of the sum of the specified
neurons, and not the individual neurons independently.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a neuron or
aggregate of the layer's neurons for attribution.
For example, this function could return the
sum of the neurons in the layer or sum of neurons with
activations in a particular range. It is expected that
this function returns either a tensor with one element
or a 1D tensor with length equal to batch_size (one scalar
per input example)
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the starting point from which integral
is computed.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (str, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `gausslegendre` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size at most internal_batch_size,
which are computed (forward / backward passes)
sequentially. internal_batch_size must be at least equal to
#examples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations are
processed in one batch.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Integrated gradients for particular neuron with
respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.Conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> neuron_ig = NeuronIntegratedGradients(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
>>> # which indexes a particular neuron in the layer output.
>>> # For this example, we choose the index (4,1,2).
>>> # Computes neuron integrated gradients for neuron with
>>> # index (4,1,2).
>>> attribution = neuron_ig.attribute(input, (4,1,2))
"""
ig = IntegratedGradients(self.forward_func, self.multiplies_by_inputs)
ig.gradient_func = construct_neuron_grad_fn(
self.layer, neuron_selector, self.device_ids, attribute_to_neuron_input
)
# NOTE: using __wrapped__ to not log
# Return only attributions and not delta
return ig.attribute.__wrapped__( # type: ignore
ig, # self
inputs,
baselines,
additional_forward_args=additional_forward_args,
n_steps=n_steps,
method=method,
internal_batch_size=internal_batch_size,
)
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
|
#!/usr/bin/env python3
import warnings
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_output,
_is_tuple,
_verify_select_neuron,
)
from captum._utils.gradient import compute_layer_gradients_and_eval
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.approximation_methods import approximation_parameters
from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution
from captum.attr._utils.batching import _batch_attribution
from captum.attr._utils.common import (
_format_input_baseline,
_reshape_and_sum,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class NeuronConductance(NeuronAttribution, GradientAttribution):
r"""
Computes conductance with respect to particular hidden neuron. The
returned output is in the shape of the input, showing the attribution
/ conductance of each input feature to the selected hidden layer neuron.
The details of the approach can be found here:
https://arxiv.org/abs/1805.12233
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which neuron attributions are computed.
Attributions for a particular neuron in the input or output
of this layer are computed using the argument neuron_selector
in the attribute method.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in
then that type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
More details can be found here:
https://arxiv.org/abs/1711.06104
In case of Neuron Conductance,
if `multiply_by_inputs` is set to True, final
sensitivity scores are being multiplied
by (inputs - baselines).
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
self._multiply_by_inputs = multiply_by_inputs
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_selector: Union[int, Tuple[int, ...], Callable],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "riemann_trapezoid",
internal_batch_size: Union[None, int] = None,
attribute_to_neuron_input: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which neuron
conductance is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_selector (int, Callable, or tuple[int]):
Selector for neuron
in given layer for which attribution is desired.
Neuron selector can be provided as:
- a single integer, if the layer output is 2D. This integer
selects the appropriate neuron column in the layer input
or output
- a tuple of integers. Length of this
tuple must be one less than the number of dimensions
in the input / output of the given layer (since
dimension 0 corresponds to number of examples).
This can be used as long as the layer input / output
is a single tensor.
- a callable, which should
take the target layer as input (single tensor or tuple
if multiple tensors are in layer) and return a selected
neuron - output shape should be 1D with length equal to
batch_size (one scalar per input example)
NOTE: Callables applicable for neuron conductance are
less general than those of other methods and should
NOT aggregate values of the layer, only return a specific
output. This option should only be used in cases where the
layer input / output is a tuple of tensors, where the other
options would not suffice. This limitation is necessary since
neuron conductance, unlike other neuron methods, also utilizes
the gradient of the output with respect to the intermediate neuron,
which cannot be computed for aggregations of multiple
intermediate neurons.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define the starting point from which integral
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (str, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `riemann_trapezoid` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size at most internal_batch_size,
which are computed (forward / backward passes)
sequentially. internal_batch_size must be at least equal to
#examples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations are
processed in one batch.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neuron, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Conductance for
particular neuron with respect to each input feature.
Attributions will always be the same size as the provided
inputs, with each value providing the attribution of the
corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.Conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> neuron_cond = NeuronConductance(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
>>> # which indexes a particular neuron in the layer output.
>>> # Computes neuron conductance for neuron with
>>> # index (4,1,2).
>>> attribution = neuron_cond.attribute(input, (4,1,2))
"""
if callable(neuron_selector):
warnings.warn(
"The neuron_selector provided is a callable. Please ensure that this"
" function only selects neurons from the given layer; aggregating"
" or performing other operations on the tensor may lead to inaccurate"
" results."
)
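# Standard pre-processing: remember whether inputs were passed as a tuple,
# format inputs / baselines into tuples and validate the integration settings.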
is_inputs_tuple = _is_tuple(inputs)
inputs, baselines = _format_input_baseline(inputs, baselines)
_validate_input(inputs, baselines, n_steps, method)
num_examples = inputs[0].shape[0]
if internal_batch_size is not None:
attrs = _batch_attribution(
self,
num_examples,
internal_batch_size,
n_steps,
inputs=inputs,
baselines=baselines,
neuron_selector=neuron_selector,
target=target,
additional_forward_args=additional_forward_args,
method=method,
attribute_to_neuron_input=attribute_to_neuron_input,
)
else:
attrs = self._attribute(
inputs=inputs,
neuron_selector=neuron_selector,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
n_steps=n_steps,
method=method,
attribute_to_neuron_input=attribute_to_neuron_input,
)
return _format_output(is_inputs_tuple, attrs)
def _attribute(
self,
inputs: Tuple[Tensor, ...],
neuron_selector: Union[int, Tuple[int, ...], Callable],
baselines: Tuple[Union[Tensor, int, float], ...],
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "riemann_trapezoid",
attribute_to_neuron_input: bool = False,
step_sizes_and_alphas: Union[None, Tuple[List[float], List[float]]] = None,
) -> Tuple[Tensor, ...]:
num_examples = inputs[0].shape[0]
total_batch = num_examples * n_steps
if step_sizes_and_alphas is None:
# retrieve step size and scaling factor for specified approximation method
step_sizes_func, alphas_func = approximation_parameters(method)
step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)
else:
step_sizes, alphas = step_sizes_and_alphas
# Compute scaled inputs from baseline to final input.
scaled_features_tpl = tuple(
torch.cat(
[baseline + alpha * (input - baseline) for alpha in alphas], dim=0
).requires_grad_()
for input, baseline in zip(inputs, baselines)
)
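# Editor's note (illustrative): each tensor in scaled_features_tpl stacks the
# full batch once per integration point, e.g. for num_examples = 2 and
# n_steps = 3 it has 6 rows laid out along dim 0 as
# [batch @ alpha_1, batch @ alpha_2, batch @ alpha_3], tracing the straight
# path from the baseline to the input.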
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# apply number of steps to additional forward args
# currently, number of steps is applied only to additional forward arguments
# that are nd-tensors. It is assumed that the first dimension is
# the number of batches.
# dim -> (#examples * #steps x additional_forward_args[0].shape[1:], ...)
input_additional_args = (
_expand_additional_forward_args(additional_forward_args, n_steps)
if additional_forward_args is not None
else None
)
expanded_target = _expand_target(target, n_steps)
# Conductance Gradients - Returns gradient of output with respect to
# hidden layer and hidden layer evaluated at each input.
layer_gradients, layer_eval, input_grads = compute_layer_gradients_and_eval(
forward_fn=self.forward_func,
layer=self.layer,
inputs=scaled_features_tpl,
target_ind=expanded_target,
additional_forward_args=input_additional_args,
gradient_neuron_selector=neuron_selector,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
mid_grads = _verify_select_neuron(layer_gradients, neuron_selector)
scaled_input_gradients = tuple(
input_grad
* mid_grads.reshape((total_batch,) + (1,) * (len(input_grad.shape) - 1))
for input_grad in input_grads
)
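# Editor's note (illustrative): this is the chain-rule step of conductance.
# mid_grads holds d(output)/d(selected neuron) at every scaled input, and each
# input_grad holds d(selected neuron)/d(input), so their product (broadcast
# over the non-batch dimensions) is the per-step contribution that flows
# through the chosen neuron to each input feature.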
# Multiplies by the appropriate step size.
scaled_grads = tuple(
scaled_input_gradient.contiguous().view(n_steps, -1)
* torch.tensor(step_sizes).view(n_steps, 1).to(scaled_input_gradient.device)
for scaled_input_gradient in scaled_input_gradients
)
# Aggregates across all steps for each tensor in the input tuple
total_grads = tuple(
_reshape_and_sum(scaled_grad, n_steps, num_examples, input_grad.shape[1:])
for (scaled_grad, input_grad) in zip(scaled_grads, input_grads)
)
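# Editor's note (illustrative): _reshape_and_sum collapses the n_steps axis,
# so total_grads approximates the path integral of the neuron-routed
# gradients; multiplying by (input - baseline) below (when
# multiplies_by_inputs is True) yields the final conductance attribution.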
if self.multiplies_by_inputs:
# computes attribution for each tensor in input tuple
# attributions has the same dimensionality as inputs
attributions = tuple(
total_grad * (input - baseline)
for total_grad, input, baseline in zip(total_grads, inputs, baselines)
)
else:
attributions = total_grads
return attributions
@property
def multiplies_by_inputs(self):
return self._multiply_by_inputs
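# --------------------------------------------------------------------------
# Editor's sketch (not part of Captum): a minimal, self-contained usage
# example for NeuronConductance on a toy model. `TinyCNN`, the neuron index
# (4, 1, 2) and target class 0 are illustrative assumptions only.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    class TinyCNN(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            # Nx3x32x32 -> Nx12x32x32 -> Nx10
            self.conv1 = nn.Conv2d(3, 12, kernel_size=3, padding=1)
            self.relu = nn.ReLU()
            self.fc = nn.Linear(12 * 32 * 32, 10)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.fc(self.relu(self.conv1(x)).flatten(1))

    net = TinyCNN()
    neuron_cond = NeuronConductance(net, net.conv1)
    inp = torch.randn(2, 3, 32, 32, requires_grad=True)
    # Conductance of every input pixel routed through the conv1 neuron at
    # index (4, 1, 2), for target class 0; the result matches the input shape.
    attributions = neuron_cond.attribute(inp, neuron_selector=(4, 1, 2), target=0)
    print(attributions.shape)  # torch.Size([2, 3, 32, 32])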
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.common import (
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_output,
)
from captum._utils.gradient import compute_layer_gradients_and_eval
from captum._utils.typing import BaselineType, TargetType
from captum.attr._utils.approximation_methods import approximation_parameters
from captum.attr._utils.attribution import GradientAttribution, LayerAttribution
from captum.attr._utils.batching import _batch_attribution
from captum.attr._utils.common import (
_format_input_baseline,
_reshape_and_sum,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class InternalInfluence(LayerAttribution, GradientAttribution):
r"""
Computes internal influence by approximating the integral of gradients
for a particular layer along the path from a baseline input to the
given input.
If no baseline is provided, the default baseline is the zero tensor.
More details on this approach can be found here:
https://arxiv.org/abs/1802.03788
Note that this method is similar to applying integrated gradients
with the layer treated as the input, i.e. integrating the gradient
of the output with respect to the layer.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (Callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
device_ids (list[int]): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
attribute_to_layer_input: bool = False,
) -> Union[Tensor, Tuple[Tensor, ...]]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which internal
influence is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (scalar, Tensor, tuple of scalar, or Tensor, optional):
Baselines define a starting point from which integral
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the case when `baselines` is not provided, we internally
use a zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (Any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (str, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `gausslegendre` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size at most internal_batch_size,
which are computed (forward / backward passes)
sequentially. internal_batch_size must be at least equal to
#examples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations
are processed in one batch.
Default: None
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to the
layer inputs; otherwise they will be computed with respect
to the layer outputs.
Note that it is currently assumed that either the input
or the output of the internal layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
Internal influence of each neuron in given
layer output. Attributions will always be the same size
as the output or input of the given layer depending on
whether `attribute_to_layer_input` is set to `False` or
`True` respectively.
Attributions are returned in a tuple if
the layer inputs / outputs contain multiple tensors,
otherwise a single tensor is returned.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> layer_int_inf = InternalInfluence(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes layer internal influence.
>>> # attribution size matches layer output, Nx12x32x32
>>> attribution = layer_int_inf.attribute(input)
"""
inputs, baselines = _format_input_baseline(inputs, baselines)
_validate_input(inputs, baselines, n_steps, method)
if internal_batch_size is not None:
num_examples = inputs[0].shape[0]
attrs = _batch_attribution(
self,
num_examples,
internal_batch_size,
n_steps,
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
method=method,
attribute_to_layer_input=attribute_to_layer_input,
)
else:
attrs = self._attribute(
inputs=inputs,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
n_steps=n_steps,
method=method,
attribute_to_layer_input=attribute_to_layer_input,
)
return attrs
def _attribute(
self,
inputs: Tuple[Tensor, ...],
baselines: Tuple[Union[Tensor, int, float], ...],
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
attribute_to_layer_input: bool = False,
step_sizes_and_alphas: Union[None, Tuple[List[float], List[float]]] = None,
) -> Union[Tensor, Tuple[Tensor, ...]]:
if step_sizes_and_alphas is None:
# retrieve step size and scaling factor for specified approximation method
step_sizes_func, alphas_func = approximation_parameters(method)
step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)
else:
step_sizes, alphas = step_sizes_and_alphas
# Compute scaled inputs from baseline to final input.
scaled_features_tpl = tuple(
torch.cat(
[baseline + alpha * (input - baseline) for alpha in alphas], dim=0
).requires_grad_()
for input, baseline in zip(inputs, baselines)
)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# apply number of steps to additional forward args
# currently, number of steps is applied only to additional forward arguments
# that are nd-tensors. It is assumed that the first dimension is
# the number of batches.
# dim -> (bsz * #steps x additional_forward_args[0].shape[1:], ...)
input_additional_args = (
_expand_additional_forward_args(additional_forward_args, n_steps)
if additional_forward_args is not None
else None
)
expanded_target = _expand_target(target, n_steps)
# Returns gradient of output with respect to hidden layer.
layer_gradients, _ = compute_layer_gradients_and_eval(
forward_fn=self.forward_func,
layer=self.layer,
inputs=scaled_features_tpl,
target_ind=expanded_target,
additional_forward_args=input_additional_args,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
# Flatten grads so that they can be multiplied by the per-step size.
# Call contiguous() so that view() works on non-contiguous gradients.
scaled_grads = tuple(
layer_grad.contiguous().view(n_steps, -1)
* torch.tensor(step_sizes).view(n_steps, 1).to(layer_grad.device)
for layer_grad in layer_gradients
)
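# Editor's note (illustrative): combined with the summation below, this
# realizes the quadrature sum_i step_size_i * dF/dh(x_i) over the n_steps
# scaled inputs x_i, i.e. a numerical approximation of the integral of the
# layer gradients along the straight path from the baseline to the input.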
# aggregates across all steps for each tensor in the input tuple
attrs = tuple(
_reshape_and_sum(
scaled_grad, n_steps, inputs[0].shape[0], layer_grad.shape[1:]
)
for scaled_grad, layer_grad in zip(scaled_grads, layer_gradients)
)
return _format_output(len(attrs) > 1, attrs)
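# --------------------------------------------------------------------------
# Editor's sketch (not part of Captum): a minimal, self-contained usage
# example for InternalInfluence on a toy model. `TinyCNN` and target class 0
# are illustrative assumptions only.
if __name__ == "__main__":
    import torch.nn as nn

    class TinyCNN(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            # Nx3x32x32 -> Nx12x32x32 -> Nx10
            self.conv1 = nn.Conv2d(3, 12, kernel_size=3, padding=1)
            self.fc = nn.Linear(12 * 32 * 32, 10)

        def forward(self, x: Tensor) -> Tensor:
            return self.fc(torch.relu(self.conv1(x)).flatten(1))

    net = TinyCNN()
    layer_int_inf = InternalInfluence(net, net.conv1)
    inp = torch.randn(2, 3, 32, 32)
    # Attribution has the shape of conv1's output: 2 x 12 x 32 x 32.
    attributions = layer_int_inf.attribute(inp, target=0)
    print(attributions.shape)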
|
#!/usr/bin/env python3
import typing
from typing import Any, cast, List, Tuple, Union
from captum._utils.common import (
_format_tensor_into_tuples,
_reduce_list,
_sort_key_list,
)
from captum._utils.gradient import (
apply_gradient_requirements,
compute_gradients,
undo_gradient_requirements,
)
from captum._utils.typing import (
Literal,
ModuleOrModuleList,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from captum.attr._core.lrp import LRP
from captum.attr._utils.attribution import LayerAttribution
from torch import Tensor
from torch.nn import Module
class LayerLRP(LRP, LayerAttribution):
r"""
Layer-wise relevance propagation is based on a backward propagation
mechanism applied sequentially to all layers of the model. Here, the
model output score represents the initial relevance which is decomposed
into values for each neuron of the underlying layers. The decomposition
is defined by rules that are chosen for each layer, involving its weights
and activations. Details on the model can be found in the original paper
[https://doi.org/10.1371/journal.pone.0130140]. The implementation is
inspired by the tutorial of the same group
[https://doi.org/10.1016/j.dsp.2017.10.011] and the publication by
Ancona et al. [https://openreview.net/forum?id=Sy21R9JAW].
"""
def __init__(self, model: Module, layer: ModuleOrModuleList) -> None:
"""
Args:
model (Module): The forward function of the model or
any modification of it. Custom rules for a given layer need to
be defined as attribute
`module.rule` and need to be of type PropagationRule.
layer (torch.nn.Module or list(torch.nn.Module)): Layer or layers
for which attributions are computed.
The size and dimensionality of the attributions
corresponds to the size and dimensionality of the layer's
input or output depending on whether we attribute to the
inputs or outputs of the layer. If value is None, the
relevance for all layers is returned in attribution.
"""
LayerAttribution.__init__(self, model, layer)
LRP.__init__(self, model)
if hasattr(self.model, "device_ids"):
self.device_ids = cast(List[int], self.model.device_ids)
@typing.overload # type: ignore
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
attribute_to_layer_input: bool = False,
verbose: bool = False,
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
attribute_to_layer_input: bool = False,
verbose: bool = False,
) -> Tuple[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Union[Tensor, List[Tensor]],
]:
...
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
attribute_to_layer_input: bool = False,
verbose: bool = False,
) -> Union[
Tensor,
Tuple[Tensor, ...],
List[Union[Tensor, Tuple[Tensor, ...]]],
Tuple[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Union[Tensor, List[Tensor]],
],
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which relevance is
propagated.
If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to the
layer input; otherwise they will be computed with respect
to the layer output.
Default: False
verbose (bool, optional): Indicates whether information on application
of rules is printed during propagation.
Default: False
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions** or 2-element tuple of
**attributions**, **delta** or list of **attributions** and **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The propagated relevance values for the given layer, computed
with respect to the layer's input or output depending on
`attribute_to_layer_input`. Attributions will always be the
same size as the layer's input or output, with each value
providing the relevance of the corresponding activation.
If the layer input / output is a single tensor, a single tensor
is returned; if it is a tuple, a tuple of correspondingly sized
tensors is returned. The sum of attributions is one and does not
correspond to the prediction score as in other implementations.
If attributions for all layers are returned (layer=None), a list
of tensors or tuples of tensors is returned with one entry per
layer.
- **delta** (*Tensor* or list of *Tensor*
returned if return_convergence_delta=True):
Delta is calculated per example, meaning that the number of
elements in returned delta tensor is equal to the number of
examples in input.
If attributions for all layers are returned (layer=None) a list
of tensors is returned with entries for
each layer.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities. It has one
>>> # Conv2D and a ReLU layer.
>>> net = ImageClassifier()
>>> layer_lrp = LayerLRP(net, net.conv1)
>>> input = torch.randn(3, 3, 32, 32)
>>> # Attribution size matches the output of conv1
>>> attribution = layer_lrp.attribute(input, target=5)
"""
self.verbose = verbose
self._original_state_dict = self.model.state_dict()
self.layers = []
self._get_layers(self.model)
self._check_and_attach_rules()
self.attribute_to_layer_input = attribute_to_layer_input
self.backward_handles = []
self.forward_handles = []
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
try:
# 1. Forward pass
output = self._compute_output_and_change_weights(
inputs, target, additional_forward_args
)
self._register_forward_hooks()
# 2. Forward pass + backward pass
_ = compute_gradients(
self._forward_fn_wrapper, inputs, target, additional_forward_args
)
relevances = self._get_output_relevance(output)
finally:
self._restore_model()
undo_gradient_requirements(inputs, gradient_mask)
if return_convergence_delta:
delta: Union[Tensor, List[Tensor]]
if isinstance(self.layer, list):
delta = []
for relevance_layer in relevances:
delta.append(
self.compute_convergence_delta(relevance_layer, output)
)
else:
delta = self.compute_convergence_delta(
cast(Tuple[Tensor, ...], relevances), output
)
return relevances, delta # type: ignore
else:
return relevances # type: ignore
def _get_single_output_relevance(self, layer, output):
if self.attribute_to_layer_input:
normalized_relevances = layer.rule.relevance_input
else:
normalized_relevances = layer.rule.relevance_output
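# Editor's note (illustrative): relevance_input / relevance_output are filled
# in by the propagation-rule hooks and keyed by device id (relevant for
# DataParallel); the sort/reduce below stitches the per-device pieces back
# into a single tensor (or tuple of tensors) in batch order.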
key_list = _sort_key_list(list(normalized_relevances.keys()), self.device_ids)
normalized_relevances = _reduce_list(
[normalized_relevances[device_id] for device_id in key_list]
)
if isinstance(normalized_relevances, tuple):
return tuple(
normalized_relevance
* output.reshape((-1,) + (1,) * (normalized_relevance.dim() - 1))
for normalized_relevance in normalized_relevances
)
else:
return normalized_relevances * output.reshape(
(-1,) + (1,) * (normalized_relevances.dim() - 1)
)
def _get_output_relevance(self, output):
if isinstance(self.layer, list):
relevances = []
for layer in self.layer:
relevances.append(self._get_single_output_relevance(layer, output))
return relevances
else:
return self._get_single_output_relevance(self.layer, output)
@staticmethod
def _convert_list_to_tuple(
relevances: Union[List[Any], Tuple[Any, ...]]
) -> Tuple[Any, ...]:
if isinstance(relevances, list):
return tuple(relevances)
else:
return relevances
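# --------------------------------------------------------------------------
# Editor's sketch (not part of Captum): a minimal, self-contained usage
# example for LayerLRP on a toy model whose leaf modules (Conv2d, ReLU,
# Linear) all have default LRP rules. `TinyCNN` and target class 5 are
# illustrative assumptions only.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    class TinyCNN(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            # Nx3x32x32 -> Nx12x32x32 -> Nx10
            self.conv1 = nn.Conv2d(3, 12, kernel_size=3, padding=1)
            self.relu = nn.ReLU()
            self.fc = nn.Linear(12 * 32 * 32, 10)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.fc(self.relu(self.conv1(x)).flatten(1))

    net = TinyCNN()
    layer_lrp = LayerLRP(net, net.conv1)
    inp = torch.randn(3, 3, 32, 32)
    # Relevance at conv1's output for target class 5: shape 3 x 12 x 32 x 32,
    # plus a per-example convergence delta.
    relevance, delta = layer_lrp.attribute(
        inp, target=5, return_convergence_delta=True
    )
    print(relevance.shape, delta.shape)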
|