===========================================================================================================
SOURCE CODE FILE: _check.py
LINES: 3
SIZE: 9.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_check.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import ast
import inspect
import textwrap
import warnings
import torch
class AttributeTypeIsSupportedChecker(ast.NodeVisitor):
"""Check the ``__init__`` method of a given ``nn.Module``.
It ensures that all instance-level attributes can be properly initialized.
Specifically, we do type inference based on attribute values...even
if the attribute in question has already been typed using
Python3-style annotations or ``torch.jit.annotate``. This means that
setting an instance-level attribute to ``[]`` (for ``List``),
``{}`` (for ``Dict``), or ``None`` (for ``Optional``) isn't enough
information for us to properly initialize that attribute.
An object of this class can walk a given ``nn.Module``'s AST and
determine if it meets our requirements or not.
Known limitations
1. We can only check the AST nodes for certain constructs; we can't
``eval`` arbitrary expressions. This means that function calls,
class instantiations, and complex expressions that resolve to one of
the "empty" values specified above will NOT be flagged as
problematic.
2. We match on string literals, so if the user decides to use a
non-standard import (e.g. `from typing import List as foo`), we
won't catch it.
Example:
.. code-block:: python
class M(torch.nn.Module):
def fn(self):
return []
def __init__(self) -> None:
super().__init__()
self.x: List[int] = []
def forward(self, x: List[int]):
self.x = x
return 1
The above code will pass the ``AttributeTypeIsSupportedChecker``
check since we have a function call in ``__init__``. However,
it will still fail later with the ``RuntimeError`` "Tried to set
nonexistent attribute: x. Did you forget to initialize it in
__init__()?".
Args:
nn_module - The instance of ``torch.nn.Module`` whose
``__init__`` method we wish to check
"""
def check(self, nn_module: torch.nn.Module) -> None:
source_lines = inspect.getsource(nn_module.__class__.__init__)
# Ignore comments no matter the indentation
def is_useless_comment(line):
line = line.strip()
return line.startswith("#") and not line.startswith("# type:")
source_lines = "\n".join(
[l for l in source_lines.split("\n") if not is_useless_comment(l)]
)
# This AST only contains the `__init__` method of the nn.Module
init_ast = ast.parse(textwrap.dedent(source_lines))
# Get items annotated in the class body
self.class_level_annotations = list(nn_module.__annotations__.keys())
# Flag for later
self.visiting_class_level_ann = False
self.visit(init_ast)
def _is_empty_container(self, node: ast.AST, ann_type: str) -> bool:
if ann_type == "List":
# Assigning `[]` to a `List` type gives you a Node where
# value=List(elts=[], ctx=Load())
if not isinstance(node, ast.List):
return False
if node.elts:
return False
elif ann_type == "Dict":
# Assigning `{}` to a `Dict` type gives you a Node where
# value=Dict(keys=[], values=[])
if not isinstance(node, ast.Dict):
return False
if node.keys:
return False
elif ann_type == "Optional":
# Assigning `None` to an `Optional` type gives you a
# Node where value=Constant(value=None, kind=None)
if not isinstance(node, ast.Constant):
return False
if node.value: # type: ignore[attr-defined]
return False
return True
def visit_Assign(self, node):
"""Store assignment state when assigning to a Call Node.
If we're visiting a Call Node (the right-hand side of an
assignment statement), we won't be able to check the variable
that we're assigning to (the left-hand side of an assignment).
Because of this, we need to store this state in visit_Assign.
(Luckily, we only have to do this if we're assigning to a Call
Node, i.e. ``torch.jit.annotate``. If we're using normal Python
annotations, we'll be visiting an AnnAssign Node, which has its
target built in.)
"""
try:
if (
isinstance(node.value, ast.Call)
and node.targets[0].attr in self.class_level_annotations
):
self.visiting_class_level_ann = True
except AttributeError:
return
self.generic_visit(node)
self.visiting_class_level_ann = False
def visit_AnnAssign(self, node):
"""Visit an AnnAssign node in an ``nn.Module``'s ``__init__`` method.
It checks if it conforms to our attribute annotation rules."""
# If we have a local variable
try:
if node.target.value.id != "self":
return
except AttributeError:
return
# If we have an attribute that's already been annotated at the
# class level
if node.target.attr in self.class_level_annotations:
return
# TODO @ansley: add `Union` once landed
# NB: Even though `Tuple` is a "container", we don't want to
# check for it here. `Tuple` functions as a type with an
# "infinite" number of subtypes, in the sense that you can have
# `Tuple[()]`, `Tuple[T1]`, `Tuple[T2]`, `Tuple[T1, T2]`,
# `Tuple[T2, T1]` and so on, and none of these subtypes can be
# used in place of the other. Therefore, assigning an empty
# tuple in `__init__` CORRECTLY means that that variable
# cannot be reassigned later to a non-empty tuple. Same
# deal with `NamedTuple`
containers = {"List", "list", "Dict", "dict", "Optional"}
# If we're not evaluating one of the specified problem types
try:
if node.annotation.value.id not in containers:
return
except AttributeError:
# To evaluate a base type (`str`, `int`, etc.), we would
# have needed to get the name through `node.annotation.id`
# instead of `node.annotation.value.id`. Seems that we're
# not evaluating one of our "containers"
return
# Check if the assigned variable is empty
ann_type = node.annotation.value.id
if not self._is_empty_container(node.value, ann_type):
return
warnings.warn(
"The TorchScript type system doesn't support "
"instance-level annotations on empty non-base "
"types in `__init__`. Instead, either 1) use a "
"type annotation in the class body, or 2) wrap "
"the type in `torch.jit.Attribute`."
)
def visit_Call(self, node):
"""Determine if a Call node is 'torch.jit.annotate' in __init__.
Visit a Call node in an ``nn.Module``'s ``__init__``
method and determine if it's ``torch.jit.annotate``. If so,
see if it conforms to our attribute annotation rules.
"""
# If we have an attribute that's already been annotated at the
# class level
if self.visiting_class_level_ann:
return
# If this isn't a call to `torch.jit.annotate`
try:
if (
node.func.value.value.id != "torch"
or node.func.value.attr != "jit"
or node.func.attr != "annotate"
):
self.generic_visit(node)
elif (
node.func.value.value.id != "jit" or node.func.value.attr != "annotate"
):
self.generic_visit(node)
except AttributeError:
# Looks like we didn't even have the right node structure
# to check for `torch.jit.annotate` in the first place
self.generic_visit(node)
# Invariant: we have a `torch.jit.annotate` or a
# `torch.annotate` call
# A Call Node for `torch.jit.annotate` should have an `args`
# list of length 2 where args[0] represents the annotation and
# args[1] represents the actual value
if len(node.args) != 2:
return
if not isinstance(node.args[0], ast.Subscript):
return
# See notes in `visit_AnnAssign` r.e. containers
containers = {"List", "Dict", "Optional"}
try:
ann_type = node.args[0].value.id # type: ignore[attr-defined]
except AttributeError:
return
if ann_type not in containers:
return
# Check if the assigned variable is empty
if not self._is_empty_container(node.args[1], ann_type):
return
warnings.warn(
"The TorchScript type system doesn't support "
"instance-level annotations on empty non-base "
"types in `__init__`. Instead, either 1) use a "
"type annotation in the class body, or 2) wrap "
"the type in `torch.jit.Attribute`."
)
```
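Usage sketch (illustrative, not part of the file above): the module below triggers the warning this checker emits, while annotating the attribute in the class body, as the warning suggests, avoids it. The class and attribute names are made up for the example.

```py
from typing import List

import torch


class BadModule(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        # Instance-level annotation on an empty container: the checker warns,
        # because `[]` alone doesn't tell TorchScript the element type.
        self.values: List[int] = []

    def forward(self, x: int) -> int:
        self.values.append(x)
        return len(self.values)


class GoodModule(torch.nn.Module):
    values: List[int]  # class-body annotation: no warning

    def __init__(self) -> None:
        super().__init__()
        self.values = []

    def forward(self, x: int) -> int:
        self.values.append(x)
        return len(self.values)


# Scripting BadModule emits the "instance-level annotations on empty non-base
# types" warning; compilation may then also fail for the same underlying reason.
try:
    torch.jit.script(BadModule())
except RuntimeError:
    pass

torch.jit.script(GoodModule())  # compiles cleanly
```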
=====================================================================================================================
SOURCE CODE FILE: _dataclass_impls.py
LINES: 3
SIZE: 6.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_dataclass_impls.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# Functions for synthesizing magic methods for JIT-compiled dataclasses
import ast
import dataclasses
import inspect
import os
from functools import partial
from typing import Callable
from torch._jit_internal import FAKE_FILENAME_PREFIX, is_optional
from torch._sources import ParsedDef, SourceContext
def _get_fake_filename(cls, method_name):
return os.path.join(FAKE_FILENAME_PREFIX, cls.__name__, method_name)
def compose_fn(cls, name: str, body_lines: list[str], signature: str) -> ParsedDef:
body = "\n".join(f" {b}" for b in body_lines)
decl = f"def {name}{signature}:\n{body}"
# Parse the function declaration
try:
py_ast = ast.parse(decl)
except SyntaxError as e:
# This should only happen if there's some unforeseeable change
# in the dataclasses module that makes our synthesized code fail
raise RuntimeError(
f"TorchScript failed to synthesize dataclass method '{name}' for class '{cls.__name__}'. "
"Please file a bug report at <https://github.com/pytorch/pytorch/issues>"
) from e
fake_filename = _get_fake_filename(cls, name)
# Parse the function
return ParsedDef(
py_ast,
ctx=SourceContext(
source=decl, filename=fake_filename, file_lineno=0, leading_whitespace_len=0
),
source=decl,
filename=fake_filename,
file_lineno=0,
)
def synthesize__init__(cls) -> ParsedDef:
# Supporting default factories in the way that people expect would sort of require us to
# allow compiling lambda functions, which is not currently supported.
if any(
field.default_factory is not dataclasses.MISSING
for field in dataclasses.fields(cls)
):
raise NotImplementedError(
"Default factory initializers are not supported in TorchScript dataclasses"
)
# Simply read off the generated __init__ signature from CPython's implementation. It'll be
# almost correct except for InitVar annotations, which we need to handle specially.
signature = inspect.signature(cls.__init__)
# Handle InitVars if needed (only works on Python 3.8+, when a `type` attribute was added to InitVar);
# see CPython commit here https://github.com/python/cpython/commit/01ee12ba35a333e8a6a25c4153c4a21838e9585c
init_vars: list[str] = []
params = []
for name, param in signature.parameters.items():
ann = param.annotation
if isinstance(ann, dataclasses.InitVar):
# The TorchScript interpreter can't handle InitVar annotations, so we unwrap the underlying type here
init_vars.append(name)
params.append(param.replace(annotation=ann.type)) # type: ignore[attr-defined]
else:
params.append(param)
signature = signature.replace(parameters=params)
body = [
# Assign all attributes to self
f"self.{field.name} = {field.name}"
for field in dataclasses.fields(cls)
if field.init and field.name not in init_vars
]
# Call user's impl of __post_init__ if it exists
if hasattr(cls, "__post_init__"):
body.append("self.__post_init__(" + ", ".join(init_vars) + ")")
return compose_fn(cls, "__init__", body or ["pass"], signature=str(signature))
# This is a placeholder at the moment since the TorchScript interpreter doesn't call __repr__
def synthesize__repr__(cls) -> ParsedDef:
return compose_fn(
cls,
"__repr__",
[
f"return '{cls.__name__}("
+ ", ".join(
[
f"{field.name}=self.{field.name}"
for field in dataclasses.fields(cls)
if field.repr
]
)
+ ")'"
],
signature="(self) -> str",
)
def synthesize__hash__(cls) -> ParsedDef:
return compose_fn(
cls,
"__hash__",
[
# This is just a placeholder to prevent compilation from failing; this won't even get called at
# all right now because the TorchScript interpreter doesn't call custom __hash__ implementations
"raise NotImplementedError('__hash__ is not supported for dataclasses in TorchScript')"
],
signature="(self) -> int",
)
# Implementation for __eq__ and __ne__
def synthesize_equality(cls, name: str, converse: str) -> ParsedDef:
return synthesize_comparison(
cls,
name,
allow_eq=True,
raise_on_none=False,
inner=[f"if val1 {converse} val2: return False"],
)
def synthesize_inequality(cls, name: str, op: str, allow_eq: bool) -> ParsedDef:
return synthesize_comparison(
cls,
name,
allow_eq,
raise_on_none=True,
inner=[
f"if val1 {op} val2: return True",
f"elif val2 {op} val1: return False",
],
)
def synthesize_comparison(
cls, name: str, allow_eq: bool, raise_on_none: bool, inner: list[str]
) -> ParsedDef:
body = []
for field in dataclasses.fields(cls):
if not field.compare:
continue
body.extend(
[
f"val1 = self.{field.name}",
f"val2 = other.{field.name}",
]
)
body.extend(
inner
if not is_optional(field.type)
else [
# Type refinement for optional fields; we need this to avoid type errors from the interpreter
"if val1 is not None and val2 is not None:",
*[" " + line for line in inner],
"elif (val1 is None) != (val2 is None):",
f" raise TypeError('Cannot compare {cls.__name__} with None')"
if raise_on_none
else " return False",
]
)
body.append(f"return {allow_eq}")
return compose_fn(
cls, name, body, signature=f"(self, other: {cls.__name__}) -> bool"
)
DATACLASS_MAGIC_METHODS: dict[str, Callable] = {
"__init__": synthesize__init__,
"__repr__": synthesize__repr__,
"__hash__": synthesize__hash__,
"__eq__": partial(synthesize_equality, name="__eq__", converse="!="),
"__ne__": partial(synthesize_equality, name="__ne__", converse="=="),
"__lt__": partial(synthesize_inequality, name="__lt__", op="<", allow_eq=False),
"__le__": partial(synthesize_inequality, name="__le__", op="<", allow_eq=True),
"__gt__": partial(synthesize_inequality, name="__gt__", op=">", allow_eq=False),
"__ge__": partial(synthesize_inequality, name="__ge__", op=">", allow_eq=True),
}
```
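Usage sketch (illustrative, not part of the file above): when a dataclass is referenced from scripted code, TorchScript compiles it using the synthesized methods registered in ``DATACLASS_MAGIC_METHODS``. The class and values below are made up; exact behavior depends on the dataclass integration wired up elsewhere in ``torch.jit``.

```py
from dataclasses import dataclass

import torch


@dataclass
class Point:
    x: float
    y: float

    def norm(self) -> float:
        return (self.x * self.x + self.y * self.y) ** 0.5


@torch.jit.script
def use_point(a: float, b: float) -> float:
    p = Point(a, b)  # synthesized __init__
    q = Point(a, b)
    if p == q:       # synthesized __eq__
        return p.norm()
    return 0.0


print(use_point(3.0, 4.0))  # expect 5.0
```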
=========================================================================================================================
SOURCE CODE FILE: _decomposition_utils.py
LINES: 1
SIZE: 0.40 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_decomposition_utils.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
from torch._ops import OpOverload, OpOverloadPacket
def _register_decomposition(op: OpOverload, graph: torch._C.Graph):
assert not isinstance(
op, OpOverloadPacket
), f"Must pass specific op overload, not overload packet, found {op}"
assert isinstance(op, OpOverload)
torch._C._jit_register_decomposition_for_schema(op._schema, graph)
```
====================================================================================================================
SOURCE CODE FILE: _decompositions.py
LINES: 1
SIZE: 4.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_decompositions.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
from torch import Tensor
aten = torch.ops.aten
import inspect
import warnings
from typing import Callable, Optional, TypeVar
from typing_extensions import ParamSpec
from torch.types import Number
decomposition_table: dict[str, torch.jit.ScriptFunction] = {}
function_name_set: set[str] = set()
_T = TypeVar("_T")
_P = ParamSpec("_P")
def check_decomposition_has_type_annotations(f):
inspect_empty = inspect._empty # type: ignore[attr-defined]
sig = inspect.signature(f)
for param in sig.parameters.values():
assert (
param.annotation != inspect_empty
), f"No signature on param {param.name} for function {f.name}"
assert (
sig.return_annotation != inspect_empty
), f"No return annotation for function {f.name}"
def signatures_match(decomposition_sig, torch_op_sig):
decomp_params = decomposition_sig.parameters
op_params = torch_op_sig.parameters
if len(decomp_params) != len(op_params):
return False
for decomp_param, op_param in zip(decomp_params.values(), op_params.values()):
# can't check full equality yet because not all fields are correctly deduced
# in the torch_op_sig - like default value
# can't check 'kind' bc
# kwarg-only values with defaults not yet supported in TS
inspect_empty = inspect._empty # type: ignore[attr-defined]
for field in ["name", "annotation"]:
if field == "name" and decomp_param.name == "self":
warnings.warn("PyTorch uses 'input' instead of 'self' on public api")
if getattr(decomp_param, field) != getattr(op_param, field):
return False
decomp_default = decomp_param.default
op_default = op_param.default
# default value not always correctly inferred as being present on torch schema,
# but if specified on both they should be equal
if decomp_default != inspect_empty and op_default != inspect_empty:
if decomp_default != op_default:
return False
return decomposition_sig.return_annotation == torch_op_sig.return_annotation
def register_decomposition(
aten_op: torch._ops.OpOverload,
registry: Optional[dict[str, torch.jit.ScriptFunction]] = None,
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
def decomposition_decorator(f: Callable[_P, _T]) -> Callable[_P, _T]:
nonlocal registry
if registry is None:
registry = decomposition_table
assert isinstance(aten_op, torch._ops.OpOverload)
# Need unique name for jit function serialization
assert (
f.__name__ not in function_name_set
), f"Duplicated function name {f.__name__}"
function_name_set.add(f.__name__)
scripted_func = torch.jit.script(f)
torch._C._jit_pass_inline(scripted_func.graph)
for _ in range(2):
torch._C._jit_pass_peephole(scripted_func.graph)
torch._C._jit_pass_constant_propagation(scripted_func.graph)
registry[str(aten_op._schema)] = scripted_func
return f
return decomposition_decorator
# TODO: replace torch.sigmoid -> aten.sigmoid
@register_decomposition(aten.var.correction)
def var_decomposition(
input: Tensor,
dim: Optional[list[int]] = None,
correction: Optional[Number] = None,
keepdim: bool = False,
) -> Tensor:
if dim is None:
dim_i: list[int] = []
dim = dim_i
if isinstance(dim, (tuple, list)) and len(dim) == 0:
n = input.numel()
else:
n = 1
for dim_i in dim: # type: ignore[assignment]
n *= input.shape[dim_i] # type: ignore[call-overload]
mean = aten.mean(input, dim, True)
sub = input - mean
sq = sub * sub
sum = aten.sum(sq, dim, keepdim)
if correction is None:
denom = float(n - 1)
else:
if isinstance(correction, int):
denom = float(n - correction)
elif isinstance(correction, float):
denom = float(n) - correction
else:
raise RuntimeError("correction must be int or float")
return sum / max(0, denom)
@register_decomposition(aten.var.default)
def var(input: Tensor, unbiased: bool = True) -> Tensor:
return var_decomposition(input, correction=(1 if unbiased else 0))
```
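Usage sketch (illustrative; these are internal modules, so the import path is an implementation detail): the decorator leaves the Python function callable as-is while also storing a scripted copy keyed by the op's schema string.

```py
import torch
from torch.jit._decompositions import decomposition_table, var_decomposition

x = torch.randn(4, 5)

# The eager Python function still works and should match the reference op.
out_decomp = var_decomposition(x, dim=[1], correction=1)
out_ref = torch.var(x, dim=[1], unbiased=True)
print(torch.allclose(out_decomp, out_ref))  # expect True

# The registry maps schema strings to compiled ScriptFunctions.
for schema, fn in decomposition_table.items():
    print(schema, type(fn).__name__)
```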
============================================================================================================
SOURCE CODE FILE: _freeze.py
LINES: 1
SIZE: 9.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_freeze.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Freezing.
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
from typing import Optional
import torch
from torch.jit._script import RecursiveScriptModule, ScriptModule
def freeze(
mod, preserved_attrs: Optional[list[str]] = None, optimize_numerics: bool = True
):
r"""Freeze ScriptModule, inline submodules, and attributes as constants.
Freezing a :class:`ScriptModule` will clone it and attempt to inline the cloned
module's submodules, parameters, and attributes as constants in the TorchScript IR Graph.
By default, `forward` will be preserved, as well as attributes & methods specified in
`preserved_attrs`. Additionally, any attribute that is modified within a preserved
method will be preserved.
Freezing currently only accepts ScriptModules that are in eval mode.
Freezing applies generic optimizations that will speed up your model regardless of machine.
To further optimize using server-specific settings, run `optimize_for_inference` after
freezing.
Args:
mod (:class:`ScriptModule`): a module to be frozen
preserved_attrs (Optional[List[str]]): a list of attributes to preserve in addition to the forward method.
Attributes modified in preserved methods will also be preserved.
optimize_numerics (bool): If ``True``, a set of optimization passes will be run that does not strictly
preserve numerics. Full details of optimization can be found at `torch.jit.run_frozen_optimizations`.
Returns:
Frozen :class:`ScriptModule`.
Example (Freezing a simple module with a Parameter):
.. testcode::
import torch
class MyModule(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
self.linear = torch.nn.Linear(N, M)
def forward(self, input):
output = self.weight.mm(input)
output = self.linear(output)
return output
scripted_module = torch.jit.script(MyModule(2, 3).eval())
frozen_module = torch.jit.freeze(scripted_module)
# parameters have been removed and inlined into the Graph as constants
assert len(list(frozen_module.named_parameters())) == 0
# See the compiled graph as Python code
print(frozen_module.code)
Example (Freezing a module with preserved attributes)
.. testcode::
import torch
class MyModule2(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.modified_tensor = torch.tensor(10.)
self.version = 1
def forward(self, input):
self.modified_tensor += 1
return input + self.modified_tensor
scripted_module = torch.jit.script(MyModule2().eval())
frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["version"])
# we've manually preserved `version`, so it still exists on the frozen module and can be modified
assert frozen_module.version == 1
frozen_module.version = 2
# `modified_tensor` is detected as being mutated in the forward, so freezing preserves
# it to retain model semantics
assert frozen_module(torch.tensor(1)) == torch.tensor(12)
# now that we've run it once, the next result will be incremented by one
assert frozen_module(torch.tensor(1)) == torch.tensor(13)
Note:
Freezing submodule attributes is also supported:
frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["submodule.version"])
Note:
If you're not sure why an attribute is not being inlined as a constant, you can run
`dump_alias_db` on frozen_module.forward.graph to see if freezing has detected the
attribute is being modified.
Note:
Because freezing makes weights constants and removes module hierarchy, `to` and other
nn.Module methods to manipulate device or dtype no longer work. As a workaround,
you can remap devices by specifying `map_location` in `torch.jit.load`, however
device-specific logic may have been baked into the model.
"""
if not isinstance(mod, ScriptModule):
raise RuntimeError(
"Freezing expects a ScriptModule as input. "
"Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
)
if mod.training:
raise RuntimeError(
"Freezing is currently only implemented for modules in eval mode. "
"Please call .eval() on your module before freezing."
)
preserved_attrs = preserved_attrs if preserved_attrs is not None else []
out = RecursiveScriptModule(torch._C._freeze_module(mod._c, preserved_attrs))
RecursiveScriptModule._finalize_scriptmodule(out)
preserved_methods = [x for x in preserved_attrs if mod._c._has_method(x)]
run_frozen_optimizations(out, optimize_numerics, preserved_methods)
return out
def run_frozen_optimizations(
mod, optimize_numerics: bool = True, preserved_methods: Optional[list[str]] = None
):
r"""
Run a series of optimizations looking for patterns that occur in frozen graphs.
The current set of optimizations includes:
- Dropout Removal
- Pretranspose Linear Layers
- Concat Linear Layers with same input Tensor
- Conv -> Batchnorm folding
- Conv -> Add/Sub folding
- Conv -> Mul/Div folding
Args:
mod (:class:`ScriptModule`): a frozen module to be optimized
optimize_numerics (bool): If ``True``, a set of optimization passes will be run that does not strictly
preserve numerics. These optimizations preserve default rtol and atol of `torch.testing.assert_close`
when applied on a single transformation, however in a module where many transformations are applied
the rtol or atol may no longer fall within the default `assert_close` tolerance. Conv -> Batchnorm folding,
Conv-Add/Sub, and Conv -> Mul/Div folding all may alter numerics.
Returns:
None
Note:
On rare occasions, this can result in slower execution.
Example (Freezing a module with Conv->Batchnorm)
.. code-block:: python
import torch
in_channels, out_channels = 3, 32
conv = torch.nn.Conv2d(
in_channels, out_channels, kernel_size=3, stride=2, bias=True
)
bn = torch.nn.BatchNorm2d(out_channels, eps=0.001)
mod = torch.nn.Sequential(conv, bn)
# set optimize_numerics to False here; by default freezing runs run_frozen_optimizations
frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()), optimize_numerics=False)
# inspect frozen mod
assert "batch_norm" in str(frozen_mod.graph)
torch.jit.run_frozen_optimizations(frozen_mod)
assert "batch_norm" not in str(frozen_mod.graph)
"""
if mod._c._has_method("forward"):
torch._C._jit_pass_optimize_frozen_graph(mod.graph, optimize_numerics)
if preserved_methods is None:
preserved_methods = []
for method in preserved_methods:
torch._C._jit_pass_optimize_frozen_graph(
mod.__getattr__(method).graph, optimize_numerics
)
def optimize_for_inference(
mod: ScriptModule, other_methods: Optional[list[str]] = None
) -> ScriptModule:
"""
Perform a set of optimization passes to optimize a model for the purposes of inference.
If the model is not already frozen, optimize_for_inference
will invoke `torch.jit.freeze` automatically.
In addition to generic optimizations that should speed up your model regardless
of environment, `optimize_for_inference` will also bake in build-specific settings
such as the presence of CUDNN or MKLDNN, and may in the future make transformations
which speed things up on one machine but slow things down on another. Accordingly,
serialization is not implemented after invoking `optimize_for_inference` and
is not guaranteed.
This is still in prototype, and may have the potential to slow down your model.
Primary use cases that have been targeted so far have been vision models on cpu
and gpu to a lesser extent.
Example (optimizing a module with Conv->Batchnorm)::
import torch
in_channels, out_channels = 3, 32
conv = torch.nn.Conv2d(
in_channels, out_channels, kernel_size=3, stride=2, bias=True
)
bn = torch.nn.BatchNorm2d(out_channels, eps=0.001)
mod = torch.nn.Sequential(conv, bn)
frozen_mod = torch.jit.optimize_for_inference(torch.jit.script(mod.eval()))
assert "batch_norm" not in str(frozen_mod.graph)
# if built with MKLDNN, convolution will be run with MKLDNN weights
assert "MKLDNN" in frozen_mod.graph
"""
if not isinstance(mod, ScriptModule):
raise RuntimeError(
"optimize_for_inference expects a ScriptModule as input. "
"Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
)
if other_methods is None:
other_methods = []
if hasattr(mod, "training"):
mod = freeze(mod.eval(), preserved_attrs=other_methods)
torch._C._jit_pass_optimize_for_inference(mod._c, other_methods)
return mod
```
===========================================================================================================
SOURCE CODE FILE: _fuser.py
LINES: 1
SIZE: 7.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_fuser.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import contextlib
import torch
@contextlib.contextmanager
def optimized_execution(should_optimize):
"""Context manager that controls whether the JIT's executor will run optimizations before executing a function."""
stored_flag = torch._C._get_graph_executor_optimize()
torch._C._set_graph_executor_optimize(should_optimize)
try:
yield
finally:
torch._C._set_graph_executor_optimize(stored_flag)
@contextlib.contextmanager
def fuser(name):
"""Context manager that facilitates switching between backend fusers.
Valid names:
* ``fuser0`` - enables only legacy fuser
* ``fuser1`` - enables only NNC
* ``fuser2`` - enables only nvFuser
* ``fuser3`` - enables oneDNN Graph
"""
old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
old_nvfuser_state = torch._C._jit_nvfuser_enabled()
old_llga_state = torch._C._jit_llga_enabled()
if name == "fuser0": # legacy fuser
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
torch._C._jit_set_llga_enabled(False)
elif name == "fuser1": # NNC
old_profiling_executor = torch._C._jit_set_profiling_executor(True)
old_profiling_mode = torch._C._get_graph_executor_optimize(True)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
torch._C._jit_set_nvfuser_enabled(False)
torch._C._jit_set_llga_enabled(False)
elif name == "fuser2": # nvFuser
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
torch._C._jit_set_llga_enabled(False)
elif name == "fuser3": # oneDNN Graph
old_profiling_executor = torch._C._jit_set_profiling_executor(True)
old_profiling_mode = torch._C._get_graph_executor_optimize(True)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(True)
torch._C._jit_set_nvfuser_enabled(False)
torch._C._jit_set_llga_enabled(True)
elif name == "none": # Turn Pytorch fuser off
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
torch._C._jit_set_llga_enabled(False)
else:
raise Exception(f"unrecognized fuser option (name: {name})") # noqa: TRY002
try:
yield
finally:
if name in ["fuser1", "fuser3"]: # NNC or oneDNN Graph
torch._C._jit_set_profiling_executor(old_profiling_executor) # type: ignore[possibly-undefined]
torch._C._get_graph_executor_optimize(old_profiling_mode) # type: ignore[possibly-undefined]
# recover the previous values
torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse)
torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse)
torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state)
torch._C._jit_set_nvfuser_enabled(old_nvfuser_state)
torch._C._jit_set_llga_enabled(old_llga_state)
last_executed_optimized_graph = torch._C._last_executed_optimized_graph
def _get_differentiable_graph_node(node, diff_node):
if node.kind() == "prim::DifferentiableGraph":
diff_node.append(node)
else:
for block in node.blocks():
for n in block.nodes():
_get_differentiable_graph_node(n, diff_node)
def _graph_for(self, *args, **kwargs):
return _script_method_graph_for(self, self, *args, **kwargs)
def _script_method_graph_for(self, parent, *args, **kwargs):
try:
dbs = parent.get_debug_state()
eps = list(dbs.execution_plans.values())
assert len(eps) == 1
graph = eps[0].graph.copy()
# graph_executor_states for differentiable node
fw_states = eps[0].code.differentiable_op_executor_states()
diff_nodes: list[torch._C.Node] = []
for n in graph.nodes():
_get_differentiable_graph_node(n, diff_nodes)
assert len(fw_states) == len(diff_nodes)
# swap each differentiable graph with optimized graph in their execution plan
for n, state in zip(diff_nodes, fw_states):
fw_execution_plans = list(state.execution_plans.values())
# we can only update the subgraph when there's a unique execution
# plan. Avoid assert here so we would skip the ones that can't be
# updated while try the best effort to update other nodes.
if len(fw_execution_plans) == 1:
n.g_("Subgraph", fw_execution_plans[0].graph)
return graph
except Exception:
# fallback approach, we just ran the graph and return the recorded optimized
# graph
self(*args, **kwargs)
return last_executed_optimized_graph()
def set_fusion_strategy(strategy: list[tuple[str, int]]):
"""Set the type and number of specializations that can occur during fusion.
Usage: provide a list of pairs (type, depth) where type is one of "STATIC" or "DYNAMIC"
and depth is an integer.
Behavior - static vs dynamic:
In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
based on some initial profiling runs.
In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
shapes are possible.
In both cases, we also recompile on new striding behavior, device, or dtype.
Behavior - fallback functions & depth:
When an input doesn't match the format required by the specialized compiled op, it will run
a fallback function. Fallback functions are recursively compiled and specialized based
on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to
limit the number of specializations that can be compiled, before giving up on recompiling and
falling back to a completely un-fused, un-specialized implementation.
The list of (type, depth) pairs controls the type of specializations and the number of
specializations. For example: [("STATIC", 2), ("DYNAMIC", 2)] indicates that the first
two specializations will use static fusions, the following two specializations will use
dynamic fusion, and any inputs that satisfy none of the 4 options will run an
unfused implementation.
NB: in the future, as more fusion backends are added, there may be more granular
apis for specific fusers.
"""
return torch._C._jit_set_fusion_strategy(strategy)
```
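Usage sketch (illustrative, not part of the file above): pick a fuser for a region of code and bound the number of shape specializations before running a scripted function.

```py
import torch


@torch.jit.script
def gelu_like(x):
    return x * torch.sigmoid(1.702 * x)


x = torch.randn(1024)

# Allow 2 static followed by 10 dynamic shape specializations before the
# executor falls back to an unfused, unspecialized implementation.
torch.jit.set_fusion_strategy([("STATIC", 2), ("DYNAMIC", 10)])

with torch.jit.fuser("fuser1"):  # NNC
    for _ in range(3):  # a few runs so the profiling executor can specialize
        y = gelu_like(x)

# Debugging aid: inspect the most recently executed optimized graph.
print(torch.jit.last_executed_optimized_graph())
```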
==============================================================================================================
SOURCE CODE FILE: _ir_utils.py
LINES: 1
SIZE: 0.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_ir_utils.py
ENCODING: utf-8
```py
from types import TracebackType
from typing import Optional, Union
import torch
class _InsertPoint:
def __init__(
self,
insert_point_graph: torch._C.Graph,
insert_point: Union[torch._C.Node, torch._C.Block],
):
self.insert_point = insert_point
self.g = insert_point_graph
self.guard = None
def __enter__(self) -> None:
self.prev_insert_point = self.g.insertPoint()
self.g.setInsertPoint(self.insert_point)
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.g.setInsertPoint(self.prev_insert_point)
def insert_point_guard(
self: torch._C.Graph, insert_point: Union[torch._C.Node, torch._C.Block]
) -> _InsertPoint:
return _InsertPoint(self, insert_point)
```
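Usage sketch (illustrative; assumes the Python ``Graph`` bindings such as ``insertConstant`` that recent builds expose): temporarily move a graph's insertion point, then restore it on exit.

```py
import torch
from torch.jit._ir_utils import insert_point_guard


@torch.jit.script
def f(x: torch.Tensor) -> torch.Tensor:
    return x + 1


graph = f.graph
first_node = next(iter(graph.nodes()))

# Insert a constant ahead of the first existing node instead of at the end.
with insert_point_guard(graph, first_node):
    graph.insertConstant(42)

print(graph)  # the new prim::Constant appears before the original nodes
```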
=============================================================================================================
SOURCE CODE FILE: _logging.py
LINES: 1
SIZE: 0.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_logging.py
ENCODING: utf-8
```py
import torch
add_stat_value = torch.ops.prim.AddStatValue
set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger
time_point = torch.ops.prim.TimePoint
```
=======================================================================================================================
SOURCE CODE FILE: _monkeytype_config.py
LINES: 1
SIZE: 7.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_monkeytype_config.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import inspect
import sys
import typing
from collections import defaultdict
from collections.abc import Iterable
from pathlib import Path
from types import CodeType
from typing import Optional
import torch
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import]
from monkeytype.db.base import ( # type: ignore[import]
CallTraceStore,
CallTraceStoreLogger,
CallTraceThunk,
)
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
# Checks whether a class is defined in `torch.*` modules
def is_torch_native_class(cls):
if not hasattr(cls, "__module__"):
return False
parent_modules = cls.__module__.split(".")
if not parent_modules:
return False
root_module = sys.modules.get(parent_modules[0])
return root_module is torch
def get_type(type):
"""Convert the given type to a torchScript acceptable format."""
if isinstance(type, str):
return type
elif inspect.getmodule(type) == typing:
# If the type is a type imported from typing
# like Tuple, List, Dict then replace `typing.`
# with a null string. This needs to be done since
# typing.List is not accepted by TorchScript.
type_to_string = str(type)
return type_to_string.replace(type.__module__ + ".", "")
elif is_torch_native_class(type):
# If the type is a subtype of torch module, then TorchScript expects a fully qualified name
# for the type which is obtained by combining the module name and type name.
return type.__module__ + "." + type.__name__
else:
# For all other types use the name for the type.
return type.__name__
def get_optional_of_element_type(types):
"""Extract element type, return as `Optional[element type]` from consolidated types.
Helper function that extracts the type of the element to be annotated as Optional
from the list of consolidated types and returns `Optional[element type]`.
TODO: To remove this check once Union support lands.
"""
elem_type = types[1] if type(None) == types[0] else types[0]
elem_type = get_type(elem_type)
# Optional type is internally converted to Union[type, NoneType], which
# is not supported yet in TorchScript. Hence, representing the optional type as string.
return "Optional[" + elem_type + "]"
def get_qualified_name(func):
return func.__qualname__
if _IS_MONKEYTYPE_INSTALLED:
class JitTypeTraceStoreLogger(CallTraceStoreLogger):
"""A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore."""
def __init__(self, store: CallTraceStore):
super().__init__(store)
def log(self, trace: CallTrace) -> None:
self.traces.append(trace)
class JitTypeTraceStore(CallTraceStore):
def __init__(self) -> None:
super().__init__()
# A dictionary keeping all collected CallTrace
# key is fully qualified name of called function
# value is list of all CallTrace
self.trace_records: dict[str, list] = defaultdict(list)
def add(self, traces: Iterable[CallTrace]):
for t in traces:
qualified_name = get_qualified_name(t.func)
self.trace_records[qualified_name].append(t)
def filter(
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000,
) -> list[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> dict:
# Analyze the types for the given module
# and create a dictionary of all the types
# for arguments.
records = self.trace_records[qualified_name]
all_args = defaultdict(set)
for record in records:
for arg, arg_type in record.arg_types.items():
all_args[arg].add(arg_type)
return all_args
def consolidate_types(self, qualified_name: str) -> dict:
all_args = self.analyze(qualified_name)
# If an argument has more than one observed type,
# consolidate the entry to `Any`; a pair of {T, NoneType}
# is instead consolidated to `Optional[T]`.
for arg, types in all_args.items():
types = list(types)
type_length = len(types)
if type_length == 2 and type(None) in types:
# TODO: Remove this check once Union support in TorchScript lands.
all_args[arg] = get_optional_of_element_type(types)
elif type_length > 1:
all_args[arg] = "Any"
elif type_length == 1:
all_args[arg] = get_type(types[0])
return all_args
def get_args_types(self, qualified_name: str) -> dict:
return self.consolidate_types(qualified_name)
class JitTypeTraceConfig(monkeytype.config.Config):
def __init__(self, s: JitTypeTraceStore):
super().__init__()
self.s = s
def trace_logger(self) -> JitTypeTraceStoreLogger:
"""Return a JitCallTraceStoreLogger that logs to the configured trace store."""
return JitTypeTraceStoreLogger(self.trace_store())
def trace_store(self) -> CallTraceStore:
return self.s
def code_filter(self) -> Optional[CodeFilter]:
return jit_code_filter
else:
# When MonkeyType is not installed, we provide dummy class definitions
# for the below classes.
class JitTypeTraceStoreLogger: # type: ignore[no-redef]
def __init__(self) -> None:
pass
class JitTypeTraceStore: # type: ignore[no-redef]
def __init__(self) -> None:
self.trace_records = None
class JitTypeTraceConfig: # type: ignore[no-redef]
def __init__(self) -> None:
pass
monkeytype_trace = None # type: ignore[assignment] # noqa: F811
def jit_code_filter(code: CodeType) -> bool:
"""Codefilter for Torchscript to trace forward calls.
The custom CodeFilter is required while scripting a FX Traced forward calls.
FX Traced forward calls have `code.co_filename` start with '<' which is used
to exclude tracing of stdlib and site-packages in the default code filter.
Since we need all forward calls to be traced, this custom code filter
checks for code.co_name to be 'forward' and enables tracing for all such calls.
The code filter is similar to default code filter for monkeytype and
excludes tracing of stdlib and site-packages.
"""
# Filter code without a source file and exclude this check for 'forward' calls.
if code.co_name != "forward" and (
not code.co_filename or code.co_filename[0] == "<"
):
return False
filename = Path(code.co_filename).resolve()
return not any(_startswith(filename, lib_path) for lib_path in LIB_PATHS)
```
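Usage sketch (illustrative): this module backs profile-directed typing via ``example_inputs``. It requires ``monkeytype`` to be installed; without it, ``torch.jit.script`` warns and falls back to default (Tensor) argument types.

```py
import torch


def scale(x, factor):
    return x * factor


# Example inputs are traced through MonkeyType so the untyped parameters can
# be annotated automatically (here, roughly Tensor and float).
scripted = torch.jit.script(scale, example_inputs=[(torch.randn(3), 2.0)])
print(scripted.code)
```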
=====================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_passes\__init__.py
ENCODING: utf-8
```py
```
==================================================================================================================================
SOURCE CODE FILE: _property_propagation.py
LINES: 1
SIZE: 1.45 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_passes\_property_propagation.py
ENCODING: utf-8
```py
"""
Tools to help with tensor property propagation.
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
from typing import Any
import torch
from torch import TensorType
from torch._C import Graph
def apply_input_props_using_example(graph: Graph, example_input: list[Any]) -> None:
"""
Applies properties for each tensor in the graph inputs
using the example supplied.
"""
graph_inputs = list(graph.inputs())
if len(graph_inputs) == 0:
return
# Strip self args off for methods
in_0 = graph_inputs[0]
if isinstance(in_0.type(), torch._C.ClassType) and in_0.debugName() == "self":
graph_inputs = graph_inputs[1:]
if not len(graph_inputs) == len(example_input):
raise RuntimeError(
"Number of inputs in graph does not match number of inputs in the example"
)
for i, (graph_i, example_i) in enumerate(zip(graph_inputs, example_input)):
if example_i is None:
continue # Skip the type check
if isinstance(example_i, torch.Tensor) != isinstance(
graph_i.type(), TensorType
):
raise RuntimeError(
f"Input {i} does not match type of example", graph_i, example_i
)
if isinstance(example_i, torch.Tensor):
graph_i.setType(TensorType.create_from_tensor(example_i)) # type: ignore[arg-type]
```
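Usage sketch (illustrative; this is an internal helper, so the import path and behavior are implementation details): stamp the dtype, shape, and device of example tensors onto a scripted function's graph inputs.

```py
import torch
from torch.jit._passes._property_propagation import apply_input_props_using_example


@torch.jit.script
def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    return x + y


example = [torch.randn(2, 3), torch.randn(2, 3)]
apply_input_props_using_example(f.graph, example)

for inp in f.graph.inputs():
    print(inp.type())  # TensorType now carries the example's dtype/shape/device
```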
============================================================================================================
SOURCE CODE FILE: _pickle.py
LINES: 1
SIZE: 1.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_pickle.py
ENCODING: utf-8
```py
# These functions are referenced from the pickle archives produced by
# ScriptModule.save()
# These (`build_*`) functions used to be used by `pickler.cpp` to specify
# the type of the list for certain special types, but now all lists get
# a type attached and restored via `restore_type_tag` below. The legacy
# functions should stick around for backwards-compatibility.
from typing import Union
def build_intlist(data: list[int]) -> list[int]:
return data
def build_tensorlist(data: list[object]) -> list[object]:
return data
def build_doublelist(data: list[float]) -> list[float]:
return data
def build_boollist(data: list[bool]) -> list[bool]:
return data
def build_tensor_from_id(data: Union[int, object]) -> Union[int, None]:
if isinstance(data, int):
# just the id, can't really do anything
return data
return None
def restore_type_tag(value: object, type_str: str) -> object:
# The type_ptr is used by the jit unpickler to restore the full static type
# to container types like list when they are re-loaded, but this doesn't
# matter for Python, so just return the plain value
return value
```
===============================================================================================================
SOURCE CODE FILE: _recursive.py
LINES: 10
SIZE: 42.40 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_recursive.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import collections
import functools
import inspect
import sys
import textwrap
import types
import warnings
import torch
import torch._jit_internal as _jit_internal
from torch._sources import fake_range
from torch.jit._builtins import _find_builtin
from torch.jit._check import AttributeTypeIsSupportedChecker
from torch.jit._state import _add_script_class, _get_script_class, _python_cu
from torch.jit.frontend import (
get_class_properties,
get_default_args,
get_jit_class_def,
get_jit_def,
)
from torch.nn import Module
ScriptMethodStub = collections.namedtuple(
"ScriptMethodStub", ("resolution_callback", "def_", "original_method")
)
PropertyStub = collections.namedtuple("PropertyStub", ("resolution_callback", "def_"))
# TODO: there should be a more principled way of doing this.
ignored_attributes = [
"_version",
"_parameters",
"_buffers",
"_non_persistent_buffers_set",
"_backward_hooks",
"_backward_pre_hooks",
"_forward_hooks",
"_forward_hooks_with_kwargs",
"_forward_pre_hooks",
"_forward_pre_hooks_with_kwargs",
"_forward_hooks_always_called",
"_state_dict_hooks",
"_state_dict_pre_hooks",
"_load_state_dict_pre_hooks",
"_load_state_dict_post_hooks",
"_modules",
"_initializing",
"dump_patches",
]
def _compile_and_register_class(obj, rcb, qualified_name):
script_class = _get_script_class(obj)
if not script_class:
ast = get_jit_class_def(obj, obj.__name__)
defaults = torch.jit.frontend.get_default_args_for_class(obj)
script_class = torch._C._jit_script_class_compile(
qualified_name, ast, defaults, rcb
)
_add_script_class(obj, script_class)
return script_class
def make_stub(func, name):
rcb = _jit_internal.createResolutionCallbackFromClosure(func)
ast = get_jit_def(func, name, self_name="RecursiveScriptModule")
return ScriptMethodStub(rcb, ast, func)
def make_stub_from_method(nn_module, method_name):
func = getattr(nn_module, method_name)
if isinstance(func, ScriptMethodStub):
return func
# Make sure the name present in the resulting AST will match the name
# requested here. The only time they don't match is if you do something
# like:
# def _forward(self):
# pass
# forward = _forward
# In this case, the actual function object will have the name `_forward`,
# even though we requested a stub for `forward`.
return make_stub(func, method_name)
def make_stubs_from_exported_methods(mod):
stubs = []
for name in dir(mod):
item = getattr(mod, name, None)
if (
_jit_internal.get_torchscript_modifier(item)
is _jit_internal.FunctionModifiers.EXPORT
):
stubs.append(make_stub_from_method(mod, name))
return stubs
def jit_ignored_properties(module):
user_annotated_ignored_attributes = getattr(
module, "__jit_ignored_attributes__", []
)
def get_properties_names(module):
return {k for k, v in vars(module).items() if isinstance(v, property)}
properties = get_properties_names(type(module))
user_annoted_ignored_properties = set()
for ignored_attr in user_annotated_ignored_attributes:
if ignored_attr in properties:
user_annoted_ignored_properties.add(ignored_attr)
return user_annoted_ignored_properties
# base types that can be constants
# in addition, tuples and lists of these base types are also considered constants
# If you edit this list, then you also need to edit the handlers in
# ConstantValue in jit/script/init.cpp
_constant_types = (
bool,
float,
int,
str,
type(None),
torch.device,
torch.layout,
torch.dtype,
)
def _get_valid_constant(attr, v, owner_type):
if isinstance(v, _constant_types):
return v
elif isinstance(v, (tuple, list)):
return tuple(_get_valid_constant(attr, x, owner_type) for x in v)
constants = ", ".join(torch.typename(typ) for typ in _constant_types)
raise TypeError(
textwrap.dedent(
f"""
'{torch.typename(type(v))}' object in attribute '{owner_type}.{attr}' is not a valid constant.
Valid constants are:
1. a nn.ModuleList
2. a value of type {{{constants}}}
3. a list or tuple of (2)
"""
)
)
class SourceContext(torch._C._jit_tree_views.SourceRangeFactory):
def __init__(self, source, filename, file_lineno, leading_whitespace_len):
super().__init__(source, filename, file_lineno, leading_whitespace_len)
def get_annotations(obj):
if sys.version_info < (3, 10):
return getattr(obj, "__annotations__", {})
# In Python-3.10+ it is recommended to use inspect.get_annotations
# See https://docs.python.org/3.10/howto/annotations.html
# But also, in 3.10 annotations from a base class are not inherited
# by an unannotated derived class, so they must be manually extracted
annotations = inspect.get_annotations(obj)
if annotations:
return annotations
def get_cls_annotations(cls):
cls_annotations = inspect.get_annotations(cls)
if cls_annotations:
return cls_annotations
for base in cls.__bases__:
cls_annotations = get_cls_annotations(base)
if cls_annotations:
return cls_annotations
return {}
cls = obj if isinstance(obj, type) else type(obj)
return get_cls_annotations(cls)
def infer_concrete_type_builder(nn_module, share_types=True):
"""
Build a ConcreteModuleTypeBuilder from an nn.Module.
This ConcreteModuleType doesn't have a JIT type associated with it yet, it
must be filled in by the caller.
"""
concrete_type_builder = torch._C.ConcreteModuleTypeBuilder(type(nn_module))
if isinstance(nn_module, (torch.nn.ModuleDict)):
concrete_type_builder.set_module_dict()
if isinstance(nn_module, (torch.nn.ModuleList, torch.nn.Sequential)):
concrete_type_builder.set_module_list()
if isinstance(nn_module, (torch.nn.ParameterList)):
concrete_type_builder.set_parameter_list()
if isinstance(nn_module, (torch.nn.ParameterDict)):
concrete_type_builder.set_parameter_dict()
class_annotations = get_annotations(nn_module)
if isinstance(nn_module, (torch.ao.quantization.QuantWrapper)):
class_annotations = {}
# Get user-annotated ignored attributes.
user_annotated_ignored_attributes = getattr(
nn_module, "__jit_ignored_attributes__", []
)
concrete_type_builder.add_ignored_attributes(user_annotated_ignored_attributes)
ignored_properties = jit_ignored_properties(nn_module)
# try to infer the type from type annotation or from the object itself
def infer_type(name, item):
# The forward function from Module is special; never use these annotations; we
# need to infer type directly using JIT. I originally wanted to write
# this test as isinstance(class_annotations[name], Callable) but
# isinstance on typing things doesn't seem to work: isinstance(list, Callable)
# is also true!
inferred = False
try:
if (
name in class_annotations
and class_annotations[name]
!= torch.nn.Module.__annotations__["forward"]
):
ann_to_type = torch.jit.annotations.ann_to_type(
class_annotations[name], fake_range()
)
attr_type = torch._C.InferredType(ann_to_type)
elif isinstance(item, torch.jit.Attribute):
ann_to_type = torch.jit.annotations.ann_to_type(item.type, fake_range())
attr_type = torch._C.InferredType(ann_to_type)
else:
attr_type = torch._C._jit_try_infer_type(item)
inferred = True
except RuntimeError as re:
raise RuntimeError(f"Error inferring type for {name}: {item}: {re}") from re
return attr_type, inferred
added_names = set()
for name, item in nn_module._parameters.items():
if name in user_annotated_ignored_attributes:
continue
assert item is None or isinstance(item, torch.Tensor)
attr_type, _ = infer_type(name, item)
# We currently have the invariant in various places in our code
# that parameters must be Tensors. However, the nn.Module API also
# allows NoneType parameters. These parameters are not returned as
# part of `parameters()` and its variants, but are available
# through direct attribute access.
concrete_type_builder.add_attribute(name, attr_type.type(), True, False)
added_names.add(name)
for name, item in nn_module._buffers.items():
if name in user_annotated_ignored_attributes:
continue
assert item is None or isinstance(item, torch.Tensor)
attr_type, _ = infer_type(name, item)
concrete_type_builder.add_attribute(name, attr_type.type(), False, True)
added_names.add(name)
for name, item in nn_module._modules.items():
if name in user_annotated_ignored_attributes:
continue
attr_type, _ = infer_type(name, item)
if item is None:
# Modules can be None. We don't have direct support for optional
# Modules, so we register it as a NoneType attribute instead.
concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
continue
if attr_type.success():
assert attr_type.type().is_interface_type()
# if the type can be inferred, it should be a module interface type
sub_concrete_type = torch._C.ConcreteModuleType.from_jit_type(
attr_type.type()
)
else:
# otherwise we get the concrete module type for item and add it to concrete_type
sub_concrete_type = get_module_concrete_type(item, share_types)
concrete_type_builder.add_module(name, sub_concrete_type)
added_names.add(name)
# populate constants_set
constants_set = set(getattr(nn_module, "__constants__", ()))
# Constants annotated via `Final[T]` rather than being added to `__constants__`
for name, ann in class_annotations.items():
if torch._jit_internal.is_final(ann):
constants_set.add(name)
for name in constants_set:
if name in added_names:
# TODO: We should really error in this case, but it's bc-breaking so
# we need to warn for at least one release
if name in nn_module._modules:
hint = "submodule"
elif name in nn_module._buffers:
hint = "buffer"
elif name in nn_module._parameters:
hint = "parameter"
else:
raise AssertionError(
"added_names must be submodule, parameter, or buffer"
)
warnings.warn(
f"'{name}' was found in ScriptModule constants, "
f" but it is a non-constant {hint}. Consider removing it."
)
continue
if not hasattr(nn_module, name):
# TODO: We should really error in this case, but it's bc-breaking so
# we need to warn for at least one release
warnings.warn(
f"'{name}' was found in ScriptModule constants, "
"but was not actually set in __init__. "
"Consider removing it."
)
continue
value = getattr(nn_module, name)
concrete_type_builder.add_constant(
name, _get_valid_constant(name, value, type(nn_module).__name__)
)
added_names.add(name)
# populate overloads
overloads = getattr(nn_module, "__overloads__", {})
# update with any annotated overloads
overloads.update(
get_overload_name_mapping(
get_overload_annotations(nn_module, ignored_properties)
)
)
for name, overloaded_names in overloads.items():
concrete_type_builder.add_overload(name, overloaded_names)
for name, value in nn_module.__dict__.items():
if name in ignored_attributes or name.startswith("__"):
# Python objects have lots of random attributes attached to them;
# PyTorch adds a few more. Prevent these from getting compiled.
continue
if name in user_annotated_ignored_attributes:
continue
if name in added_names:
# Don't re-add anything we already added
continue
isoverloadpacket = isinstance(value, torch._ops.OpOverloadPacket)
if isoverloadpacket:
value = value.op
# Handle Python function attributes
if inspect.isfunction(value):
try:
scripted_fn = torch.jit.script(value)
concrete_type_builder.add_function_attribute(
name, torch._C._jit_try_infer_type(scripted_fn).type(), value
)
except Exception as e:
# If we fail to script the function, it isn't a hard error.
# Instead, we will add it to the list of attributes we failed
# to convert, with the compilation error.
hint = (
"(This function exists as an attribute on the Python module, "
"but we failed to compile it to a TorchScript function. "
f"\nThe error stack is reproduced here:\n{e}"
)
concrete_type_builder.add_failed_attribute(name, hint)
continue
# Handle calls to builtin functions (either bespoke builtins from torch.jit._builtins or
# a call to an aten function like torch.add)
builtin_symbol_name = _find_builtin(value)
if builtin_symbol_name:
concrete_type_builder.add_builtin_function(name, builtin_symbol_name)
continue
# Handle Script function attributes
if isinstance(value, torch.jit.ScriptFunction):
concrete_type_builder.add_function_attribute(
name, torch._C._jit_try_infer_type(value).type(), value
)
continue
# If we got here, this is a regular "data" attribute, add it to the concrete type
attr_type, inferred = infer_type(name, value)
if attr_type.success():
concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
else:
# TODO: could add more detail here. For example, what the user should do
# when the pytype is `list` or `NoneType`
inferred_msg = (
"Its type was inferred; try adding a type annotation for the attribute."
if inferred
else ""
)
additional_info = f"{attr_type.reason()}. {inferred_msg}"
hint = (
"(This attribute exists on the Python module, "
f"but we failed to convert Python type: '{torch.typename(type(value))}' "
f"to a TorchScript type. {additional_info})"
)
concrete_type_builder.add_failed_attribute(name, hint)
# add hooks to concrete type
for hook in nn_module._forward_hooks.values():
concrete_type_builder.add_forward_hook(hook)
for pre_hook in nn_module._forward_pre_hooks.values():
concrete_type_builder.add_forward_pre_hook(pre_hook)
return concrete_type_builder
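# --- Illustrative sketch (not part of the original source) ---------------------
# A hedged example of what the attribute inference above produces for a small
# module: ``scale`` becomes a constant (via ``__constants__``), ``weights``
# becomes a Tensor attribute, and ``helper`` cannot be converted, so it is
# recorded as a "failed" attribute and only errors if a compiled method uses it.
def _example_inferred_module_attributes():
    class Net(torch.nn.Module):
        __constants__ = ["scale"]

        def __init__(self):
            super().__init__()
            self.scale = 2.0  # baked into the type as a constant
            self.weights = torch.randn(3)  # ordinary Tensor attribute
            self.helper = object()  # unconvertible -> failed attribute (unused, so OK)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x * self.scale + self.weights

    return torch.jit.script(Net())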
class ConcreteTypeStore:
type_store: dict[type[Module], list[torch._C.ConcreteModuleType]]
methods_compiled: set[torch._C.ConcreteModuleType]
def __init__(self) -> None:
        # Python module type => List[ConcreteModuleType]
self.type_store = {}
# ConcreteTypes that have had their methods already compiled
self.methods_compiled = set()
def get_or_create_concrete_type(self, nn_module):
"""Infer a ConcreteType from this `nn.Module` instance. Underlying JIT types are re-used if possible."""
concrete_type_builder = infer_concrete_type_builder(nn_module)
nn_module_type = type(nn_module)
if nn_module_type not in self.type_store:
self.type_store[nn_module_type] = []
# Search the type store for an already-available JIT type
known_types = self.type_store[nn_module_type]
for known_type in known_types:
if known_type.equals(concrete_type_builder):
return known_type
# We didn't find anything; generate a new JIT type from this concrete type
concrete_type = concrete_type_builder.build()
self.type_store[nn_module_type].append(concrete_type)
return concrete_type
concrete_type_store = ConcreteTypeStore()
def create_methods_and_properties_from_stubs(
concrete_type, method_stubs, property_stubs
):
method_defs = [m.def_ for m in method_stubs]
method_rcbs = [m.resolution_callback for m in method_stubs]
method_defaults = [get_default_args(m.original_method) for m in method_stubs]
property_defs = [p.def_ for p in property_stubs]
property_rcbs = [p.resolution_callback for p in property_stubs]
concrete_type._create_methods_and_properties(
property_defs, property_rcbs, method_defs, method_rcbs, method_defaults
)
def create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs):
hook_defs = [h.def_ for h in hook_stubs]
hook_rcbs = [h.resolution_callback for h in hook_stubs]
pre_hook_defs = [h.def_ for h in pre_hook_stubs]
pre_hook_rcbs = [h.resolution_callback for h in pre_hook_stubs]
concrete_type._create_hooks(hook_defs, hook_rcbs, pre_hook_defs, pre_hook_rcbs)
def get_module_concrete_type(nn_module, share_types=True):
"""
Get a concrete type for nn_modules.
If share_types is True, the concrete type is fetched from concrete_type_store.
If it is False, a new concrete type is created without first searching concrete_type_store.
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
        share_types: Whether to share underlying JIT types between modules (if possible).
Returns:
A concrete type for nn_module.
"""
assert isinstance(nn_module, Module)
if isinstance(nn_module, torch.jit.ScriptModule) and hasattr(
nn_module, "_concrete_type"
):
return nn_module._concrete_type
if share_types:
# Look into the store of cached JIT types
concrete_type = concrete_type_store.get_or_create_concrete_type(nn_module)
else:
# Get a concrete type directly, without trying to re-use an existing JIT
# type from the type store.
concrete_type_builder = infer_concrete_type_builder(nn_module, share_types)
concrete_type_builder.set_poisoned()
concrete_type = concrete_type_builder.build()
return concrete_type
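# --- Illustrative sketch (not part of the original source) ---------------------
# A hedged demonstration of the type sharing described above: scripting two
# equivalent instances of the same class should re-use the cached concrete type
# from ``concrete_type_store``, so the ``_concrete_type`` objects are identical.
def _example_concrete_type_sharing():
    class Net(torch.nn.Module):
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x + 1

    first = torch.jit.script(Net())
    second = torch.jit.script(Net())
    assert first._concrete_type is second._concrete_type
    return first, second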
def create_script_class(obj):
"""
Create and return a RecursiveScriptClass instance from a Python object.
Arguments:
obj: A Python object.
"""
qualified_class_name = _jit_internal._qualified_name(type(obj))
rcb = _jit_internal.createResolutionCallbackForClassMethods(type(obj))
# Script the type of obj if it hasn't already been scripted.
_compile_and_register_class(type(obj), rcb, qualified_class_name)
class_ty = _python_cu.get_class(qualified_class_name)
# Create an empty torch._C.ScriptObject with the scripted type.
cpp_object = torch._C._create_object_with_type(class_ty)
# Copy all of the attributes over to the torch._C.ScriptObject.
for name, value in obj.__dict__.items():
cpp_object.setattr(name, value)
# Wrap the torch._C.ScriptObject in a RecursiveScriptClass instance.
return wrap_cpp_class(cpp_object)
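# --- Illustrative sketch (not part of the original source) ---------------------
# A hedged example of where the class-wrapping machinery above shows up from the
# public API: a TorchScript class instance created inside TorchScript and handed
# back to Python should arrive wrapped rather than as the original Python object.
def _example_script_class_instance():
    @torch.jit.script
    class Counter:
        def __init__(self, start: int):
            self.value = start

        def increment(self) -> int:
            self.value += 1
            return self.value

    @torch.jit.script
    def make_counter(start: int) -> Counter:
        return Counter(start)

    counter = make_counter(5)  # comes back as a wrapped ScriptObject in Python
    return counter.increment()  # attribute/method access is forwarded to C++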
def create_script_module(nn_module, stubs_fn, share_types=True, is_tracing=False):
"""
Create a new ScriptModule from an nn.Module.
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
share_types: Whether to share underlying JIT types between modules (if possible).
            NOTE: Only set this to False when we cannot guarantee type sharing will work
correctly. This only happens today for traced modules, where the same
module can produce different traced methods depending on the inputs.
is_tracing: Whether this function is called during tracing or scripting. If tracing,
            we don't need to run the AttributeTypeIsSupportedChecker because all the unsupported
            attributes will be baked in as constants in the traced graph. In addition,
            this check significantly slows down tracing of large modules.
"""
assert not isinstance(nn_module, torch.jit.RecursiveScriptModule)
check_module_initialized(nn_module)
concrete_type = get_module_concrete_type(nn_module, share_types)
if not is_tracing:
AttributeTypeIsSupportedChecker().check(nn_module)
return create_script_module_impl(nn_module, concrete_type, stubs_fn)
def create_script_module_impl(nn_module, concrete_type, stubs_fn):
"""
Convert an nn.Module to a RecursiveScriptModule.
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
concrete_type: The fully initialized ConcreteType of the module.
stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
"""
cpp_module = torch._C._create_module_with_type(concrete_type.jit_type)
method_stubs = stubs_fn(nn_module)
property_stubs = get_property_stubs(nn_module)
hook_stubs, pre_hook_stubs = get_hook_stubs(nn_module)
ignored_properties = jit_ignored_properties(nn_module)
def init_fn(script_module):
# Initialize the ScriptModule:
# 1. Copy the attributes/parameters/buffers from the original `nn_module` to the new ScriptModule.
for name in concrete_type.get_attributes().keys():
orig_value = getattr(nn_module, name)
orig_value = (
orig_value.value
if isinstance(orig_value, torch.jit.Attribute)
else orig_value
)
cpp_module.setattr(name, orig_value)
# 2. Copy the submodules from the original `nn_module` to the new ScriptModule,
# recursively scripting them.
for name, sub_concrete_type in concrete_type.get_modules():
orig_value = getattr(nn_module, name)
assert isinstance(
orig_value, Module
), f"Expected Module but got {type(orig_value)}"
module_type = sub_concrete_type.jit_type
if isinstance(module_type, torch._C.InterfaceType):
# use the interface inference rule to compile the module
scripted = interface_script(module_type, orig_value)
elif isinstance(orig_value, torch.jit.ScriptModule):
scripted = orig_value
else:
# always reuse the provided stubs_fn to infer the methods to compile
scripted = create_script_module_impl(
orig_value, sub_concrete_type, stubs_fn
)
cpp_module.setattr(name, scripted)
script_module._modules[name] = scripted
# 3. Copy @ignored/@unused methods and attrs from the original `nn_module` to the new ScriptModule.
# This ensures we can access these Python methods on the ScriptModule.
for name in dir(nn_module):
if name in ignored_properties:
continue
item = getattr(nn_module, name, None)
if inspect.ismethod(item) and _jit_internal.is_ignored_fn(item):
unbound_function = getattr(nn_module, name).__func__
bound_method = unbound_function.__get__(script_module)
setattr(script_module, name, bound_method)
elif concrete_type.is_ignored_attribute(name):
setattr(script_module, name, item)
# For convenience, attach the concrete type to the new ScriptModule
script_module._concrete_type = concrete_type
# Actually create the ScriptModule, initializing it with the function we just defined
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
# Compile methods if necessary
if concrete_type not in concrete_type_store.methods_compiled:
create_methods_and_properties_from_stubs(
concrete_type, method_stubs, property_stubs
)
# Create hooks after methods to ensure no name collisions between hooks and methods.
# If done before, hooks can overshadow methods that aren't exported.
create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs)
torch._C._run_emit_module_hook(cpp_module)
concrete_type_store.methods_compiled.add(concrete_type)
# Copy the forward hooks and pre-hooks to the new ScriptModule
# to allow the hooks to be run from eager as ScriptFunctions
for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()):
script_module._forward_pre_hooks[idx] = fn
for idx, fn in enumerate(script_module._c._get_forward_hooks()):
script_module._forward_hooks[idx] = fn
# Special handling so methods like __len__ work in script methods on classes derived from containers
if (
isinstance(
nn_module, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict)
)
and "__len__" not in cpp_module._method_names()
):
script_module.define(f"def __len__(self):\n return {len(nn_module)}\n")
if (
isinstance(nn_module, torch.nn.ModuleDict)
and "__contains__" not in cpp_module._method_names()
):
if len(nn_module.keys()):
keys = repr(list(nn_module.keys()))
script_module.define(
f"def __contains__(self, key: str):\n return key in {keys}\n"
)
else:
script_module.define("def __contains__(self, key: str):\n return False\n")
# Make the compiled methods available to the Python ScriptModule class.
for method_stub in method_stubs:
if method_stub.original_method is None:
            # define()'d methods don't have a Python original_method, so we
# don't need to do any Python re-wrapping stuff
continue
name = method_stub.original_method.__name__
if name != method_stub.def_.name().name:
# TODO: Why skip this? Because @torch.jit._overload_method will
# mangle the name of the function.
continue
script_method = cpp_module._get_method(name)
# Wrap the original to propagate docstrings and such.
        # TODO: we don't currently do this for functions that are recursively
        # compiled, but we should.
wrapped_script_method = functools.wraps(method_stub.original_method)(
script_method
)
# Add the methods to the script_module directly. This ensures they will
# be found first when `name` is looked up (as opposed to the stubs or
# nn.Module.forward)
script_module.__dict__[name] = wrapped_script_method
# Make module properties available on the Python ScriptModule class.
for property_stub in property_stubs:
property_name = property_stub.def_.name().name
fget = cpp_module._get_method(property_stub.def_.getter_name().name)
# Setter is optional, so it may not exist.
setter_name = property_stub.def_.setter_name()
fset = cpp_module._get_method(setter_name.name) if setter_name else None
script_module.__dict__[property_name] = property(property_name, fget, fset) # type: ignore[arg-type]
# copy over python methods to script module if they aren't defined on the script module
# this is currently an internal api used only on module containers
for name in dir(nn_module):
if name in ignored_properties:
continue
item = getattr(nn_module, name, None)
if (
_jit_internal.get_torchscript_modifier(item)
is _jit_internal.FunctionModifiers.COPY_TO_SCRIPT_WRAPPER
):
add_python_attr_to_scripted_model(script_module, nn_module, name)
return script_module
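# --- Illustrative sketch (not part of the original source) ---------------------
# A hedged example of the container special-casing above: scripting an
# nn.Sequential defines ``__len__`` on the resulting ScriptModule.
def _example_scripted_container_len():
    seq = torch.jit.script(
        torch.nn.Sequential(torch.nn.Linear(2, 4), torch.nn.ReLU())
    )
    assert len(seq) == 2  # served by the generated __len__
    return seq(torch.randn(1, 2))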
# We define shims of certain attributes on the RecursiveScriptModule to support
# magic methods. To check if a script model defines an attribute we need
# to also check that the attribute is not the shim
def script_model_defines_attr(script_model, attr):
script_attr = getattr(script_model, attr, None)
if script_attr is None:
return False
default_attr = getattr(torch.jit.RecursiveScriptModule, attr, None)
if default_attr is None:
return False
return script_attr != default_attr
def add_python_attr_to_scripted_model(script_model, orig, attr):
if hasattr(orig, attr) and script_model_defines_attr(script_model, attr):
setattr(script_model, attr, getattr(orig, attr))
def get_overload_annotations(mod, jit_ignored_properties):
# original function => [(mangled overload name, overload function)]
overloads = {}
for name in dir(type(mod)):
if name in jit_ignored_properties:
continue
item = getattr(mod, name, None)
if not callable(item):
continue
# builtin functions like repr() in python 2 do not have __module__ defined
if hasattr(item, "__module__") and item.__module__ is not None:
method_overloads = _jit_internal._get_overloaded_methods(
item, mod.__class__
)
if method_overloads is None:
continue
if item.__func__ in method_overloads:
raise RuntimeError(
_jit_internal.get_overload_no_implementation_error_message(
"method", item.__func__
)
)
names = [name + "__" + str(i) for i in range(len(method_overloads))]
overloads[item] = list(zip(names, method_overloads))
return overloads
def get_overload_name_mapping(overload_info):
# Same format as __overloads__
# original function => [overload names]
overload_name_mappings: dict[str, list[str]] = {}
for orig_fn, overloads in overload_info.items():
original_name = orig_fn.__name__
if original_name not in overload_name_mappings:
overload_name_mappings[original_name] = []
for overload_name, _ in overloads:
overload_name_mappings[original_name].append(overload_name)
return overload_name_mappings
def _check_no_signature(func):
signature = torch.jit.annotations.get_signature(
func, None, fake_range(), inspect.ismethod(func)
)
if signature is None:
qual_name = _jit_internal._qualified_name(func)
raise RuntimeError(
f"Must explicitly add type annotations to overloaded functions: {qual_name}"
)
def make_stubs_for_overloads(overload_info):
overload_stubs = []
for orig_fn, overloads in overload_info.items():
orig_ast = get_jit_def(
orig_fn, orig_fn.__name__, self_name="RecursiveScriptModule"
)
for overload_name, overload_fn in overloads:
_check_no_signature(overload_fn)
over_ast = get_jit_def(
overload_fn, overload_fn.__name__, self_name="RecursiveScriptModule"
)
new_ast = torch._C._replace_overloaded_method_decl(
over_ast.decl(), orig_ast, overload_name
)
_rcb = _jit_internal.createResolutionCallbackFromClosure(orig_fn)
overload_stubs.append(ScriptMethodStub(_rcb, new_ast, overload_fn))
return overload_stubs
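# --- Illustrative sketch (not part of the original source) ---------------------
# A hedged example of the overload machinery above, assuming the private
# ``torch.jit._overload_method`` decorator: the decorated declarations supply
# the signatures and the final undecorated ``forward`` supplies the single
# implementation.
def _example_overloaded_method():
    class Adder(torch.nn.Module):
        @torch.jit._overload_method  # noqa: F811
        def forward(self, x: int) -> int:
            ...

        @torch.jit._overload_method  # noqa: F811
        def forward(self, x: torch.Tensor) -> torch.Tensor:  # noqa: F811
            ...

        def forward(self, x):  # noqa: F811
            return x + 5

    scripted = torch.jit.script(Adder())
    return scripted(torch.ones(2)), scripted(1)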
def check_module_initialized(mod):
assert isinstance(mod, torch.nn.Module)
if not hasattr(mod, "_parameters"):
raise RuntimeError(
f"'{torch.typename(type(mod))}' has not been initialized, did you forget to call 'super()'?"
)
# This is to avoid importing torch.distributed.nn
if not hasattr(mod, "remote_parameters"):
for name, param in mod._parameters.items():
if param is not None and torch.nn.parameter.is_lazy(param):
raise RuntimeError(
f"'{torch.typename(type(mod))}' has uninitialized parameters {name}. Did you forget to run a forward pass?"
)
for name, buf in mod._buffers.items():
if buf is not None and torch.nn.parameter.is_lazy(buf):
raise RuntimeError(
f"'{torch.typename(type(mod))}' has uninitialized buffers {name}. Did you forget to run a forward pass?"
)
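# --- Illustrative sketch (not part of the original source) ---------------------
# A hedged example of the lazy-module check above: a LazyLinear has
# uninitialized parameters until its first forward pass, so scripting it before
# that pass raises the "uninitialized parameters" error from
# check_module_initialized(); after one call it can be scripted.
def _example_lazy_module_initialization():
    lazy = torch.nn.LazyLinear(4)
    lazy(torch.randn(2, 3))  # materializes the parameters
    return torch.jit.script(lazy)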
def infer_methods_to_compile(nn_module):
"""Implement the default rules for which methods should act as starting points for compilation.
(TODO add a link when the rules are published).
"""
check_module_initialized(nn_module)
ignored_properties = jit_ignored_properties(nn_module)
methods: list[str] = []
if hasattr(nn_module, "forward") and not _jit_internal.is_ignored_fn(
nn_module.forward
):
forward_func = getattr(nn_module.forward, "__func__", None)
module_forward = getattr(torch.nn.Module, "forward", None)
if forward_func != module_forward:
methods = ["forward"]
exported = []
for name in dir(nn_module):
if name in ignored_properties:
continue
item = getattr(nn_module, name, None)
if (
_jit_internal.get_torchscript_modifier(item)
is _jit_internal.FunctionModifiers.EXPORT
):
exported.append(name)
methods = methods + exported
overload_name_mappings = dict(getattr(nn_module, "__overloads__", {}))
overload_info = get_overload_annotations(nn_module, ignored_properties)
overload_name_mappings.update(get_overload_name_mapping(overload_info))
overload_stubs = make_stubs_for_overloads(overload_info)
nn_module.__overloads__ = overload_name_mappings
    # we shouldn't directly compile overloaded methods, just their overloads
def ignore_overloaded(method_name):
return method_name not in overload_name_mappings
filtered_methods = filter(ignore_overloaded, methods)
    # De-duplicate the methods. We don't want to use a set to store the methods because it
    # introduces non-determinism into the compile order.
uniquer: set[str] = set()
uniqued_methods = []
for name in filtered_methods:
if name in uniquer:
continue
uniqued_methods.append(name)
uniquer.add(name)
stubs = [make_stub_from_method(nn_module, method) for method in uniqued_methods]
return overload_stubs + stubs
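# --- Illustrative sketch (not part of the original source) ---------------------
# A hedged example of the default compilation rules above: ``forward`` is
# compiled, ``@torch.jit.export`` adds an extra entry point, and
# ``@torch.jit.ignore`` keeps a method as plain Python.
def _example_default_compilation_rules():
    class Net(torch.nn.Module):
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x + 1

        @torch.jit.export
        def double(self, x: torch.Tensor) -> torch.Tensor:
            return x * 2

        @torch.jit.ignore
        def debug_repr(self) -> str:
            return f"Net at {id(self)}"  # arbitrary Python, never compiled

    scripted = torch.jit.script(Net())
    return scripted.double(torch.ones(3))  # available because of @torch.jit.export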
def get_hook_stubs(nn_module):
"""Return forward hook and pre_hook ScriptModuleStubs."""
check_module_initialized(nn_module)
hook_map: dict = {}
hook_stubs = []
for hook in nn_module._forward_hooks.values():
if hook.__name__ in hook_map:
if id(hook) != id(hook_map[hook.__name__]):
raise RuntimeError(
f"Hook '{hook.__name__}' on {type(nn_module).__name__} "
"has at least two different python definitions."
" Please use unique names for all hooks."
)
else:
hook_map[hook.__name__] = hook
hook_stubs.append(make_stub(hook, hook.__name__))
pre_hook_stubs = []
for pre_hook in nn_module._forward_pre_hooks.values():
if pre_hook.__name__ in hook_map:
if id(pre_hook) != id(hook_map[pre_hook.__name__]):
raise RuntimeError(
f"Pre-hook '{pre_hook.__name__}' on {type(nn_module).__name__} "
"has at least two different python definitions."
" Please use unique names for all hooks."
)
else:
hook_map[pre_hook.__name__] = pre_hook
pre_hook_stubs.append(make_stub(pre_hook, pre_hook.__name__))
return hook_stubs, pre_hook_stubs
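# --- Illustrative sketch (not part of the original source) ---------------------
# A hedged example of the hook scripting above, assuming the documented
# TorchScript hook signature (module, a tuple of the forward inputs, the forward
# output): the hook is compiled alongside the module and still fires on the
# scripted one.
def _example_scripted_forward_hook():
    from typing import Tuple

    def shape_hook(module, inputs: Tuple[torch.Tensor], output: torch.Tensor):
        print("output shape:", output.shape)

    m = torch.nn.Linear(2, 2)
    m.register_forward_hook(shape_hook)
    scripted = torch.jit.script(m)  # get_hook_stubs() compiles shape_hook too
    return scripted(torch.randn(1, 2))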
def get_property_stubs(nn_module):
"""Create property stubs for the properties of the module by creating method stubs for the getter and setter."""
module_ty = type(nn_module)
properties_asts = get_class_properties(module_ty, self_name="RecursiveScriptModule")
rcbs = {}
for name in dir(module_ty):
item = getattr(module_ty, name, None)
if isinstance(item, property):
if not item.fget:
raise RuntimeError(
f"Property {name} of {nn_module.__name__} must have a getter"
)
rcbs[name] = _jit_internal.createResolutionCallbackFromClosure(item.fget)
stubs = [PropertyStub(rcbs[ast.name().name], ast) for ast in properties_asts]
return stubs
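# --- Illustrative sketch (not part of the original source) ---------------------
# A hedged example of the property support above: a property with a getter is
# turned into a PropertyStub and compiled together with the module.
def _example_module_property():
    class Scaled(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self._scale = 3.0

        @property
        def scale(self) -> float:
            return self._scale

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x * self.scale

    return torch.jit.script(Scaled())(torch.randn(2))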
def interface_script(mod_interface, nn_module):
"""
Make a ScriptModule from an nn.Module, using the interface methods rule for determining which methods to compile.
Args:
        mod_interface: the interface type that the module implements
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
"""
if isinstance(nn_module, torch.jit.ScriptModule):
return nn_module
check_module_initialized(nn_module)
def infer_interface_methods_to_compile(nn_module):
"""Rule to infer the methods from the interface type.
It is used to know which methods need to act as starting points for compilation.
"""
stubs = [
make_stub_from_method(nn_module, method)
for method in mod_interface.getMethodNames()
]
return stubs
return create_script_module(nn_module, infer_interface_methods_to_compile)
def try_compile_fn(fn, loc):
if _jit_internal.is_ignored_fn(fn):
# Don't do anything for @ignore'd functions
return None
if isinstance(fn, torch.nn.Module):
# Since modules are callable pybind recognizes them as functions, but
# don't do anything for them
return None
if not inspect.isfunction(fn) and not inspect.ismethod(fn):
raise RuntimeError(
f"`{fn}` is not a function. Recursive scripting only supports "
"Python functions or methods currently.\n"
f"Consider manually annotating `{fn}` with @torch.jit.script."
)
# The object returned by __prepare_scriptable__ might have a different closure.
# Resolve it here to get the right resolution callback.
fn = fn.__prepare_scriptable__() if hasattr(fn, "__prepare_scriptable__") else fn # type: ignore[operator]
# We don't have the actual scope where the function was defined, but we can
# extract the necessary info from the closed over variables on the function
# object
rcb = _jit_internal.createResolutionCallbackFromClosure(fn)
return torch.jit.script(fn, _rcb=rcb)
def wrap_cpp_class(cpp_class):
"""Wrap this torch._C.Object in a Python RecursiveScriptClass."""
return torch.jit.RecursiveScriptClass(cpp_class)
def wrap_cpp_module(cpp_module):
"""Wrap this torch._C.ScriptModule in a Python ScriptModule, recursively for all submodules."""
def init_fn(script_module):
for name, cpp_module in torch._C.ModuleDict(script_module._c).items():
setattr(script_module, name, wrap_cpp_module(cpp_module))
script_module._concrete_type = torch._C.ConcreteModuleType.from_jit_type(
script_module._c._type()
)
for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()):
script_module._forward_pre_hooks[idx] = fn
for idx, fn in enumerate(script_module._c._get_forward_hooks()):
script_module._forward_hooks[idx] = fn
return torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
def compile_unbound_method(concrete_type, fn):
if _jit_internal.is_ignored_fn(fn):
return None
stub = make_stub(fn, fn.__name__)
with torch._jit_internal._disable_emit_hooks():
# We don't want to call the hooks here since the graph that is calling
# this function is not yet complete
create_methods_and_properties_from_stubs(concrete_type, (stub,), ())
return stub
def lazy_bind(concrete_type, unbound_method):
"""
Return a function that lazily binds `unbound_method` to a provided Module IValue, then invokes the method.
We do this so that any Python shenanigans that
will poison type sharing are impossible at compile time.
"""
def lazy_binding_method(cpp_module, *args):
def init_fn(script_module):
orig_class = concrete_type.py_class
# Copy @ignored/@unused methods from the original module to the new one.
# This ensures they are available during execution.
for name in dir(orig_class):
item = getattr(orig_class, name, None)
if _jit_internal.is_ignored_fn(item):
setattr(script_module, name, item)
# Copy constants over so they are available during execution.
for name, value in concrete_type.get_constants().items():
setattr(script_module, name, value)
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
method = types.MethodType(unbound_method, script_module)
return method(*args)
# make the lazy binding method "look like" the original method
lazy_binding_method.original_fn = unbound_method # type: ignore[attr-defined]
lazy_binding_method.__name__ = unbound_method.__name__
torch._jit_internal.copy_torchscript_modifier(unbound_method, lazy_binding_method)
return lazy_binding_method
```
============================================================================================================
SOURCE CODE FILE: _script.py
LINES: 4
SIZE: 65.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_script.py
ENCODING: utf-8
```py
"""TorchScript.
This module contains functionality to support the JIT's scripting frontend, notably:
- torch.jit.script
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import collections
import copy
import enum
import functools
import inspect
import pickle
import warnings
from typing import Any, Callable, Union
import torch
import torch._jit_internal as _jit_internal
from torch._classes import classes
from torch._jit_internal import _get_model_id, _qualified_name
from torch._utils_internal import log_torchscript_usage
from torch.jit._builtins import _register_builtin
from torch.jit._fuser import _graph_for, _script_method_graph_for
from torch.jit._monkeytype_config import (
JitTypeTraceConfig,
JitTypeTraceStore,
monkeytype_trace,
)
from torch.jit._recursive import (
_compile_and_register_class,
infer_methods_to_compile,
ScriptMethodStub,
wrap_cpp_module,
)
from torch.jit._state import (
_enabled,
_set_jit_function_cache,
_set_jit_overload_cache,
_try_get_jit_cached_function,
_try_get_jit_cached_overloads,
)
from torch.jit.frontend import get_default_args, get_jit_class_def, get_jit_def
from torch.nn import Module
from torch.overrides import (
has_torch_function,
has_torch_function_unary,
has_torch_function_variadic,
)
from torch.package import PackageExporter, PackageImporter
from torch.utils import set_module
from ._serialization import validate_map_location
type_trace_db = JitTypeTraceStore() # DB to hold all call traces from MonkeyType
torch._C.ScriptMethod.graph_for = _script_method_graph_for # type: ignore[attr-defined]
torch._C.ScriptFunction.graph_for = _graph_for # type: ignore[attr-defined]
ScriptFunction = torch._C.ScriptFunction
ScriptFunction.__doc__ = """
Functionally equivalent to a :class:`ScriptModule`, but represents a single
function and does not have any attributes or Parameters.
"""
ScriptFunction.__name__ = "ScriptFunction"
ScriptFunction.__qualname__ = "torch.jit.ScriptFunction"
set_module(ScriptFunction, "torch.jit")
# Throws an error if a jit function is pickled.
# Helps to avoid Python crashes for Python versions 3.9.5 + when protocol 0 or 1 is given as an argument.
def _reduce(cls):
raise pickle.PickleError("ScriptFunction cannot be pickled")
ScriptFunction.__reduce__ = _reduce # type: ignore[assignment]
if _enabled:
Attribute = collections.namedtuple("Attribute", ["value", "type"])
else:
def Attribute(value, type): # type: ignore[no-redef]
return value
Attribute.__doc__ = """
This function is a pass-through that returns `value`, mostly
used to indicate to the TorchScript compiler that the left-hand side
expression is a class instance attribute with type of `type`. Note that
`torch.jit.Attribute` should only be used in the `__init__` method of `jit.ScriptModule`
subclasses.
Though TorchScript can infer correct types for most Python expressions, there are some cases where
type inference can be wrong, including:
- Empty containers like `[]` and `{}`, which TorchScript assumes to be containers of `Tensor`
- Optional types like `Optional[T]` that are assigned a valid value of type `T`; TorchScript would assume
it is of type `T` rather than `Optional[T]`
In eager mode, it is simply a pass-through function that returns `value`
without other implications.
Example:
.. testcode::
import torch
from typing import Dict
class AttributeModule(torch.jit.ScriptModule):
def __init__(self) -> None:
super().__init__()
self.foo = torch.jit.Attribute(0.1, float)
# we should be able to use self.foo as a float here
assert 0.0 < self.foo
self.names_ages = torch.jit.Attribute({}, Dict[str, int])
self.names_ages["someone"] = 20
assert isinstance(self.names_ages["someone"], int)
m = AttributeModule()
# m will contain two attributes
# 1. foo of type float
# 2. names_ages of type Dict[str, int]
.. testcleanup::
del AttributeModule
del m
Note: it's now preferred to use type annotations instead of `torch.jit.Attribute`:
.. testcode::
import torch
from typing import Dict
class AttributeModule(torch.nn.Module):
names: Dict[str, int]
def __init__(self) -> None:
super().__init__()
self.names = {}
m = AttributeModule()
.. testcleanup::
del AttributeModule
del m
Args:
value: An initial value to be assigned to attribute.
type: A Python type
Returns:
Returns `value`
"""
def _get_type_trace_db():
# This is a private API. Use of this for external purposes is discouraged.
return type_trace_db
# Gets a function from the name of a method on a type
def _get_function_from_type(cls, name):
return getattr(cls, name, None)
# ScriptClasses must be new-style classes because we construct them using their
# __new__ method.
def _is_new_style_class(cls):
if hasattr(cls, "__class__"):
return "__dict__" in dir(cls) or hasattr(cls, "__slots__")
# These OrderedDictWrapper classes replace the actual OrderedDicts in
# module with versions that get/set properties inside of Module.
# This allows us to reuse most of nn.Module while still storing the
# data in C++.
# Each OrderedDict needs to support:
# x not in view
# x in view
# view[name] = ...
# view.values()
# del view[name]
# view.items()
# view.keys()
# len(view)
class OrderedDictWrapper:
def __init__(self, _c):
self._c = _c
def keys(self):
return [k for k, v in self.items()]
def values(self):
return [v for k, v in self.items()]
def __len__(self):
return len(self.values())
def __delitem__(self, k):
raise RuntimeError("cannot delete methods or parameters of a script module")
def items(self):
return self._c.items()
def __setitem__(self, k, v):
if k not in self:
raise RuntimeError(
f"Can't add a new parameter after ScriptModule construction. Tried to add '{k}"
)
self._c.setattr(k, v)
def __contains__(self, k):
return self._c.contains(k)
def __getitem__(self, k):
if k not in self:
raise KeyError(k)
return self._c.getattr(k)
class OrderedModuleDict(OrderedDictWrapper):
def __init__(self, module, python_dict):
super().__init__(torch._C.ModuleDict(module))
        # contains _both_ script modules and non-script python-only modules,
        # because script modules are subclassed in python and the
        # C++ Module class will not hold references to them.
        # To ensure that you always get the same python value here,
        # we store it in the python dict as well.
self._python_modules = python_dict
def items(self):
r = self._python_modules.items()
return r
def __contains__(self, k):
return k in self._python_modules
def __setitem__(self, k, v):
# Cases where sub-module can be re-assigned after ScriptModule construction
# 1. If the attr is an module interface type, it's guaranteed that the module is
# not inlined in the graph, so it's safe to swap a new ScriptModule in.
        # 2. if the new value is a ScriptModule with the same JIT type, the IR won't change
# and it's legit to swap a new module in.
# In these two cases we allow swapping a new scripted module and update the
# corresponding python module dict to keep sync.
        # Note: the value to be swapped in has to be a ScriptModule instead of an nn.Module,
        # otherwise it's illegal and we throw an error.
if isinstance(v, ScriptModule):
self._c.setattr(k, v)
self._python_modules[k] = v
else:
raise RuntimeError(
"Cannot re-assign modules in a ScriptModule with non-scripted "
f"module, tried to replace existing module '{k}': {v}"
)
def __getitem__(self, k):
return self._python_modules[k]
# For each user-defined class that subclasses ScriptModule, this meta-class:
# (1) finds all the methods annotated with @script_method in a ScriptModule and
# removes them from the class attributes
# (2) puts a wrapper around the class's __init__ method to recursively compile
# all of the script_methods with the module after the original __init__ has
# run. This has to occur after the user-defined __init__ so that submodules and
# parameters are initialized _before_ the script compiler resolves references to
# `self.param` or `self.module`.
class ScriptMeta(type):
def __init__(cls, name, bases, attrs): # noqa: B902
# Aggregate all the ScriptMethods and constants from superclasses
cls._methods: dict[str, Any] = {}
cls._constants_set = set(getattr(cls, "__constants__", ()))
for base in reversed(bases):
for k, v in getattr(base, "_methods", {}).items():
cls._methods[k] = v
base_constants: set = getattr(base, "_constants_set", set())
cls._constants_set = cls._constants_set.union(base_constants)
# find all the script methods of the current class
for k, v in sorted(attrs.items()):
if isinstance(v, ScriptMethodStub):
delattr(cls, k)
cls._methods[v.original_method.__name__] = v
if getattr(cls, "_disable_script_meta", False):
# We leave built-in ScriptModule types alone, since this metaclass
# is only for compiling user classes that inherit from
# ScriptModule.
super().__init__(name, bases, attrs)
return
original_init = getattr(cls, "__init__", lambda self: None)
@functools.wraps(original_init)
def init_then_script(self, *args, **kwargs):
num_methods = len(cls._methods)
original_init(self, *args, **kwargs)
added_methods_in_init = len(cls._methods) > num_methods
if type(self) == cls:
def make_stubs(module):
cls = type(module)
if hasattr(cls, "_methods"):
return [v for k, v in sorted(cls._methods.items())]
else:
return infer_methods_to_compile(module)
self.__dict__[
"_actual_script_module"
] = torch.jit._recursive.create_script_module(
self, make_stubs, share_types=not added_methods_in_init
)
# Delete the Python attributes that now shadow the ScriptModule
# ones, so that __getattr__ and __setattr__ will properly find
# the scripted versions.
concrete_type = self._actual_script_module._concrete_type
for name in concrete_type.get_attributes():
delattr(self, name)
for name, _ in concrete_type.get_modules():
delattr(self, name)
for name in ("_parameters", "_buffers", "_modules"):
delattr(self, name)
cls.__init__ = init_then_script # type: ignore[misc]
super().__init__(name, bases, attrs)
class _CachedForward:
def __get__(self, obj, cls):
return self.__getattr__("forward") # type: ignore[attr-defined]
class ScriptWarning(Warning):
pass
def script_method(fn):
if not _enabled:
return fn
# NOTE: we need to traverse two frames here because the meta-class frame
    # for ScriptModule will be present, as opposed to invoking @script on
    # a function or invoking define() on a CompilationUnit.
# The stack will look like:
#
# 0. createResolutionCallback()
# 1. script_method()
# 2. ScriptModule metaclass frame
# 3. Surrounding scope
#
# createResolutionCallback internally adds 1 to get us to the scope of this
# function (the calling function). Adding 2 gets us to the proper surrounding scope.
_rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=2)
ast = get_jit_def(fn, fn.__name__, self_name="ScriptModule")
return ScriptMethodStub(_rcb, ast, fn)
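# --- Illustrative sketch (not part of the original source) ---------------------
# A hedged example of the classic ScriptModule subclass pattern: methods marked
# with @torch.jit.script_method are collected by ScriptMeta and compiled as soon
# as the user-defined __init__ has finished running.
def _example_script_method_subclass():
    class MyScriptModule(torch.jit.ScriptModule):
        def __init__(self):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.randn(3, 3))

        @torch.jit.script_method
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x.mm(self.weight)

    return MyScriptModule()(torch.randn(2, 3))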
class ConstMap:
def __init__(self, const_mapping):
self.const_mapping = const_mapping
def __getattr__(self, attr):
return self.const_mapping[attr]
def unpackage_script_module(
importer: PackageImporter, script_module_id: str
) -> torch.nn.Module:
"""
    Called by ``torch.package.PackageImporter``'s Pickler's ``persistent_load`` function.
Performs work of loading and returning a ScriptModule from a ``torch.package`` archive.
"""
if not isinstance(importer.zip_reader, torch._C.PyTorchFileReader):
raise RuntimeError(
"Loading ScriptObjects from a PackageImporter created from a "
"directory is not supported. Use a package archive file instead."
)
cu = torch._C.CompilationUnit()
cpp_module = torch._C._import_ir_module_from_package(
cu,
importer.zip_reader,
importer.storage_context,
validate_map_location(importer.last_map_location),
script_module_id,
)
return wrap_cpp_module(cpp_module)
if _enabled:
_magic_methods = [
"__iter__",
"__len__",
"__neg__",
"__mul__",
"__contains__",
"__add__",
"__sub__",
"__pow__",
"__truediv__",
"__mod__",
"__ne__",
"__eq__",
"__lt__",
"__gt__",
"__le__",
"__ge__",
"__and__",
"__or__",
"__xor__",
"__getitem__",
"__setitem__",
"__call__",
"__int__",
"__float__",
"__bool__",
"__str__",
"__enter__",
"__exit__",
]
class RecursiveScriptClass:
"""Wrapper for a TorchScript class instance for use in Python.
An analogue of RecursiveScriptModule for regular objects that are not modules.
This class is a wrapper around a torch._C.ScriptObject that represents an instance
of a TorchScript class and allows it to be used in Python.
Attributes:
_c [torch._C.ScriptObject]: The C++ object to which attribute lookups and method
calls are forwarded.
_props [Dict[str, property]]: A dictionary of properties fetched from self._c and
                exposed on this wrapper.
"""
def __init__(self, cpp_class):
super().__init__()
self.__dict__["_initializing"] = True
self._c = cpp_class
# Add wrapped object's properties to this class instance.
self._props = {
prop.name: property(prop.getter, prop.setter)
for prop in self._c._properties()
}
self.__dict__["_initializing"] = False
def __getattr__(self, attr):
if self.__dict__.get("_initializing"):
return super().__getattr__(attr) # type: ignore[misc]
if attr in self._props:
return self._props[attr].fget() # type: ignore[call-arg, misc]
return getattr(self._c, attr)
def __setattr__(self, attr, value):
if self.__dict__.get("_initializing"):
return super().__setattr__(attr, value)
if attr in self._props:
return self._props[attr].fset(value) # type: ignore[call-arg, misc]
setattr(self._c, attr, value)
# Delegate calls to magic methods like __len__ to the C++ module backing the
# RecursiveScriptClass.
def forward_magic_method(self, method_name, *args, **kwargs):
if not self._c._has_method(method_name):
raise TypeError
self_method = self.__getattr__(method_name)
return self_method(*args, **kwargs)
def __getstate__(self):
raise pickle.PickleError("ScriptClasses cannot be pickled")
def __iadd__(self, other):
if self._c._has_method("__iadd__"):
return self.forward_magic_method("__iadd__", other)
else:
return self.forward_magic_method("__add__", other)
    for method_name in _magic_methods:
        # Bind ``method_name`` at definition time (via a keyword-only default) so
        # each generated method forwards to its own magic method instead of to
        # whatever the loop variable holds after the loop finishes.
        def method_template(self, *args, _method_name=method_name, **kwargs):
            return self.forward_magic_method(_method_name, *args, **kwargs)

        setattr(RecursiveScriptClass, method_name, method_template)
# this is a Python 'non-data descriptor' that causes the first access
# to ScriptModule's forward to look up the forward method and stash
    # it in the object's dict. Due to the standard rules for attribute lookup,
# subsequent lookups will just directly return the previously looked up method.
# This is necessary because nn.Module defines forward as a method. If we
# did nothing, __getattr__ would not be called. Instead we'd get nn.Module.forward
# which always throws an exception.
class ScriptModule(Module, metaclass=ScriptMeta):
r"""Wrapper for C++ torch::jit::Module with methods, attributes, and parameters.
A wrapper around C++ ``torch::jit::Module``. ``ScriptModule``\s
contain methods, attributes, parameters, and
constants. These can be accessed the same way as on a normal ``nn.Module``.
"""
__jit_unused_properties__ = [
"code",
"code_with_constants",
"graph",
"inlined_graph",
"original_name",
]
def __init__(self) -> None:
super().__init__()
forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment]
def __getattr__(self, attr):
if "_actual_script_module" not in self.__dict__:
return super().__getattr__(attr)
return getattr(self._actual_script_module, attr)
def __setattr__(self, attr, value):
if "_actual_script_module" not in self.__dict__:
# Unwrap torch.jit.Attribute into a regular setattr + record
# the provided type in __annotations__.
#
# This ensures that if we use the attr again in `__init__`, it
# will look like the actual value, not an instance of Attribute.
if isinstance(value, Attribute):
# NB: Ensure that we set __annotations__ on the specific
# class in question, and not on a superclass (which would
# be wrong wrong wrong!).
# See also https://github.com/pytorch/pytorch/issues/39463
if "__annotations__" not in self.__class__.__dict__:
self.__class__.__annotations__ = {}
self.__annotations__[attr] = value.type
value = value.value
return super().__setattr__(attr, value)
setattr(self._actual_script_module, attr, value)
def define(self, src):
if "_actual_script_module" in self.__dict__:
# If we have completed initialization, just defer to the
# backing RecursiveScriptModule to eagerly compile the provided
# source.
return self._actual_script_module.define(src)
# Otherwise, we are still in the object's __init__.
# In that case, add `src` as a stub to be compiled.
#
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
ast = torch._C._parse_source_def(src)
self._methods[ast.name().name] = ScriptMethodStub(rcb, ast, None)
def _replicate_for_data_parallel(self):
return self._actual_script_module._replicate_for_data_parallel()
def __reduce_package__(self, exporter: PackageExporter):
"""Save a ScriptModule inside of a ``torch.package`` archive.
Called by ``torch.package.PackageExporter``'s Pickler's ``persistent_id`` when
saving TorchScript objects. Performs act of saving a ScriptModule inside of
a ``torch.package`` archive.
Returns method to load the ScriptModule from a ``torch.package.PackageImporter``'s
Pickler's ``persistent_load`` function.
"""
script_module_id = exporter.get_unique_id()
exporter.script_module_serializer.serialize(self._c, int(script_module_id))
return (unpackage_script_module, (script_module_id,))
class RecursiveScriptModule(ScriptModule):
# XXX: RecursiveScriptModule inherits from ScriptModule for the sole
# reason that it retains the existing isinstance(ScriptModule)
# behavior.
r"""Retain the existing isinstance(ScriptModule) behavior.
The core data structure in TorchScript is the ``ScriptModule``. It is an
analogue of torch's ``nn.Module`` and represents an entire model as a tree of
submodules. Like normal modules, each individual module in a ``ScriptModule`` can
have submodules, parameters, and methods. In ``nn.Module``\s methods are implemented
as Python functions, but in ``ScriptModule``\s methods are implemented as
TorchScript functions, a statically-typed subset of Python that contains all
of PyTorch's built-in Tensor operations. This difference allows your
``ScriptModule``\s code to run without the need for a Python interpreter.
``ScriptModule``\s should not be created manually, instead use
either :func:`tracing <torch.jit.trace>` or :func:`scripting <torch.jit.script>`.
Tracing and scripting can be applied incrementally and :ref:`composed as necessary <Types>`.
* Tracing records the tensor operations as executed with a set of example inputs and uses these
operations to construct a computation graph. You can use the full dynamic behavior of Python with tracing,
but values other than Tensors and control flow aren't captured in the graph.
* Scripting inspects the Python code of the model
and compiles it to TorchScript. Scripting allows the use of many `types`_ of values and supports dynamic control flow.
Many, but not all features of Python are supported by the compiler, so changes to the source code may be necessary.
"""
_disable_script_meta = True
def __init__(self, cpp_module):
self.__dict__["_initializing"] = True
self._c = cpp_module
super().__init__()
# Delete the 'training' attribute set up by `Module.__init__`. It
# will get set on the underlying cpp module, so we delete it here
# to avoid this version shadowing the cpp module version.
delattr(self, "training")
@staticmethod
def _construct(cpp_module, init_fn):
"""
Construct a RecursiveScriptModule that's ready for use.
            PyTorch code should use this to construct a RecursiveScriptModule instead
            of calling `__init__` directly, as it makes sure the
object is properly finalized (and in the future, we may take
control of how the RecursiveScriptModule instance is created).
Args:
cpp_module: The C++ Module that will hold the actual state of
this RecursiveScriptModule instance.
init_fn: Lambda that initializes the RecursiveScriptModule passed to it.
"""
script_module = RecursiveScriptModule(cpp_module)
init_fn(script_module)
# Finalize the ScriptModule: replace the nn.Module state with our
# custom implementations and flip the _initializing bit.
RecursiveScriptModule._finalize_scriptmodule(script_module)
return script_module
@staticmethod
def _finalize_scriptmodule(script_module):
script_module._parameters = OrderedDictWrapper(
torch._C.ParameterDict(script_module._c)
)
script_module._buffers = OrderedDictWrapper(
torch._C.BufferDict(script_module._c)
)
script_module._modules = OrderedModuleDict(
script_module._c, script_module._modules
)
script_module._initializing = False
def _reconstruct(self, cpp_module):
"""
Re-construct an instance of RecursiveScriptModule using an instance of a C++ module.
Args:
cpp_module: The C++ module that this RecursiveScriptModule will be rebuilt around.
"""
self.__init__(cpp_module) # type: ignore[misc]
# Copy the concrete type from the C++ module to this ScriptModule.
self._concrete_type = torch._C.ConcreteModuleType.from_jit_type(
self._c._type()
)
# Copy submodules from the C++ module to this ScriptModule.
modules = {}
for name, cpp_module in torch._C.ModuleDict(self._c).items():
modules[name] = wrap_cpp_module(cpp_module)
self._modules = OrderedModuleDict(self._c, modules) # type: ignore[assignment]
# Copy parameters and buffers.
self._parameters = OrderedDictWrapper(torch._C.ParameterDict(self._c)) # type: ignore[assignment]
self._buffers = OrderedDictWrapper(torch._C.BufferDict(self._c)) # type: ignore[assignment]
# Get rid of the functions from the old C++ module.
self.__dict__ = {
k: v
for k, v in self.__dict__.items()
if not isinstance(v, torch._C.ScriptMethod)
}
self.__dict__["_initializing"] = False
@property
def graph(self):
r"""Return a string representation of the internal graph for the ``forward`` method.
See :ref:`interpreting-graphs` for details.
"""
return self._c._get_method("forward").graph
@property
def inlined_graph(self):
r"""
Return a string representation of the internal graph for the ``forward`` method.
This graph will be preprocessed to inline all function and method calls.
See :ref:`interpreting-graphs` for details.
"""
return self.forward.inlined_graph # type: ignore[attr-defined]
@property
def code(self):
r"""
Return a pretty-printed representation (as valid Python syntax) of the internal graph for the ``forward`` method.
See :ref:`inspecting-code` for details.
"""
return self.forward.code # type: ignore[attr-defined]
@property
def code_with_constants(self):
r"""Return a tuple.
Returns a tuple of:
[0] a pretty-printed representation (as valid Python syntax) of
the internal graph for the ``forward`` method. See `code`.
[1] a ConstMap following the CONSTANT.cN format of the output in [0].
The indices in the [0] output are keys to the underlying constant's values.
See :ref:`inspecting-code` for details.
"""
r = self.forward.code_with_constants # type: ignore[attr-defined]
return (r[0], ConstMap(r[1]))
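        # Illustrative sketch (not part of the original source), kept as a comment
        # so the class body is unchanged: the properties above are the usual way to
        # inspect what was compiled, e.g.
        #
        #     scripted = torch.jit.script(torch.nn.Linear(2, 2))
        #     print(scripted.code)   # pretty-printed TorchScript source of forward
        #     print(scripted.graph)  # the underlying IR graph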
def save(self, f, **kwargs):
r"""Save with a file-like object.
save(f, _extra_files={})
See :func:`torch.jit.save <torch.jit.save>` which accepts a file-like object.
This function, torch.save(), converts the object to a string, treating it as a path.
DO NOT confuse these two functions when it comes to the 'f' parameter functionality.
"""
return self._c.save(str(f), **kwargs)
def _save_for_lite_interpreter(self, *args, **kwargs):
r"""Add (or update) the bytecode session to the script model.
_save_for_lite_interpreter(f)
The updated model is used
in lite interpreter for mobile applications.
Args:
f: a string containing a file name.
_extra_files: Map from filename to contents which will be stored as part of 'f'.
"""
return self._c._save_for_mobile(*args, **kwargs)
def _save_to_buffer_for_lite_interpreter(self, *args, **kwargs):
return self._c._save_to_buffer_for_mobile(*args, **kwargs)
def save_to_buffer(self, *args, **kwargs):
return self._c.save_to_buffer(*args, **kwargs)
def get_debug_state(self, *args, **kwargs):
return self._c.get_debug_state()
def extra_repr(self):
return f"original_name={self.original_name}"
def graph_for(self, *args, **kwargs):
return self.forward.graph_for(self, *args, **kwargs) # type: ignore[attr-defined]
@property
def original_name(self):
            if type(self).__name__ == str(self._c._type().name()):
return ""
return str(self._c._type().name())
def define(self, src):
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
self._c._define(self._concrete_type, src, rcb)
def __getattr__(self, attr):
if "_initializing" not in self.__dict__:
raise RuntimeError(
"ScriptModule has not been initialized, did you forget to call super's init?"
)
if self._initializing:
return super().__getattr__(attr)
# _modules check is before hasattr since modules are included as attributes in _c,
# but we want to get the python wrapper from _modules instead of the raw _c object.
if attr in self._modules:
return self._modules[attr]
elif self._c.hasattr(attr):
return self._c.getattr(attr)
elif self._c._has_method(attr):
script_method = self._c._get_method(attr)
# cache method so future calls do not go through __getattr__
# to improve invocation performance
self.__dict__[attr] = script_method
return script_method
return super().__getattr__(attr)
def __setattr__(self, attr, value):
if self._initializing:
return super().__setattr__(attr, value)
if attr in self._modules:
self._modules[attr] = value
elif self._c.hasattr(attr):
self._c.setattr(attr, value)
elif (
hasattr(self, "_concrete_type")
and attr in self._concrete_type.get_constants().keys()
):
# TODO: we don't have _concrete_type set after load(), and in general we lose constant information.
# We should encode constants as class type attributes (or something) so it persists across save/load.
raise AttributeError(
f"Cannot mutate TorchScript constant value: '{attr}'. Value: '{value}'"
)
else:
# We allow setting Python attributes on the ScriptModule, for
# when people want to stash some convenience info on it.
# TODO: it's possible that the following is confusing:
# s = torch.jit.script(...)
# s.python_attr = ...
# s.save() <--- this doesn't have `python_attr`
# It's fairly trivial to save enough info to warn in this case.
return super().__setattr__(attr, value)
def __copy__(self):
return torch.jit._recursive.wrap_cpp_module(copy.copy(self._c))
def __deepcopy__(self, memo):
return torch.jit._recursive.wrap_cpp_module(copy.deepcopy(self._c, memo))
# Python magic methods do method lookups on an object's class type, instead of looking up
        # the methods defined on the instance. In order to continue to expose the magic methods
# of builtin-containers (ModuleList, Sequential, ModuleDict) to Python, we
# define magic methods here as a shim to the correct attribute.
def forward_magic_method(self, method_name, *args, **kwargs):
self_method = getattr(self, method_name)
if getattr(self_method, "__func__", None) == getattr(
RecursiveScriptModule, method_name
):
raise NotImplementedError
return self_method(*args, **kwargs)
def __iter__(self):
return self.forward_magic_method("__iter__")
def __getitem__(self, idx):
return self.forward_magic_method("__getitem__", idx)
def __len__(self):
return self.forward_magic_method("__len__")
def __contains__(self, key):
return self.forward_magic_method("__contains__", key)
# dir is defined by the base nn.Module, so instead of throwing if
# it is not overridden, we call into the nn.Module __dir__ method
def __dir__(self):
self_method = self.__dir__
if (
self_method.__func__ # type: ignore[attr-defined]
== _get_function_from_type(RecursiveScriptModule, "__dir__")
):
return super().__dir__()
return self_method()
        # To resolve bool(value), Python first checks whether __bool__ is defined, then
        # __iter__, and otherwise returns True for class instances. Since __iter__() on this
        # class throws if it isn't overridden, we define __bool__ to preserve the default behavior.
def __bool__(self):
self_method = self.__bool__
if (
self_method.__func__ # type: ignore[attr-defined]
== _get_function_from_type(RecursiveScriptModule, "__bool__")
):
return True
return self_method()
def _replicate_for_data_parallel(self):
# we have to initialize ScriptModule properly so that
# it works with pybind11
def init_fn(script_module):
# Don't do anything here, we'll initialize the ScriptModule below
return
return RecursiveScriptModule._construct(
self._c._replicate_for_data_parallel(), init_fn
)
# Need to copy all RecursiveScriptModule methods to ScriptModule.
#
# This is because `super().foo()` does not use
# `__getattr__` to look up `foo`. So we need to make each method available on
# the ScriptModule manually.
for name, item in RecursiveScriptModule.__dict__.items():
if not callable(item) and not isinstance(item, property):
continue
if name.startswith("__") or hasattr(ScriptModule, name):
continue
# We can copy over the implementation wholesale because besides the
# `super()` thing above, ScriptModule behaves exactly like
# RecursiveScriptModule
setattr(ScriptModule, name, item)
def _get_methods(cls):
import inspect
# In Python 3 unbound methods are functions, but in Python 2 they are methods
return inspect.getmembers(
cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x)
)
_compiled_methods_allowlist = {
"forward",
"register_buffer",
"register_parameter",
"register_module",
"add_module",
"_apply",
"apply",
"cuda",
"cpu",
"to",
"type",
"float",
"double",
"half",
"state_dict",
"_save_to_state_dict",
"load_state_dict",
"_load_from_state_dict",
"_named_members",
"parameters",
"named_parameters",
"buffers",
"named_buffers",
"children",
"named_children",
"modules",
"named_modules",
"zero_grad",
"share_memory",
"_get_name",
"extra_repr",
"_slow_forward",
"_tracing_name",
"eval",
"train",
"get_extra_state",
"set_extra_state",
}
def _make_fail(name):
def fail(self, *args, **kwargs):
raise RuntimeError(name + " is not supported on ScriptModules")
return fail
for name, method in _get_methods(torch.nn.Module):
if name.startswith("__") or name.endswith("_call_impl"):
continue
if (
name not in RecursiveScriptModule.__dict__
and name not in _compiled_methods_allowlist
):
setattr(RecursiveScriptModule, method.__name__, _make_fail(name))
else:
# TODO MAKE SURE THAT DISABLING WORKS
class RecursiveScriptClass: # type: ignore[no-redef]
pass
class ScriptModule(torch.nn.Module): # type: ignore[no-redef]
def __init__(self, arg=None):
super().__init__()
class RecursiveScriptModule(ScriptModule): # type: ignore[no-redef]
def __init__(self, arg=None):
super().__init__()
def call_prepare_scriptable_func_impl(obj, memo):
if not isinstance(obj, torch.nn.Module):
return obj
obj_id = id(obj)
# If obj_id is in memo, obj has already been prepared or is being
# prepared in another call up the stack.
if obj_id in memo:
return memo[id(obj)]
obj = (
obj.__prepare_scriptable__() if hasattr(obj, "__prepare_scriptable__") else obj
) # type: ignore[operator]
# Record obj in memo to avoid infinite recursion in the case of cycles in the module
# hierarchy when recursing below.
memo[obj_id] = obj
new_obj_dict = {}
for name, sub_module in obj.__dict__.items():
if name == "_modules":
for k, v in sub_module.items():
sub_module[k] = call_prepare_scriptable_func_impl(v, memo)
new_obj_dict[name] = sub_module
elif isinstance(sub_module, torch.nn.Module) and not isinstance(
sub_module, ScriptModule
):
new_obj_dict[name] = call_prepare_scriptable_func_impl(sub_module, memo)
else:
new_obj_dict[name] = sub_module
for k, v in new_obj_dict.items():
        obj.__dict__[k] = v
return obj
def call_prepare_scriptable_func(obj):
memo: dict[int, torch.nn.Module] = {}
return call_prepare_scriptable_func_impl(obj, memo)
def create_script_dict(obj):
"""
Create a ``torch._C.ScriptDict`` instance with the data from ``obj``.
Args:
obj (dict): The Python dictionary that is used to initialize the ``ScriptDict``
returned by this function.
Returns:
An instance of ``torch._C.ScriptDict`` that has the same data as ``obj``
and can be passed between Python and TorchScript with reference semantics and
zero copy overhead.
"""
return torch._C.ScriptDict(obj) # type: ignore[attr-defined]
def create_script_list(obj, type_hint=None):
"""
Create a ``torch._C.ScriptList`` instance with the data from ``obj``.
Args:
        obj (list): The Python list that is used to initialize the ``ScriptList``
returned by this function.
Returns:
An instance of ``torch._C.ScriptList`` that has the same data as ``obj``
and can be passed between Python and TorchScript with reference semantics and
zero copy overhead.
"""
return torch._C.ScriptList(obj) # type: ignore[attr-defined]
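# --- Illustrative sketch (not part of the original source) ---------------------
# A hedged example of the two helpers above as reached through the public API:
# passing a plain dict or list to torch.jit.script returns a ScriptDict or
# ScriptList that shares its storage with TorchScript (reference semantics).
def _example_script_containers():
    d = torch.jit.script({"a": torch.zeros(1)})
    d["b"] = torch.ones(1)  # mutation is visible on both the Python and TorchScript side
    lst = torch.jit.script([1, 2, 3])
    lst.append(4)
    return d, lst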
_TOPLEVEL: bool = True
def _script_impl(
obj,
optimize=None,
_frames_up=0,
_rcb=None,
example_inputs: Union[list[tuple], dict[Callable, list[tuple]], None] = None,
):
global type_trace_db
if optimize is not None:
warnings.warn(
"`optimize` is deprecated and has no effect. "
"Use `with torch.jit.optimized_execution()` instead",
FutureWarning,
stacklevel=3,
)
# No-op for modules, functions, class instances that are already scripted
if isinstance(obj, RecursiveScriptClass):
return obj
if isinstance(obj, ScriptModule):
return obj
if isinstance(obj, ScriptFunction):
return obj
if example_inputs:
        # If MonkeyType is installed, enable profile-directed type annotation.
        # Check whether example_inputs are defined and generate call traces
        # for the method by running the eager-mode version of the method with
        # the provided example inputs. This logs all the traces in type_trace_db
type_trace_db = JitTypeTraceStore()
if monkeytype_trace:
monkeytype_config = JitTypeTraceConfig(type_trace_db)
with monkeytype_trace(monkeytype_config):
if isinstance(example_inputs, dict):
# If the obj is an nn.Module or a class, then each method is
# executed with the arguments provided in the example inputs.
# example inputs here will be of type Dict(class.method, (arguments))
# This is used to infer type annotations for those methods
# which are not called directly under the hood of monkeytype.
for module, example_input in example_inputs.items():
for example in example_input:
module(*example)
elif isinstance(example_inputs, list):
for examples in example_inputs:
obj(*examples)
else:
raise ValueError(
"Error: Unable to infer types. Please format the inputs to type `List[Tuple]`"
" or `Dict[Callable, List[Tuple]]` to be run with MonkeyType."
)
else:
warnings.warn(
"Warning: monkeytype is not installed. Please install https://github.com/Instagram/MonkeyType "
"to enable Profile-Directed Typing in TorchScript. Refer to "
"https://github.com/Instagram/MonkeyType/blob/master/README.rst to install MonkeyType. "
)
if isinstance(obj, torch.nn.Module):
obj = call_prepare_scriptable_func(obj)
return torch.jit._recursive.create_script_module(
obj, torch.jit._recursive.infer_methods_to_compile
)
else:
obj = (
obj.__prepare_scriptable__()
if hasattr(obj, "__prepare_scriptable__")
else obj
) # type: ignore[operator]
if isinstance(obj, dict):
return create_script_dict(obj)
if isinstance(obj, list):
return create_script_list(obj)
if inspect.isclass(obj):
qualified_name = _qualified_name(obj)
# If this type is a `nn.Module` subclass, they probably meant to pass
# an instance instead of a Module
if issubclass(obj, torch.nn.Module):
raise RuntimeError(
f"Type '{obj}' cannot be compiled since it inherits from nn.Module, pass an instance instead"
)
# Enums are automatically usable in TorchScript, explicitly scripting
# is not necessary, but not harmful either.
if issubclass(obj, enum.Enum):
return obj
if not _is_new_style_class(obj):
raise RuntimeError(
"TorchScript classes must be new-style classes. "
"Please inherit from 'object'."
)
if len(obj.mro()) > 2:
raise RuntimeError(
"TorchScript classes does not support inheritance yet. "
"Please directly inherit from 'object'."
)
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromFrame(_frames_up + 1)
_compile_and_register_class(obj, _rcb, qualified_name)
return obj
elif inspect.isfunction(obj) or inspect.ismethod(obj):
qualified_name = _qualified_name(obj)
        # this is a decorated fn, and we need the underlying fn and its rcb
if hasattr(obj, "__script_if_tracing_wrapper"):
obj = obj.__original_fn # type: ignore[union-attr]
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
# some functions are explicitly marked as not supported in script mode
if hasattr(obj, "__script_unsupported"):
raise RuntimeError("TorchScript error: " + obj.__script_unsupported)
_check_directly_compile_overloaded(obj)
maybe_already_compiled_fn = _try_get_jit_cached_function(obj)
if maybe_already_compiled_fn:
maybe_already_compiled_fn._torchdynamo_inline = obj # type: ignore[attr-defined]
return maybe_already_compiled_fn
ast = get_jit_def(obj, obj.__name__)
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
fn = torch._C._jit_script_compile(
qualified_name, ast, _rcb, get_default_args(obj)
)
# Forward docstrings
fn.__doc__ = obj.__doc__
fn.__name__ = "ScriptFunction"
fn.__qualname__ = "torch.jit.ScriptFunction"
# Allow torch.compile() to inline
fn._torchdynamo_inline = obj # type: ignore[attr-defined]
_set_jit_function_cache(obj, fn)
return fn
else:
return torch.jit._recursive.create_script_class(obj)
def script(
obj,
optimize=None,
_frames_up=0,
_rcb=None,
example_inputs: Union[list[tuple], dict[Callable, list[tuple]], None] = None,
):
r"""Script the function.
Scripting a function or ``nn.Module`` will inspect the source code, compile
it as TorchScript code using the TorchScript compiler, and return a :class:`ScriptModule` or
:class:`ScriptFunction`. TorchScript itself is a subset of the Python language, so not all
features in Python work, but we provide enough functionality to compute on
tensors and do control-dependent operations. For a complete guide, see the
:ref:`language-reference`.
    Scripting a dictionary or list copies the data inside it into a TorchScript instance that can be
subsequently passed by reference between Python and TorchScript with zero copy overhead.
``torch.jit.script`` can be used as a function for modules, functions, dictionaries and lists
and as a decorator ``@torch.jit.script`` for :ref:`torchscript-classes` and functions.
Args:
obj (Callable, class, or nn.Module): The ``nn.Module``, function, class type,
dictionary, or list to compile.
example_inputs (Union[List[Tuple], Dict[Callable, List[Tuple]], None]): Provide example inputs
to annotate the arguments for a function or ``nn.Module``.
Returns:
If ``obj`` is ``nn.Module``, ``script`` returns
a :class:`ScriptModule` object. The returned :class:`ScriptModule` will
have the same set of sub-modules and parameters as the
original ``nn.Module``. If ``obj`` is a standalone function,
a :class:`ScriptFunction` will be returned. If ``obj`` is a ``dict``, then
``script`` returns an instance of `torch._C.ScriptDict`. If ``obj`` is a ``list``,
then ``script`` returns an instance of `torch._C.ScriptList`.
**Scripting a function**
The ``@torch.jit.script`` decorator will construct a :class:`ScriptFunction`
by compiling the body of the function.
Example (scripting a function):
.. testcode::
import torch
@torch.jit.script
def foo(x, y):
if x.max() > y.max():
r = x
else:
r = y
return r
print(type(foo)) # torch.jit.ScriptFunction
# See the compiled graph as Python code
print(foo.code)
# Call the function using the TorchScript interpreter
foo(torch.ones(2, 2), torch.ones(2, 2))
.. testoutput::
:hide:
...
    **Scripting a function using example_inputs**
    Example inputs can be used to annotate a function's arguments.
Example (annotating a function before scripting):
.. testcode::
import torch
def test_sum(a, b):
return a + b
# Annotate the arguments to be int
scripted_fn = torch.jit.script(test_sum, example_inputs=[(3, 4)])
print(type(scripted_fn)) # torch.jit.ScriptFunction
# See the compiled graph as Python code
print(scripted_fn.code)
# Call the function using the TorchScript interpreter
scripted_fn(20, 100)
.. testoutput::
:hide:
...
**Scripting an nn.Module**
Scripting an ``nn.Module`` by default will compile the ``forward`` method and recursively
    compile any methods, submodules, and functions called by ``forward``. If an ``nn.Module`` only uses
    features supported in TorchScript, no changes to the original module code should be necessary. ``script``
    will construct a :class:`ScriptModule` that has copies of the attributes, parameters, and methods of
the original module.
Example (scripting a simple module with a Parameter):
.. testcode::
import torch
class MyModule(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
# This parameter will be copied to the new ScriptModule
self.weight = torch.nn.Parameter(torch.rand(N, M))
# When this submodule is used, it will be compiled
self.linear = torch.nn.Linear(N, M)
def forward(self, input):
output = self.weight.mv(input)
# This calls the `forward` method of the `nn.Linear` module, which will
# cause the `self.linear` submodule to be compiled to a `ScriptModule` here
output = self.linear(output)
return output
scripted_module = torch.jit.script(MyModule(2, 3))
Example (scripting a module with traced submodules):
.. testcode::
import torch
import torch.nn as nn
import torch.nn.functional as F
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
                # torch.jit.trace produces a ScriptModule, so conv1 and conv2 are traced submodules
self.conv1 = torch.jit.trace(nn.Conv2d(1, 20, 5), torch.rand(1, 1, 16, 16))
self.conv2 = torch.jit.trace(nn.Conv2d(20, 20, 5), torch.rand(1, 20, 16, 16))
def forward(self, input):
input = F.relu(self.conv1(input))
input = F.relu(self.conv2(input))
return input
scripted_module = torch.jit.script(MyModule())
To compile a method other than ``forward`` (and recursively compile anything it calls), add
the :func:`@torch.jit.export <torch.jit.export>` decorator to the method. To opt out of compilation
use :func:`@torch.jit.ignore <torch.jit.ignore>` or :func:`@torch.jit.unused <torch.jit.unused>`.
Example (an exported and ignored method in a module)::
import torch
import torch.nn as nn
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
@torch.jit.export
def some_entry_point(self, input):
return input + 10
@torch.jit.ignore
def python_only_fn(self, input):
# This function won't be compiled, so any
# Python APIs can be used
import pdb
pdb.set_trace()
def forward(self, input):
if self.training:
self.python_only_fn(input)
return input * 99
scripted_module = torch.jit.script(MyModule())
print(scripted_module.some_entry_point(torch.randn(2, 2)))
print(scripted_module(torch.randn(2, 2)))
    Example (annotating the forward method of an nn.Module using example_inputs)::
import torch
import torch.nn as nn
from typing import NamedTuple
class MyModule(NamedTuple):
result: List[int]
class TestNNModule(torch.nn.Module):
def forward(self, a) -> MyModule:
result = MyModule(result=a)
return result
pdt_model = TestNNModule()
        # Runs the pdt_model in eager mode with the inputs provided and annotates the arguments of forward
scripted_model = torch.jit.script(pdt_model, example_inputs={pdt_model: [([10, 20, ], ), ], })
# Run the scripted_model with actual inputs
print(scripted_model([20]))
"""
if not _enabled:
return obj
try:
global _TOPLEVEL
prev = _TOPLEVEL
_TOPLEVEL = False
ret = _script_impl(
obj=obj,
optimize=optimize,
_frames_up=_frames_up + 1,
_rcb=_rcb,
example_inputs=example_inputs,
)
if prev:
log_torchscript_usage("script", model_id=_get_model_id(ret))
return ret
finally:
_TOPLEVEL = prev
# overloads are registered in _jit_internal and compiled here so that _overload
# can be used in nn/functional.py without an import cycle
def _check_overload_defaults(impl_defaults, overload_defaults, loc):
for name, overload_value in overload_defaults.items():
if name not in impl_defaults or impl_defaults[name] != overload_value:
raise torch.jit.frontend.FrontendError(
loc,
"Default parameters on overloads do not affect the runtime so they "
"must equal to the default parameter on the implementation function. Found on "
f"parameter {name}",
)
def _compile_function_with_overload(overload_fn, qual_name, impl_fn):
overload_decl = get_jit_def(overload_fn, overload_fn.__name__).decl()
overload_signature = torch.jit.annotations.get_signature(
overload_fn, None, None, inspect.ismethod(overload_fn)
)
impl_ast = get_jit_def(impl_fn, impl_fn.__name__)
overload_defaults = get_default_args(overload_fn)
implementation_defaults = get_default_args(impl_fn)
_rcb = _jit_internal.createResolutionCallbackFromClosure(impl_fn)
_check_overload_defaults(
implementation_defaults, overload_defaults, overload_decl.range()
)
fn = torch._C._jit_script_compile_overload(
qual_name,
overload_decl,
impl_ast,
_rcb,
implementation_defaults,
overload_signature,
)
return fn
def _get_overloads(obj):
# check for cached compiled fns
existing_compiled_fns = _try_get_jit_cached_overloads(obj)
qual_name = _qualified_name(obj)
uncompiled_overloads = _jit_internal._get_fn_overloads(qual_name)
if uncompiled_overloads is None:
return existing_compiled_fns
if obj in uncompiled_overloads:
raise RuntimeError(
_jit_internal.get_overload_no_implementation_error_message("function", obj)
)
compiled_fns = [
_compile_function_with_overload(overload_fn, qual_name, obj)
for overload_fn in uncompiled_overloads
]
if existing_compiled_fns:
compiled_fns = existing_compiled_fns + compiled_fns
# cache compilation, remove information stored to do compilation
_set_jit_overload_cache(obj, compiled_fns)
_jit_internal._clear_fn_overloads(qual_name)
return compiled_fns
def _check_directly_compile_overloaded(obj):
qual_name = _qualified_name(obj)
if _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj):
raise RuntimeError(
f"Function {qual_name} cannot be directly compiled because it"
" is overloaded. It must be used in a context of a function"
" where its inputs can determine which overload to call."
)
def interface(obj):
r"""Decorate to annotate classes or modules of different types.
This decorator can be used to define an interface that can be used to annotate
    classes or modules of different types. This can be used to annotate a submodule
or attribute class that could have different types that implement the same
interface, or which could be swapped at runtime; or to store a list of modules or
classes of varying types.
It is sometimes used to implement "Callables" - functions or modules that implement
an interface but whose implementations differ and which can be swapped out.
Example:
.. testcode::
import torch
from typing import List
@torch.jit.interface
class InterfaceType:
def run(self, x: torch.Tensor) -> torch.Tensor:
pass
# implements InterfaceType
@torch.jit.script
class Impl1:
def run(self, x: torch.Tensor) -> torch.Tensor:
return x.relu()
class Impl2(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.val = torch.rand(())
@torch.jit.export
def run(self, x: torch.Tensor) -> torch.Tensor:
return x + self.val
def user_fn(impls: List[InterfaceType], idx: int, val: torch.Tensor) -> torch.Tensor:
return impls[idx].run(val)
user_fn_jit = torch.jit.script(user_fn)
impls = [Impl1(), torch.jit.script(Impl2())]
val = torch.rand(4, 4)
user_fn_jit(impls, 0, val)
user_fn_jit(impls, 1, val)
"""
if not inspect.isclass(obj):
raise RuntimeError("interface must be applied to a class")
if not _is_new_style_class(obj):
raise RuntimeError("TorchScript interfaces must inherit from 'object'")
# Expected MRO is:
# User module
# torch.nn.modules.module.Module
# object
is_module_interface = issubclass(obj, torch.nn.Module) and len(obj.mro()) == 3
if not is_module_interface and len(obj.mro()) > 2:
raise RuntimeError(
"TorchScript interface does not support inheritance yet. "
"Please directly inherit from 'object' or 'nn.Module'."
)
qualified_name = _qualified_name(obj)
rcb = _jit_internal.createResolutionCallbackFromFrame(1)
# if this type is a `nn.Module` subclass, generate a module interface type
# instead of a class interface type; a module interface type only compiles
# the user provided methods as part of the interface
ast = get_jit_class_def(obj, obj.__name__)
mangled_classname = torch._C._jit_script_interface_compile(
qualified_name, ast, rcb, is_module_interface
)
obj.__torch_script_interface__ = mangled_classname
return obj
def _recursive_compile_class(obj, loc):
_qual_name = _qualified_name(obj)
# We're starting a new compilation, so update the error call stack in
# case it fails
error_stack = torch._C.CallStack(_qual_name, loc) # noqa: F841
rcb = _jit_internal.createResolutionCallbackForClassMethods(obj)
return _compile_and_register_class(obj, rcb, _qual_name)
CompilationUnit = torch._C.CompilationUnit
set_module(CompilationUnit, "torch.jit")
def pad(s: str, padding: int, offset: int = 0, char: str = " "):
if padding >= len(s):
padding -= len(s)
return "".join([char for _ in range(padding + offset)]) + s
class _ScriptProfileColumn:
def __init__(self, header: str, alignment: int = 4, offset: int = 0):
self.header = header
self.alignment = alignment
self.offset = offset
self.rows: dict[int, Any] = {}
def add_row(self, lineno: int, value: Any):
self.rows[lineno] = value
def materialize(self):
max_length = len(self.header)
rows: list[tuple[int, str]] = []
for key, value in self.rows.items():
cell = str(value)
rows.append((key, cell))
max_length = max(len(cell), max_length)
if self.alignment > 0:
padding = max_length + self.alignment
padding -= padding % self.alignment
else:
padding = 0
rows = [(key, pad(cell, padding, self.offset)) for key, cell in rows]
return pad(self.header, padding, self.offset), rows
class _ScriptProfileTable:
def __init__(self, cols: list[_ScriptProfileColumn], source_range: list[int]):
self.cols = cols
self.source_range = source_range
def dump_string(self):
outputs: list[str] = []
cells: list[tuple[str, dict[int, str]]] = []
header_buffer = ""
for col in self.cols:
header, rows = col.materialize()
header_buffer += header
cells.append((header, dict(rows)))
outputs.append(header_buffer)
outputs.append(pad("", len(header_buffer), 0, "="))
for line in self.source_range:
row_buffer = ""
for header, rows in cells:
cell = rows.get(line)
if cell is None:
row_buffer += pad("", len(header))
else:
row_buffer += cell
outputs.append(row_buffer)
return "\n".join(outputs)
class _ScriptProfile:
def __init__(self) -> None:
self.profile = classes.profiling._ScriptProfile()
def enable(self):
self.profile.enable()
def disable(self):
self.profile.disable()
def dump_string(self) -> str:
outputs: list[str] = []
for source_stats in self.profile._dump_stats():
source_ref = source_stats.source()
source_lines = source_ref.text().splitlines()
dedent = min(len(line) - len(line.lstrip(" ")) for line in source_lines)
source_lines = [line[dedent:] for line in source_lines]
start_line = source_ref.starting_lineno()
end_line = start_line + len(source_lines)
source_range = range(start_line, end_line)
lineno = _ScriptProfileColumn("Line #")
hits = _ScriptProfileColumn("Hits")
time_ns = _ScriptProfileColumn("Time (ns)")
line_contents = _ScriptProfileColumn("Line Contents", 0, 1)
stats = source_stats.line_map()
for line in source_range:
lineno.add_row(line, line)
line_contents.add_row(line, source_lines[line - start_line])
stat = stats.get(line)
if stat is not None:
hits.add_row(line, stat.count())
time_ns.add_row(line, stat.duration_ns())
table = _ScriptProfileTable(
[lineno, hits, time_ns, line_contents], list(source_range)
)
outputs.append(table.dump_string())
return "\n\n".join(outputs)
def dump(self):
print(self.dump_string())
def _unwrap_optional(x):
assert x is not None, "Unwrapping null optional"
return x
_register_builtin(_unwrap_optional, "aten::_unwrap_optional")
_register_builtin(_jit_internal.is_scripting, "aten::is_scripting")
_register_builtin(has_torch_function, "aten::has_torch_function")
_register_builtin(has_torch_function_unary, "aten::has_torch_function")
_register_builtin(has_torch_function_variadic, "aten::has_torch_function")
```
|
===================================================================================================================
SOURCE CODE FILE: _serialization.py
LINES: 1
SIZE: 9.73 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_serialization.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Serialization.
This module contains functionality for serializing TorchScript modules, notably:
* torch.jit.save
* torch.jit.load
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import os
import torch
from torch._jit_internal import _get_model_id
from torch._utils_internal import log_torchscript_usage
from torch.jit._recursive import wrap_cpp_module
from torch.serialization import validate_cuda_device
def save(m, f, _extra_files=None):
r"""
Save an offline version of this module for use in a separate process.
The saved module serializes all of the methods, submodules, parameters, and
attributes of this module. It can be loaded into the C++ API using
``torch::jit::load(filename)`` or into the Python API with
:func:`torch.jit.load <torch.jit.load>`.
To be able to save a module, it must not make any calls to native Python
functions. This means that all submodules must be subclasses of
:class:`ScriptModule` as well.
.. DANGER::
All modules, no matter their device, are always loaded onto the CPU
during loading. This is different from :func:`torch.load`'s semantics
and may change in the future.
Args:
m: A :class:`ScriptModule` to save.
f: A file-like object (has to implement write and flush) or a string
containing a file name.
_extra_files: Map from filename to contents which will be stored as part of `f`.
.. note::
torch.jit.save attempts to preserve the behavior of some operators
across versions. For example, dividing two integer tensors in
PyTorch 1.5 performed floor division, and if the module
containing that code is saved in PyTorch 1.5 and loaded in PyTorch 1.6
its division behavior will be preserved. The same module saved in
PyTorch 1.6 will fail to load in PyTorch 1.5, however, since the
behavior of division changed in 1.6, and 1.5 does not know how to
replicate the 1.6 behavior.
Example:
.. testcode::
import torch
import io
class MyModule(torch.nn.Module):
def forward(self, x):
return x + 10
m = torch.jit.script(MyModule())
# Save to file
torch.jit.save(m, 'scriptmodule.pt')
# This line is equivalent to the previous
m.save("scriptmodule.pt")
# Save to io.BytesIO buffer
buffer = io.BytesIO()
torch.jit.save(m, buffer)
# Save with extra files
extra_files = {'foo.txt': b'bar'}
torch.jit.save(m, 'scriptmodule.pt', _extra_files=extra_files)
"""
log_torchscript_usage("save", model_id=_get_model_id(m))
if _extra_files is None:
_extra_files = {}
if isinstance(f, (str, os.PathLike)):
m.save(f, _extra_files=_extra_files)
else:
ret = m.save_to_buffer(_extra_files=_extra_files)
f.write(ret)
def load(f, map_location=None, _extra_files=None, _restore_shapes=False):
r"""
Load a :class:`ScriptModule` or :class:`ScriptFunction` previously saved with :func:`torch.jit.save <torch.jit.save>`.
All previously saved modules, no matter their device, are first loaded onto CPU,
and then are moved to the devices they were saved from. If this fails (e.g.
    because the runtime system doesn't have certain devices), an exception is
raised.
Args:
f: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
map_location (string or torch.device): A simplified version of
``map_location`` in `torch.jit.save` used to dynamically remap
storages to an alternative set of devices.
_extra_files (dictionary of filename to content): The extra
filenames given in the map would be loaded and their content
would be stored in the provided map.
_restore_shapes (bool): Whether or not to retrace the module on load using stored inputs
Returns:
A :class:`ScriptModule` object.
.. warning::
It is possible to construct malicious pickle data which will execute arbitrary code
        during :func:`torch.jit.load`. Never load data that could have come from an untrusted
source, or that could have been tampered with. **Only load data you trust**.
Example:
.. testcode::
import torch
import io
torch.jit.load('scriptmodule.pt')
# Load ScriptModule from io.BytesIO object
with open('scriptmodule.pt', 'rb') as f:
buffer = io.BytesIO(f.read())
# Load all tensors to the original device
torch.jit.load(buffer)
# Load all tensors onto CPU, using a device
buffer.seek(0)
torch.jit.load(buffer, map_location=torch.device('cpu'))
# Load all tensors onto CPU, using a string
buffer.seek(0)
torch.jit.load(buffer, map_location='cpu')
# Load with extra files.
extra_files = {'foo.txt': ''} # values will be replaced with data
torch.jit.load('scriptmodule.pt', _extra_files=extra_files)
print(extra_files['foo.txt'])
.. testoutput::
:hide:
...
.. testcleanup::
import os
os.remove("scriptmodule.pt")
"""
if isinstance(f, (str, os.PathLike)):
if not os.path.exists(f):
raise ValueError(f"The provided filename {f} does not exist")
if os.path.isdir(f):
raise ValueError(f"The provided filename {f} is a directory")
map_location = validate_map_location(map_location)
if _extra_files is None:
_extra_files = {}
cu = torch._C.CompilationUnit()
if isinstance(f, (str, os.PathLike)):
cpp_module = torch._C.import_ir_module(
cu, os.fspath(f), map_location, _extra_files, _restore_shapes
) # type: ignore[call-arg]
else:
cpp_module = torch._C.import_ir_module_from_buffer(
cu, f.read(), map_location, _extra_files, _restore_shapes
) # type: ignore[call-arg]
# TODO: Pretty sure this approach loses ConstSequential status and such
ret = wrap_cpp_module(cpp_module)
log_torchscript_usage("load", model_id=_get_model_id(ret))
return ret
def validate_map_location(map_location=None):
if isinstance(map_location, str):
map_location = torch.device(map_location)
elif not (map_location is None or isinstance(map_location, torch.device)):
raise ValueError(
"map_location should be either None, string or torch.device, "
"but got type: " + str(type(map_location))
)
if str(map_location).startswith("cuda"):
validate_cuda_device(map_location)
return map_location
def jit_module_from_flatbuffer(f):
if isinstance(f, (str, os.PathLike)):
f = os.fspath(f)
return wrap_cpp_module(torch._C._load_jit_module_from_file(f))
else:
return wrap_cpp_module(torch._C._load_jit_module_from_bytes(f.read()))
def save_jit_module_to_flatbuffer(m, f, _extra_files=None):
r"""
Save an offline version of this module for use in a separate process.
The saved module serializes all of the methods, submodules, parameters, and
attributes of this module. It can be loaded into the C++ API using
``torch::jit::load_jit_module_from_file(filename)`` or into the Python API with
:func:`torch.jit.jit_module_from_flatbuffer<torch.jit.jit_module_from_flatbuffer>`.
To be able to save a module, it must not make any calls to native Python
functions. This means that all submodules must be subclasses of
:class:`ScriptModule` as well.
.. DANGER::
All modules, no matter their device, are always loaded onto the CPU
during loading. This is different from :func:`torch.load`'s semantics
and may change in the future.
Args:
m: A :class:`ScriptModule` to save.
f: A string for file path
Example:
.. testcode::
import torch
import io
class MyModule(torch.nn.Module):
def forward(self, x):
return x + 10
m = torch.jit.script(MyModule())
# Save to file
torch.jit.save_jit_module_to_flatbuffer(m, 'scriptmodule.ff')
"""
extra_files = _extra_files
if extra_files is None:
extra_files = {}
if isinstance(f, (str, os.PathLike)):
f = os.fspath(f)
torch._C._save_jit_module(m._c, f, extra_files)
else:
s = torch._C._save_jit_module_to_bytes(m._c, extra_files)
f.write(s)
def get_flatbuffer_module_info(path_or_file):
r"""Get some information regarding a model file in flatbuffer format.
Args:
path_or_file: Either str, Path or file like object (BytesIO OK).
If it's str or Path, we will read the file referenced by that
path as Bytes.
Returns:
A dict with metadata on what that file contains, currently looks like
this:
{
'bytecode_version': 4, # int
'operator_version': 4, # int
'function_names': {
'__torch__.___torch_mangle_0.Foo.forward'}, # set
'type_names': set(), # set
'opname_to_num_args': {'aten::linear': 3} # Dict[str, int]
}
"""
if isinstance(path_or_file, (str, os.PathLike)):
with open(path_or_file, "rb") as f:
all_bytes = f.read()
else:
all_bytes = path_or_file.read()
return torch._C._get_module_info_from_flatbuffer(all_bytes)
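# A minimal usage sketch (the file path is illustrative):
#
#   info = get_flatbuffer_module_info("scriptmodule.ff")
#   print(info["bytecode_version"], sorted(info["function_names"]))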
```
|
=====================================================================================================================
SOURCE CODE FILE: _shape_functions.py
LINES: 1
SIZE: 45.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_shape_functions.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import math
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
number = Union[int, float]
# flake8: noqa
###
# There are generated files that depend on this file
# To re-generate, please run from the root of the repo:
# python torchgen/shape_functions/gen_jit_shape_functions.py
# How to test:
# After regenerating files, compile PyTorch.
# Then run: ./build/bin/test_jit --gtest_filter=TestShapeGraphLinting.Basic
# If you have enabled opinfo testing for the op, also run:
# python test/test_ops_jit.py TestJitCPU.test_variant_consistency_jit_[FAILING_OP]_cpu_float32
# to reproduce errors from opinfo tests.
# Example PR: https://github.com/pytorch/pytorch/pull/80860/files
####
import torch
def broadcast(a: list[int], b: list[int]):
dimsA = len(a)
dimsB = len(b)
ndim = max(dimsA, dimsB)
expandedSizes: list[int] = []
for i in range(ndim):
offset = ndim - 1 - i
dimA = dimsA - 1 - offset
dimB = dimsB - 1 - offset
sizeA = a[dimA] if (dimA >= 0) else 1
sizeB = b[dimB] if (dimB >= 0) else 1
if sizeA != sizeB and sizeA != 1 and sizeB != 1:
# TODO: only assertion error is bound in C++ compilation right now
raise AssertionError(
f"The size of tensor a {sizeA} must match the size of tensor b ({sizeB}) at non-singleton dimension {i}"
)
expandedSizes.append(sizeB if sizeA == 1 else sizeA)
return expandedSizes
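# Worked examples (standard broadcasting semantics):
#   broadcast([3, 1, 5], [4, 1]) -> [3, 4, 5]
#   broadcast([2, 1], [1, 3])    -> [2, 3]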
def broadcast_three(a: list[int], b: list[int], c: list[int]):
return broadcast(broadcast(a, b), c)
def broadcast_one_three(a: list[int], b: Any, c: list[int]):
return broadcast(a, c)
def adaptive_avg_pool2d(self: list[int], out: list[int]):
assert len(out) == 2
assert len(self) == 3 or len(self) == 4
for i in range(1, len(self)):
assert self[i] != 0
shape: list[int] = []
for i in range(0, len(self) - 2):
shape.append(self[i])
for elem in out:
shape.append(elem)
return shape
def _copy(self: list[int]):
out: list[int] = []
for elem in self:
out.append(elem)
return out
def unary(self: list[int]):
return _copy(self)
def broadcast_inplace(a: list[int], b: list[int]):
dimsA = len(a)
dimsB = len(b)
if dimsB > dimsA:
raise AssertionError(
f"The dims of tensor b ({dimsB}) must be less than or equal tothe dims of tensor a ({dimsA}) "
)
for dimA in range(dimsA):
dimB = dimsB - dimsA + dimA
sizeA = a[dimA]
sizeB = b[dimB] if (dimB >= 0) else 1
if sizeA != sizeB and sizeB != 1:
# TODO: only assertion error is bound in C++ compilation right now
raise AssertionError(
"The size of tensor a {} must match the size of tensor b ("
"{}) at non-singleton dimension {}".format(sizeA, sizeB, dimA)
)
return _copy(a)
def expand(self: list[int], sizes: list[int]):
assert len(sizes) >= len(self)
ndim = len(sizes)
tensor_dim = len(self)
if ndim == 0:
return _copy(sizes)
out: list[int] = []
for i in range(ndim):
offset = ndim - 1 - i
dim = tensor_dim - 1 - offset
size = self[dim] if dim >= 0 else 1
targetSize = sizes[i]
if targetSize == -1:
assert dim >= 0
targetSize = size
if size != targetSize:
assert size == 1
size = targetSize
out.append(size)
return out
def expand_one_unused(self: list[int], sizes: list[int], inp0: Any):
return expand(self, sizes)
def infer_size_impl(shape: list[int], numel: int) -> list[int]:
newsize = 1
infer_dim: Optional[int] = None
for dim in range(len(shape)):
if shape[dim] == -1:
if infer_dim is not None:
raise AssertionError("only one dimension can be inferred")
infer_dim = dim
elif shape[dim] >= 0:
newsize *= shape[dim]
else:
raise AssertionError("invalid shape dimensions")
if not (
numel == newsize
or (infer_dim is not None and newsize > 0 and numel % newsize == 0)
):
raise AssertionError("invalid shape")
out = _copy(shape)
if infer_dim is not None:
out[infer_dim] = numel // newsize
return out
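# Worked example: infer_size_impl([2, -1], 6) -> [2, 3]; the -1 dimension is inferred
# so that the total number of elements stays 6, mirroring Tensor.view semantics.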
def numel(sizes: list[int]):
numel = 1
for elem in sizes:
numel *= elem
return numel
def view(self: list[int], sizes: list[int]):
return infer_size_impl(sizes, numel(self))
def view_one_unused(self: list[int], sizes: list[int], *, implicit: bool = False):
return view(self, sizes)
def sum_mean_dim(
self: list[int], opt_dims: Optional[list[int]], keep_dim: bool, dt: Any
):
out: list[int] = []
if opt_dims is None or len(opt_dims) == 0:
dims: list[int] = list(range(len(self)))
else:
dims = opt_dims
for idx in range(len(self)):
is_mean_dim: bool = False
for reduce_dim in dims:
if idx == maybe_wrap_dim(reduce_dim, len(self)):
is_mean_dim = True
if is_mean_dim:
if keep_dim:
out.append(1)
else:
out.append(self[idx])
return out
def max_dim(self: list[int], dim: int, keep_dim: bool):
out = sum_mean_dim(self, [dim], keep_dim, None)
return out, out
# note: python already rounds down towards negative infinity on integer division, special arithmetic not needed
def div_rtn(x: int, y: int):
return x // y
def pooling_output_shape_pad_lr(
inputSize: int,
kernelSize: int,
pad_l: int,
pad_r: int,
stride: int,
dilation: int,
ceil_mode: bool,
):
outputSize = (
div_rtn(
inputSize
+ pad_l
+ pad_r
- dilation * (kernelSize - 1)
- 1
+ (stride - 1 if ceil_mode else 0),
stride,
)
+ 1
)
if ceil_mode:
if (outputSize - 1) * stride >= inputSize + pad_l:
outputSize = outputSize - 1
return outputSize
def pooling_output_shape(
inputSize: int,
kernelSize: int,
pad_l: int,
stride: int,
dilation: int,
ceil_mode: bool,
):
assert stride != 0, "stride should not be zeero"
return pooling_output_shape_pad_lr(
inputSize, kernelSize, pad_l, pad_l, stride, dilation, ceil_mode
)
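# Worked example: a 7-wide input pooled with kernel 3, padding 1, stride 2, dilation 1
# and ceil_mode=False gives pooling_output_shape(7, 3, 1, 2, 1, False) == 4,
# i.e. (7 + 2 * 1 - 3) // 2 + 1.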
def pool2d_shape_check(
input: list[int],
kH: int,
kW: int,
dH: int,
dW: int,
padH: int,
padW: int,
dilationH: int,
dilationW: int,
nInputPlane: int,
inputHeight: int,
inputWidth: int,
outputHeight: int,
outputWidth: int,
):
ndim = len(input)
assert kW > 0 and kH > 0
assert dW > 0 and dH > 0
assert dilationH > 0 and dilationW > 0
valid_dims = input[1] != 0 and input[2] != 0
assert (
ndim == 3
and input[0] != 0
and valid_dims
or (ndim == 4 and valid_dims and input[3] != 0)
)
assert kW // 2 >= padW and kH // 2 >= padH
assert outputWidth >= 1 and outputHeight >= 1
def max_pool2d(
input: list[int],
kernel_size: list[int],
stride: list[int],
padding: list[int],
dilation: list[int],
ceil_mode: bool,
):
assert (
len(kernel_size) == 1 or len(kernel_size) == 2
), "max_pool2d: kernel_size must either be a single int, or a tuple of two ints"
kH = kernel_size[0]
kW = kH if len(kernel_size) == 1 else kernel_size[1]
assert (
len(stride) == 0 or len(stride) == 1 or len(stride) == 2
), "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints"
dH = kH if len(stride) == 0 else stride[0]
if len(stride) == 0:
dW = kW
elif len(stride) == 1:
dW = dH
else:
dW = stride[1]
assert (
len(padding) == 1 or len(padding) == 2
), "max_pool2d: padding must either be a single int, or a tuple of two ints"
padH = padding[0]
padW = padH if len(padding) == 1 else padding[1]
assert (
len(dilation) == 1 or len(dilation) == 2
), "max_pool2d: dilation must be either a single int, or a tuple of two ints"
dilationH = dilation[0]
dilationW = dilationH if len(dilation) == 1 else dilation[1]
assert len(input) == 3 or len(input) == 4
nbatch = input[-4] if len(input) == 4 else 1
nInputPlane = input[-3]
inputHeight = input[-2]
inputWidth = input[-1]
outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)
outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)
pool2d_shape_check(
input,
kH,
kW,
dH,
dW,
padH,
padW,
dilationH,
dilationW,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
)
if len(input) == 3:
return [nInputPlane, outputHeight, outputWidth]
else:
return [nbatch, nInputPlane, outputHeight, outputWidth]
def max_pool2d_with_indices(
input: list[int],
kernel_size: list[int],
stride: list[int],
padding: list[int],
dilation: list[int],
ceil_mode: bool,
):
out = max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
return (out, out)
def upsample_nearest2d(
input: list[int],
output_size: Optional[list[int]],
scale_factors: Optional[list[float]],
):
out: list[int] = []
out.append(input[0])
out.append(input[1])
if scale_factors is None and output_size is None:
assert 0, "Either output_size or scale_factors must be presented"
if output_size is not None:
assert (
scale_factors is None
), "Must specify exactly one of output_size and scale_factors"
assert len(output_size) == 2
out.append(output_size[0])
out.append(output_size[1])
if scale_factors is not None:
assert (
output_size is None
), "Must specify exactly one of output_size and scale_factors"
assert len(scale_factors) == 2
out.append(int(input[2] * scale_factors[0]))
out.append(int(input[3] * scale_factors[1]))
return out
def mm(self: list[int], mat2: list[int]):
assert len(self) == 2, "self must be a matrix"
assert len(mat2) == 2, "mat2 must be a matrix"
assert self[1] == mat2[0]
return [self[0], mat2[1]]
def dot(self: list[int], tensor: list[int]):
assert len(self) == 1 and len(tensor) == 1
assert self[0] == tensor[0]
out: list[int] = []
return out
def mv(self: list[int], vec: list[int]):
assert len(self) == 2 and len(vec) == 1
assert self[1] == vec[0]
# TODO: return self
return [self[0]]
def unsqueeze(li: list[int], dim: int):
dim = maybe_wrap_dim(dim, len(li) + 1)
out = _copy(li)
out.insert(dim, 1)
return out
def squeeze_nodim(li: list[int]):
out: list[int] = []
for i in range(len(li)):
if li[i] != 1:
out.append(li[i])
return out
def squeeze(li: list[int], dim: int):
out: list[int] = []
wrapped_dim = maybe_wrap_dim(dim, len(li))
for i in range(len(li)):
if i == wrapped_dim:
if li[i] != 1:
out.append(li[i])
else:
out.append(li[i])
return out
def squeeze_dims(li: list[int], dims: list[int]):
if len(dims) == 0:
return li
wrapped_dims = _copy(dims)
for i in range(len(dims)):
wrapped_dims[i] = maybe_wrap_dim(wrapped_dims[i], len(li))
result: list[int] = []
for i in range(len(li)):
if li[i] == 1:
if i not in wrapped_dims:
result.append(li[i])
else:
result.append(li[i])
return result
def index_select(self: list[int], dim: int, index: list[int]):
dim = maybe_wrap_dim(dim, len(self))
numel = multiply_integers(index)
assert len(index) <= 1
assert dim == 0 or dim < len(self)
result_size: list[int] = []
for i in range(len(self)):
if dim == i:
result_size.append(numel)
else:
result_size.append(self[i])
return result_size
def embedding(
weight: list[int],
indices: list[int],
padding_idx: int = -1,
scale_grad_by_freq: bool = False,
sparse: bool = False,
):
assert len(weight) == 2
if len(indices) == 1:
return index_select(weight, 0, indices)
size = _copy(indices)
size.append(weight[1])
return size
def max_int():
return 9223372036854775807
def slice(
self: list[int], dim: int, start: Optional[int], end: Optional[int], step: int
):
ndim = len(self)
assert ndim != 0
dim = maybe_wrap_dim(dim, ndim)
start_val = start if start is not None else 0
end_val = end if end is not None else max_int()
assert step > 0
if start_val == max_int():
start_val = 0
if start_val < 0:
start_val += self[dim]
if end_val < 0:
end_val += self[dim]
if start_val < 0:
start_val = 0
elif start_val > self[dim]:
start_val = self[dim]
if end_val < start_val:
end_val = start_val
elif end_val >= self[dim]:
end_val = self[dim]
slice_len = end_val - start_val
out = _copy(self)
out[dim] = (slice_len + step - 1) // step
return out
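# Worked example: slice([10], dim=0, start=2, end=8, step=3) -> [2], matching the
# length of x[2:8:3] for a 1-D tensor of size 10 (elements 2 and 5 are selected).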
def check_cat_no_zero_dim(tensors: list[list[int]]):
for tensor in tensors:
assert len(tensor) > 0
def legacy_cat_wrap_dim(dim: int, tensor_sizes: list[list[int]]):
out_dim: Optional[int] = None
for size in tensor_sizes:
if not (len(size) == 1 and size[0] == 0):
if out_dim is None:
out_dim = maybe_wrap_dim(dim, len(size))
if out_dim is None:
out_dim = dim
return out_dim
def should_skip(tensor: list[int]):
return numel(tensor) == 0 and len(tensor) == 1
def check_cat_shape_except_dim(
first: list[int], second: list[int], dimension: int, index: int
):
first_dims = len(first)
second_dims = len(second)
assert first_dims == second_dims, "Tensors must have same number of dimensions"
for dim in range(0, first_dims):
if dim != dimension:
assert (
first[dim] == second[dim]
), "Sizes of tensors must match except in dimension"
def cat(tensors: list[list[int]], dim: int):
check_cat_no_zero_dim(tensors)
dim = legacy_cat_wrap_dim(dim, tensors)
assert len(tensors) > 0
not_skipped_tensor: Optional[list[int]] = None
for tensor in tensors:
if not should_skip(tensor):
not_skipped_tensor = tensor
if not_skipped_tensor is None:
return [0]
cat_dim_size = 0
for i in range(len(tensors)):
tensor = tensors[i]
if not should_skip(tensor):
check_cat_shape_except_dim(not_skipped_tensor, tensor, dim, i)
cat_dim_size = cat_dim_size + tensor[dim]
result_size = _copy(not_skipped_tensor)
result_size[dim] = cat_dim_size
return result_size
def stack(tensors: list[list[int]], dim: int):
unsqueezed_tensors: list[list[int]] = []
for tensor in tensors:
unsqueezed = unsqueeze(tensor, dim)
unsqueezed_tensors.append(unsqueezed)
return cat(unsqueezed_tensors, dim)
def select(self: list[int], dim: int, index: int):
ndim = len(self)
assert ndim != 0
dim = maybe_wrap_dim(dim, ndim)
size = self[dim]
assert not (index < -size or index >= size)
if index < 0:
index += size
out: list[int] = []
for i in range(ndim):
if i != dim:
out.append(self[i])
return out
def matmul(tensor1: list[int], tensor2: list[int]):
dim_tensor1 = len(tensor1)
dim_tensor2 = len(tensor2)
if dim_tensor1 == 1 and dim_tensor2 == 1:
return dot(tensor1, tensor2)
elif dim_tensor1 == 2 and dim_tensor2 == 1:
return mv(tensor1, tensor2)
elif dim_tensor1 == 1 and dim_tensor2 == 2:
return squeeze(mm(unsqueeze(tensor1, 0), tensor2), 0)
elif dim_tensor1 == 2 and dim_tensor2 == 2:
return mm(tensor1, tensor2)
elif dim_tensor1 >= 1 and dim_tensor2 >= 1:
        # We are multiplying b1 x n x m1 by b2 x m2 x p (where b1 and b2 can be lists of batch dims);
# we track m1 vs m2 separately even though they must match for nicer error messages
n = tensor1[-2] if dim_tensor1 > 1 else 1
batch_tensor1: list[int] = []
# TODO: handling of slice
for i in range(dim_tensor1 - 2):
batch_tensor1.append(tensor1[i])
p = tensor2[-1]
batch_tensor2: list[int] = []
# TODO: handling of slice
for i in range(dim_tensor2 - 2):
batch_tensor2.append(tensor2[i])
# expand the batch portion (i.e. cut off matrix dimensions and expand rest)
expand_batch_portion = broadcast(batch_tensor1, batch_tensor2)
# todo: copy ?
output_shape = expand_batch_portion
if dim_tensor1 > 1:
output_shape.append(n)
if dim_tensor2 > 1:
output_shape.append(p)
return output_shape
else:
assert False, "both arguments to matmul need to be at least 1D"
def t(self: list[int]):
assert len(self) <= 2
self_len = len(self)
if self_len == 0:
out: list[int] = []
return out
elif self_len == 1:
return [self[0]]
else:
return [self[1], self[0]]
def transpose(self: list[int], dim0: int, dim1: int):
ndims = len(self)
dim0 = maybe_wrap_dim(dim0, ndims)
dim1 = maybe_wrap_dim(dim1, ndims)
if dim0 == dim1:
return _copy(self)
out: list[int] = []
for i in range(ndims):
if i == dim0:
out.append(self[dim1])
elif i == dim1:
out.append(self[dim0])
else:
out.append(self[i])
return out
def linear(input: list[int], weight: list[int], bias: Optional[list[int]]):
out = matmul(input, t(weight))
if bias is not None:
assert broadcast(bias, out) == out
return out
def addmm(self: list[int], mat1: list[int], mat2: list[int], beta: Any, alpha: Any):
return broadcast(self, mm(mat1, mat2))
def check_non_negative(array: list[int]) -> bool:
# TODO: look into rewriting with early return and getting loop unrolling to fire
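    # NOTE: despite its name, this helper returns True when any element is negative;
    # callers assert `not check_non_negative(...)` to require all-non-negative values.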
non_negative = False
for val in array:
if val < 0:
non_negative = True
return non_negative
def check_shape_forward(
input: list[int],
weight_sizes: list[int],
bias: Optional[list[int]],
stride: list[int],
padding: list[int],
dilation: list[int],
groups: int,
):
k = len(input)
weight_dim = len(weight_sizes)
# TODO: assertions could be expanded with the error messages
assert not check_non_negative(padding)
assert not check_non_negative(stride)
assert weight_dim == k
assert weight_sizes[0] >= groups
assert (weight_sizes[0] % groups) == 0
# only handling not transposed
assert input[1] == weight_sizes[1] * groups
assert bias is None or (len(bias) == 1 and bias[0] == weight_sizes[0])
for i in range(2, k):
assert (input[i] + 2 * padding[i - 2]) >= (
dilation[i - 2] * (weight_sizes[i] - 1) + 1
)
# this is not handling transposed convolution yet
def conv_output_size(
input_size: list[int],
weight_size: list[int],
bias: Optional[list[int]],
stride: list[int],
padding: list[int],
dilation: list[int],
groups: int,
):
check_shape_forward(
input_size, weight_size, bias, stride, padding, dilation, groups
)
has_dilation = len(dilation) > 0
dim = len(input_size)
output_size: list[int] = []
input_batch_size_dim = 0
weight_output_channels_dim = 0
output_size.append(input_size[input_batch_size_dim])
output_size.append(weight_size[weight_output_channels_dim])
for d in range(2, dim):
dilation_ = dilation[d - 2] if has_dilation else 1
kernel = dilation_ * (weight_size[d] - 1) + 1
output_size.append(
(input_size[d] + (2 * padding[d - 2]) - kernel) // stride[d - 2] + 1
)
return output_size
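# Worked example: a (1, 3, 8, 8) input and a (16, 3, 3, 3) weight with stride 1,
# padding 0, dilation 1 and groups 1 give
# conv_output_size([1, 3, 8, 8], [16, 3, 3, 3], None, [1, 1], [0, 0], [1, 1], 1) == [1, 16, 6, 6].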
def conv1d(
input: list[int],
weight: list[int],
bias: Optional[list[int]],
stride: list[int],
padding: list[int],
dilation: list[int],
groups: int,
):
assert len(weight) == 3
assert len(input) == 3
return conv_output_size(input, weight, bias, stride, padding, dilation, groups)
def conv2d(
input: list[int],
weight: list[int],
bias: Optional[list[int]],
stride: list[int],
padding: list[int],
dilation: list[int],
groups: int,
):
assert len(weight) == 4
assert len(input) == 4
return conv_output_size(input, weight, bias, stride, padding, dilation, groups)
def conv_backwards(
grad_output: list[int],
input: list[int],
weight: list[int],
biases: Optional[list[int]],
):
    # Bias gradient is always generated regardless of whether biases is supplied
return _copy(input), _copy(weight), [grad_output[1]]
def conv_transpose2d_input(
input: list[int],
weight: list[int],
bias: Optional[list[int]] = None,
stride: Optional[list[int]] = None,
padding: Optional[list[int]] = None,
output_padding: Optional[list[int]] = None,
groups: int = 1,
dilation: Optional[list[int]] = None,
) -> list[int]:
if stride is None:
stride = [1, 1]
if padding is None:
padding = [0, 0]
if output_padding is None:
output_padding = [0, 0]
if dilation is None:
dilation = [1, 1]
has_dilation = len(dilation) > 0
dim = len(input)
output_size: list[int] = []
input_batch_size_dim = 0
weight_output_channels_dim = 1
output_size.append(input[input_batch_size_dim])
output_size.append(weight[weight_output_channels_dim] * groups)
for d in range(2, dim):
dilation_ = dilation[d - 2] if has_dilation else 1
kernel = dilation_ * (weight[d] - 1)
output_size.append(
(input[d] - 1) * stride[d - 2]
- 2 * padding[d - 2]
+ kernel
+ output_padding[d - 2]
+ 1
)
return output_size
def conv_forwards(
input: list[int],
weight: list[int],
bias: Optional[list[int]],
stride: list[int],
padding: list[int],
dilation: list[int],
transposed: bool,
output_padding: list[int],
groups: int,
) -> list[int]:
has_dilation = len(dilation) > 0
has_output_padding = len(output_padding) > 0
dim = len(input)
output_size: list[int] = []
input_batch_size_dim = 0
weight_output_channels_dim = 1 if transposed else 0
output_size.append(input[input_batch_size_dim])
if transposed:
output_size.append(weight[weight_output_channels_dim] * groups)
else:
output_size.append(weight[weight_output_channels_dim])
for d in range(2, dim):
dilation_ = dilation[d - 2] if has_dilation else 1
output_padding_ = output_padding[d - 2] if has_output_padding else 0
if transposed:
kernel = dilation_ * (weight[d] - 1)
output_size.append(
(input[d] - 1) * stride[d - 2]
- 2 * padding[d - 2]
+ kernel
+ output_padding_
+ 1
)
else:
kernel = dilation_ * (weight[d] - 1) + 1
output_size.append(
(input[d] + (2 * padding[d - 2]) - kernel) // stride[d - 2] + 1
)
return output_size
def _conv_forwards(
input: list[int],
weight: list[int],
bias: Optional[list[int]],
stride: list[int],
padding: list[int],
dilation: list[int],
transposed: bool,
output_padding: list[int],
groups: int,
benchmark: bool,
deterministic: bool,
cudnn_enabled: bool,
allow_tf32: bool,
) -> list[int]:
return conv_forwards(
input,
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
)
def batch_norm(
input: list[int],
weight: Optional[list[int]],
bias: Optional[list[int]],
running_mean: Optional[list[int]],
running_var: Optional[list[int]],
training: bool,
momentum: float,
eps: float,
cudnn_enabled: bool,
):
out: list[int] = []
for elem in input:
out.append(elem)
return out
def conv3d(
input: list[int],
weight: list[int],
bias: Optional[list[int]],
stride: list[int],
padding: list[int],
dilation: list[int],
groups: int,
):
assert len(weight) == 5
assert len(input) == 5
return conv_output_size(input, weight, bias, stride, padding, dilation, groups)
def maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True):
if dim_post_expr <= 0:
assert wrap_scalar
dim_post_expr = 1
min = -dim_post_expr
max = dim_post_expr - 1
assert not (dim < min or dim > max)
if dim < 0:
dim += dim_post_expr
return dim
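# Worked example: maybe_wrap_dim(-1, 4) -> 3; negative dims index from the end.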
def zero_dim_tensor(input: Any):
out: list[int] = []
return out
def multiply_integers(li: list[int]):
out = 1
for elem in li:
out = out * elem
return out
def arange_end(end: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any):
assert end >= 0
return [int(math.ceil(end))]
def arange_start(
start: number, end: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any
):
assert end >= 0
assert end >= start
return [int(math.ceil(end - start))]
def arange_start_step(
start: number, end: number, step: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any
):
assert step != 0
if step < 0:
assert start >= end
else:
assert end >= start
return [int(math.ceil((end - start) / step))]
def permute(input: list[int], dims: list[int]):
assert len(input) == len(dims)
ndim = len(dims)
seen_dims: list[int] = []
newSizes: list[int] = []
for i in range(ndim):
dim = maybe_wrap_dim(dims[i], ndim)
seen_dims.append(dim)
newSizes.append(input[dim])
for i in range(1, ndim):
for j in range(i):
assert seen_dims[i] != seen_dims[j]
return newSizes
def movedim(self: list[int], source: list[int], destination: list[int]) -> list[int]:
self_dim = len(self)
if self_dim <= 1:
return self
normalized_src: list[int] = []
normalized_dst: list[int] = []
for i in range(len(source)):
normalized_src.append(maybe_wrap_dim(source[i], self_dim))
normalized_dst.append(maybe_wrap_dim(destination[i], self_dim))
order = [-1 for i in range(self_dim)]
src_dims = [i for i in range(self_dim)]
dst_dims = [i for i in range(self_dim)]
for i in range(len(source)):
order[normalized_dst[i]] = normalized_src[i]
src_dims[normalized_src[i]] = -1
dst_dims[normalized_dst[i]] = -1
source_dims: list[int] = []
destination_dims: list[int] = []
for ele in src_dims:
if ele != -1:
source_dims.append(ele)
for ele in dst_dims:
if ele != -1:
destination_dims.append(ele)
rest_dim = self_dim - len(source)
for i in range(rest_dim):
order[destination_dims[i]] = source_dims[i]
return permute(self, order)
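# Worked example: movedim([2, 3, 4], [0], [2]) -> [3, 4, 2]; dimension 0 is moved to
# position 2, as in torch.movedim.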
def flatten(input: list[int], start_dim: int, end_dim: int):
start_dim = maybe_wrap_dim(start_dim, len(input))
end_dim = maybe_wrap_dim(end_dim, len(input))
assert start_dim <= end_dim
if len(input) == 0:
return [1]
if start_dim == end_dim:
# TODO: return self
out: list[int] = []
for elem in input:
out.append(elem)
return out
slice_numel = 1
for i in range(start_dim, end_dim + 1):
slice_numel *= input[i]
# TODO: use slicing when slice optimization has landed
# slice_numel = multiply_integers(input[start_dim:end_dim - start_dim + 1])
shape: list[int] = []
for i in range(start_dim):
shape.append(input[i])
shape.append(slice_numel)
for i in range(end_dim + 1, len(input)):
shape.append(input[i])
return shape
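# Worked example: flatten([2, 3, 4], start_dim=1, end_dim=2) -> [2, 12].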
def nonzero_lower_bound(input: list[int]):
return [0, len(input)]
def nonzero_upper_bound(input: list[int]):
return [numel(input), len(input)]
def _reduce_along_dim(self: list[int], dim: int, keepdim: bool):
dim = maybe_wrap_dim(dim, len(self))
out: list[int] = []
for i, self_dim in enumerate(self):
if i == dim:
if keepdim:
out.append(1)
else:
out.append(self_dim)
return out
def argmax(
self: list[int], dim: Optional[int] = None, keepdim: bool = False
) -> list[int]:
if dim is None:
return []
return _reduce_along_dim(self, dim, keepdim)
def bmm(self: list[int], mat2: list[int]) -> list[int]:
assert len(self) == 3, "bmm only supports 3D tensors"
assert len(mat2) == 3, "bmm only supports 3D tensors"
assert self[0] == mat2[0], "mismatching batch dimension"
assert self[2] == mat2[1], "mismatching contracting dimension"
return [self[0], self[1], mat2[2]]
def _shape_as_tensor(self: list[int]) -> list[int]:
return [len(self)]
def topk(self: list[int], k: int, dim: int = -1) -> tuple[list[int], list[int]]:
if len(self) == 0:
result: list[int] = []
else:
assert (
k <= self[dim]
), f"k ({k}) is too big for dimension {dim} of size {self[dim]}"
result = _copy(self)
result[dim] = k
return result, result
def nll_loss_forward(
self: list[int], target: list[int], weight: Optional[list[int]], reduction: int
) -> tuple[list[int], list[int]]:
# This is taken shamelessly from the meta function in LossNLL.cpp
self_dim = len(self)
target_dim = len(target)
assert 0 < self_dim <= 2
assert target_dim <= 1
no_batch_dim = self_dim == 1 and target_dim == 0
assert no_batch_dim or (self[0] == target[0])
n_classes = self[-1]
scalar_shape: list[int] = []
assert weight is None or (len(weight) == 1 and weight[0] == n_classes)
if reduction == 0 and self_dim == 2:
reduction_shape = [self[0]]
else:
reduction_shape = scalar_shape
return reduction_shape, scalar_shape
def native_layer_norm(
input: list[int], normalized_shape: list[int]
) -> tuple[list[int], list[int], list[int]]:
reduction_shape: list[int] = []
num_unreduced_dimensions = len(input) - len(normalized_shape)
assert num_unreduced_dimensions >= 0
for i in range(num_unreduced_dimensions):
reduction_shape.append(input[i])
for i in range(num_unreduced_dimensions, len(input)):
reduction_shape.append(1)
return _copy(input), reduction_shape, reduction_shape
def native_batch_norm(
input: list[int],
weight: Optional[list[int]],
bias: Optional[list[int]],
running_mean: Optional[list[int]],
running_var: Optional[list[int]],
training: bool,
) -> tuple[list[int], list[int], list[int]]:
if training:
_size = [input[1]]
else:
_size = [0]
return _copy(input), _size, _size
def _batch_norm_with_update(
input: list[int],
weight: Optional[list[int]],
bias: Optional[list[int]],
running_mean: Optional[list[int]],
running_var: Optional[list[int]],
) -> tuple[list[int], list[int], list[int], list[int]]:
_size = [input[1]]
return _copy(input), _size, _size, [0]
def cross_entropy_loss(
self: list[int],
target: list[int],
weight: Optional[list[int]] = None,
reduction: int = 1,
ignore_index: int = -100,
label_smoothing: float = 0.0,
) -> list[int]:
result_shape = nll_loss_forward(self, target, weight, reduction)[0]
return result_shape
"""
Currently deferring the enabling of this, as part of the proposal to suspend
adding ops.
There are currently cases where this is being called
in the SSA opinfo tests with unexpected values (e.g. a list of two ints, see the first
opinfo test). The behavior of index is significantly dependent on the inputs.
This could be an error with how we are matching up shape functions, or it may be that
this function needs to just implement everything.
def index_Tensor(self: List[int], indices: List[Optional[List[int]]]) -> List[int]:
assert len(indices) <= len(self), "More indices than dimensions to index"
broadcasted_shape: List[int] = []
for index_tensor_shape in indices:
if index_tensor_shape is not None:
broadcasted_shape = broadcast(broadcasted_shape, index_tensor_shape)
return broadcasted_shape
"""
ScriptFn = torch._C.ScriptFunction
shape_compute_graph_mapping: dict[str, ScriptFn] = {}
bounded_compute_graph_mapping: dict[str, tuple[ScriptFn, ScriptFn]] = {}
script_func_map: dict[Callable, ScriptFn] = {}
def process_func(func: Callable):
if func not in script_func_map:
scripted_func = torch.jit.script(func)
torch._C._jit_pass_inline(scripted_func.graph)
for _ in range(2):
torch._C._jit_pass_peephole(scripted_func.graph)
torch._C._jit_pass_constant_propagation(scripted_func.graph)
script_func_map[func] = scripted_func
return script_func_map[func]
def add_shape_compute_mapping(operator_schema: str, func: Callable):
global shape_compute_graph_mapping
shape_compute_graph_mapping[operator_schema] = process_func(func)
def add_bounded_compute_mapping(
operator_schema: str, lower_bound_func: Callable, upper_bound_func: Callable
):
# Adds a shape compute function for both upper and lower bounds
fns = (process_func(lower_bound_func), process_func(upper_bound_func))
bounded_compute_graph_mapping[operator_schema] = fns
add_shape_compute_mapping(
"aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)",
unary,
)
add_shape_compute_mapping(
"aten::rsub.Tensor(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", unary
)
add_shape_compute_mapping(
"aten::dropout(Tensor input, float p, bool train) -> Tensor", unary
)
add_shape_compute_mapping(
"aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor",
adaptive_avg_pool2d,
)
add_shape_compute_mapping(
"prim::NumToTensor.Scalar(Scalar a) -> Tensor", zero_dim_tensor
)
add_shape_compute_mapping("prim::NumToTensor.bool(bool a) -> Tensor", zero_dim_tensor)
add_shape_compute_mapping(
"aten::zeros(int[] size, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)",
unary,
)
add_shape_compute_mapping(
"aten::to.dtype(Tensor(a) self, int dtype, bool non_blocking=False, bool copy=False, int? memory_format=None) -> (Tensor(a))",
unary,
)
add_shape_compute_mapping(
"aten::arange(Scalar end, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)",
arange_end,
)
add_shape_compute_mapping(
"aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor",
arange_start,
)
add_shape_compute_mapping(
"aten::arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor",
arange_start_step,
)
add_shape_compute_mapping("aten::squeeze(Tensor(a) self) -> Tensor(a)", squeeze_nodim)
add_shape_compute_mapping(
"aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)", squeeze
)
add_shape_compute_mapping(
"aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)", squeeze_dims
)
add_shape_compute_mapping(
"aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)", unsqueeze
)
add_shape_compute_mapping(
"aten::slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor(a)",
slice,
)
add_shape_compute_mapping(
"aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a)", select
)
add_shape_compute_mapping(
"aten::index_select(Tensor self, int dim, Tensor index) -> Tensor", index_select
)
add_shape_compute_mapping(
"aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, "
"float eps=1e-05, bool cudnn_enable=True) -> Tensor",
unary,
)
add_shape_compute_mapping(
"aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", unary
)
add_shape_compute_mapping(
"aten::_no_grad_embedding_renorm_(Tensor weight, Tensor input, float max_norm, float norm_type) -> Tensor",
unary,
)
add_shape_compute_mapping(
"aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)",
unary,
)
add_shape_compute_mapping(
"aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor",
embedding,
)
add_shape_compute_mapping("aten::mm(Tensor self, Tensor mat2) -> Tensor", mm)
add_shape_compute_mapping("aten::dot(Tensor self, Tensor tensor) -> Tensor", dot)
add_shape_compute_mapping("aten::mv(Tensor self, Tensor vec) -> Tensor", mv)
add_shape_compute_mapping("aten::matmul(Tensor self, Tensor other) -> Tensor", matmul)
add_shape_compute_mapping(
"aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor", linear
)
add_shape_compute_mapping(
"aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor",
max_pool2d,
)
add_shape_compute_mapping(
"aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)",
max_pool2d_with_indices,
)
add_shape_compute_mapping("aten::t(Tensor(a) self) -> Tensor(a)", t)
add_shape_compute_mapping(
"aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", transpose
)
add_shape_compute_mapping(
"aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor",
conv1d,
)
add_shape_compute_mapping(
"aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor",
conv2d,
)
add_shape_compute_mapping(
"aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor",
batch_norm,
)
add_shape_compute_mapping(
"aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor",
conv3d,
)
add_shape_compute_mapping(
"aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, int[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)",
conv_backwards,
)
add_shape_compute_mapping(
"aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor",
conv_forwards,
)
add_shape_compute_mapping(
"aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor",
_conv_forwards,
)
add_shape_compute_mapping(
"aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor",
conv_transpose2d_input,
)
add_shape_compute_mapping(
"aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)",
flatten,
)
add_shape_compute_mapping("aten::cat(Tensor[] tensors, int dim=0) -> Tensor", cat)
add_shape_compute_mapping("aten::stack(Tensor[] tensors, int dim=0) -> Tensor", stack)
add_shape_compute_mapping(
"aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)", permute
)
add_shape_compute_mapping(
"aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)",
movedim,
)
add_shape_compute_mapping("aten::view(Tensor(a) self, int[] size) -> Tensor(a)", view)
add_shape_compute_mapping(
"aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)", expand
)
add_shape_compute_mapping(
"aten::expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a)",
expand_one_unused,
)
add_shape_compute_mapping(
"aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor",
sum_mean_dim,
)
add_shape_compute_mapping(
"aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor",
sum_mean_dim,
)
add_shape_compute_mapping(
"aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)",
max_dim,
)
add_shape_compute_mapping(
"aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor", zero_dim_tensor
)
add_shape_compute_mapping(
"aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor", zero_dim_tensor
)
add_shape_compute_mapping(
"aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor",
addmm,
)
add_shape_compute_mapping(
"aten::upsample_nearest2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> (Tensor)",
upsample_nearest2d,
)
add_shape_compute_mapping(
"aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor",
unary,
)
add_shape_compute_mapping(
"aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor",
unary,
)
add_shape_compute_mapping("aten::dequantize(Tensor self) -> Tensor", unary)
add_shape_compute_mapping(
"quantized::add(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc",
broadcast,
)
add_shape_compute_mapping(
"aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", argmax
)
add_shape_compute_mapping("aten::bmm(Tensor self, Tensor mat2) -> Tensor", bmm)
add_shape_compute_mapping(
"aten::_shape_as_tensor(Tensor self) -> Tensor", _shape_as_tensor
)
add_shape_compute_mapping(
"aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)",
topk,
)
add_shape_compute_mapping(
"aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)",
nll_loss_forward,
)
add_shape_compute_mapping(
"aten::native_layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)",
native_layer_norm,
)
add_shape_compute_mapping(
"aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
native_batch_norm,
)
add_shape_compute_mapping(
"aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
native_batch_norm,
)
add_shape_compute_mapping(
"aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
native_batch_norm,
)
add_shape_compute_mapping(
"_batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)",
_batch_norm_with_update,
)
add_shape_compute_mapping(
"aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor",
cross_entropy_loss,
)
# add_shape_compute_mapping("aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor", index_Tensor)
# TODO: migrate over all of symbolic_shape_registry_util.cpp
# These are duplicated here so that the functions will be serialized
add_shape_compute_mapping(
"aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor",
broadcast_three,
)
add_shape_compute_mapping(
"aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor",
broadcast_one_three,
)
add_shape_compute_mapping(
"aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)",
broadcast_inplace,
)
# quantized_conv_prepack TODO
# Shape Compute Fn with upper and lower bounds
add_bounded_compute_mapping(
"aten::nonzero(Tensor self) -> (Tensor)", nonzero_lower_bound, nonzero_upper_bound
)
```
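
The registrations above all pass through process_func: each shape function is scripted with torch.jit.script, its graph is inlined, and it is then simplified with two rounds of peephole and constant-propagation passes before being stored under its operator schema. The sketch below is not part of the file; it replays that same pipeline on a hypothetical shape function (same_shape) whose output shape simply equals its input shape, so the optimized graph the registry would keep can be inspected directly.

```py
# Editor's sketch (not part of the source file above): the same processing
# pipeline that process_func applies to every registered shape function.
# `same_shape` is a hypothetical example whose output shape equals its input.
from typing import List

import torch


def same_shape(self: List[int]) -> List[int]:
    out: List[int] = []
    for elem in self:
        out.append(elem)
    return out


scripted = torch.jit.script(same_shape)
torch._C._jit_pass_inline(scripted.graph)
for _ in range(2):
    torch._C._jit_pass_peephole(scripted.graph)
    torch._C._jit_pass_constant_propagation(scripted.graph)

# The resulting graph operates purely on List[int] shapes:
print(scripted([2, 3, 4]))  # -> [2, 3, 4]
```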
|
===========================================================================================================
SOURCE CODE FILE: _state.py
LINES: 1
SIZE: 3.80 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_state.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""JIT-related state.
This module stores various pieces of Python-global state relating to the JIT.
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import os
import weakref
from typing import Any
import torch
class EnabledProxy:
"""Stores whether the JIT is enabled or not.
This is just a wrapper for a bool, so that we get reference semantics
"""
def __init__(self) -> None:
self.enabled = self.parse_env(
"PYTORCH_JIT", True, "> Using PyTorch JIT", "> PyTorch JIT DISABLED"
)
def parse_env(self, name, default, true_message, false_message):
value = os.environ.get(name)
if value is None:
return default
if value.lower() in {"1", "true", "yes"}:
return True
elif value.lower() in {"0", "false", "no"}:
return False
if value == "1v":
print(true_message)
return True
elif value == "0v":
print(false_message)
return False
raise ValueError(f"Unknown setting of {name}. Try using 0 or 1.")
def __bool__(self):
return self.enabled
_enabled = EnabledProxy()
def disable():
_enabled.enabled = False
def enable():
_enabled.enabled = True
# The Python CompilationUnit. All functions and modules defined in Python will
# live in here. It's defined in Python because doing in cpp creates static
# destruction order issues.
_python_cu = torch._C.CompilationUnit()
# python class => ScriptClass mapping
_script_classes: dict[type[Any], type[Any]] = {}
_name_to_pyclass: dict[str, type[Any]] = {}
def _add_script_class(python_class, script_class):
_script_classes[python_class] = script_class
_name_to_pyclass[script_class.qualified_name()] = python_class
def _get_script_class(python_class):
override = getattr(python_class, "_jit_override_qualname", None)
if override is not None:
python_class = _get_python_class(override)
return _script_classes.get(python_class, None)
def _get_python_class(qualified_name):
return _name_to_pyclass.get(qualified_name, None)
def _clear_class_state():
_script_classes.clear()
_name_to_pyclass.clear()
# Caching: we currently cache compilation of free functions and overloaded functions.
# To cache free functions we hold a weak ref to the function object and
# map to the compiled fn's qualified name.
# To cache overloaded functions we hold a weak ref to the function obj and
# map to all of its overloaded compiled fns.
# In the future we could consider caching more types of objects so that
# aliasing is preserved across separate compilations of the same object.
_jit_caching_layer: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
_jit_function_overload_caching: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
def _try_get_jit_cached_overloads(key):
qual_names = _jit_function_overload_caching.get(key, None)
if qual_names:
return [_python_cu.find_function(qual_name) for qual_name in qual_names]
else:
return None
def _set_jit_overload_cache(key, compiled_fns):
_jit_function_overload_caching[key] = [fn.qualified_name for fn in compiled_fns]
def _try_get_jit_cached_function(key):
if getattr(key, "__disable_jit_function_caching__", False) is True:
return None
qual_name = _jit_caching_layer.get(key, None)
if qual_name:
return _python_cu.find_function(qual_name)
else:
return None
def _set_jit_function_cache(key, value):
# only free functions currently supported
assert isinstance(value, torch.jit.ScriptFunction)
_jit_caching_layer[key] = value.qualified_name
```
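
A brief usage sketch for the module above: the EnabledProxy instance is consulted by torch.jit.script and torch.jit.trace, and disable()/enable() flip it at runtime (the PYTORCH_JIT environment variable only sets the initial value). The example below is illustrative; the fallback behavior shown in the comments (returning the original callable when the JIT is disabled) is what the flag is expected to trigger.

```py
# Illustrative sketch (not part of the source file above): toggling the JIT
# enabled flag at runtime via torch.jit._state.
import torch
import torch.jit._state as jit_state


def add(x, y):
    return x + y


jit_state.disable()                    # same flag PYTORCH_JIT=0 would clear at import time
maybe_plain = torch.jit.script(add)    # with the JIT disabled, scripting is expected
                                       # to fall back to the original Python function
jit_state.enable()
scripted = torch.jit.script(add)       # a ScriptFunction is produced again

print(type(maybe_plain).__name__, type(scripted).__name__)
```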
|
===========================================================================================================
SOURCE CODE FILE: _trace.py
LINES: 28
SIZE: 58.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\_trace.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Tracing.
This module contains functionality to support the JIT's tracing frontend, notably:
* torch.jit.trace
* torch.jit.trace_module
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import contextlib
import copy
import functools
import inspect
import os
import re
import warnings
from enum import Enum
from typing import Any, Callable, Optional, TypeVar
from typing_extensions import ParamSpec
import torch
from torch._jit_internal import (
_get_model_id,
_qualified_name,
get_callable_argument_names,
is_scripting,
)
from torch.autograd import function
from torch.jit._script import _CachedForward, script, ScriptModule
from torch.jit._state import _enabled, _python_cu
from torch.nn import Module
from torch.testing._comparison import default_tolerances
_flatten = torch._C._jit_flatten
_unflatten = torch._C._jit_unflatten
R = TypeVar("R", covariant=True) # return type (always covariant)
P = ParamSpec("P")
def _create_interpreter_name_lookup_fn(frames_up=1):
def _get_interpreter_name_for_var(var):
frame = inspect.currentframe()
if not frame:
raise RuntimeError("failed to inspect frame")
i = 0
while i < frames_up + 1:
frame = frame.f_back
if not frame:
raise RuntimeError("failed to get frame")
i += 1
f_locals = frame.f_locals
for k, v in f_locals.items():
if isinstance(v, torch.Tensor) and var is v:
return k if k != "self" else ""
return ""
return _get_interpreter_name_for_var
def _unique_state_dict(module, keep_vars=False):
# since Parameter.detach() always creates a new torch.Tensor instance,
# id(v) doesn't work with it. So we always get the Parameter or Buffer
# as values, and deduplicate the params using Parameters and Buffers
state_dict = module.state_dict(keep_vars=True)
filtered_dict = type(state_dict)()
seen_ids: set[int] = set()
for k, v in state_dict.items():
if id(v) in seen_ids:
continue
seen_ids.add(id(v))
if keep_vars:
filtered_dict[k] = v
else:
filtered_dict[k] = v.detach()
return filtered_dict
class ONNXTracedModule(torch.nn.Module):
def __init__(
self,
inner,
strict=True,
force_outplace=False,
return_inputs=False,
return_inputs_states=False,
):
super().__init__()
# inner may be a Module, or it may be an arbitrary callable
# If it's a Module, we get its parameters automatically, which lets
# us avoid special-casing functions versus modules.
self.inner = inner
self.strict = strict
self._force_outplace = force_outplace
self._return_inputs = return_inputs
self._return_inputs_states = return_inputs_states
def forward(self, *args: torch.Tensor):
in_vars, in_desc = _flatten(args)
# NOTE: use full state, because we need it for BatchNorm export
# This differs from the compiler path, which doesn't support it at the moment.
module_state = list(_unique_state_dict(self, keep_vars=True).values())
ret_inputs = []
inputs_states = []
outs = []
def wrapper(*args):
in_args: list[torch.Tensor] = []
for i in range(len(in_vars)):
if not isinstance(args[i], torch.Tensor):
raise RuntimeError("Expected Tensor argument")
in_args.append(args[i])
trace_inputs = _unflatten(in_args, in_desc)
if self._return_inputs:
ret_inputs.append(
tuple(x.clone(memory_format=torch.preserve_format) for x in args)
)
if self._return_inputs_states:
inputs_states.append(_unflatten(in_args, in_desc))
outs.append(self.inner(*trace_inputs))
if self._return_inputs_states:
inputs_states[0] = (inputs_states[0], trace_inputs)
out_vars, _ = _flatten(outs)
if len(out_vars) == 1:
return out_vars[0]
else:
return tuple(out_vars)
graph, _out = torch._C._create_graph_by_tracing(
wrapper,
in_vars + module_state,
_create_interpreter_name_lookup_fn(),
self.strict,
self._force_outplace,
)
if self._return_inputs:
return graph, outs[0], ret_inputs[0]
if self._return_inputs_states:
return graph, outs[0], inputs_states[0]
else:
return graph, outs[0]
def _clone_inputs(args):
def clone_input(a):
if a is None:
return None
elif isinstance(a, torch.Tensor):
# TODO: figure out one liner to .clone() and set requires_grad
v = (
a.detach()
.clone(memory_format=None if a.is_mkldnn else torch.preserve_format)
.requires_grad_(a.requires_grad)
)
if a.grad is not None:
v.grad = clone_input(v.grad)
return v
else:
return a.clone(memory_format=torch.preserve_format)
return function._nested_map(
lambda x: isinstance(x, torch.Tensor), clone_input, condition_msg="tensors"
)(args)
# This is purely for developer debugging. We are not going to advertise it.
_JIT_TIME = os.environ.get("PYTORCH_JIT_TIME", False) # CUDA-only timing
_JIT_DISABLE = os.environ.get("PYTORCH_JIT_DISABLE", False)
_JIT_STATS = os.environ.get("PYTORCH_JIT_STATS", False)
@contextlib.contextmanager
def _time(trace_name, name, time=True):
if (not _JIT_TIME and not time) or not torch.cuda.is_available():
yield
return
stream = torch.cuda.current_stream()
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
stream.record_event(start)
try:
yield
finally:
stream.record_event(end)
end.synchronize()
print(f"{trace_name} {name} time: {start.elapsed_time(end)} ms")
def verify(model, args, loss_fn=torch.sum, devices=None):
"""
Verify that a JIT compiled model has the same behavior as its uncompiled version along with its backwards pass.
If your model returns multiple outputs,
you must also specify a `loss_fn` to produce a loss for which
the backwards will be computed.
This function has side-effects (e.g., it executes your model / saves and loads
parameters), so don't expect the model to come out exactly the same as what
you passed in.
Args:
model (compiled torch.nn.Module or function): the module/function to be
verified. The module/function definition MUST have been decorated with
`@torch.jit.compile`.
args (tuple or Tensor): the positional arguments to pass to the
compiled function/module to be verified. A non-tuple is assumed to
be a single positional argument to be passed to the model.
loss_fn (function, optional): the loss function to be applied to
the output of the model, before backwards is invoked. By default,
we assume that a model returns a single result, and we :func:`torch.sum`
before calling backwards; if this is inappropriate, you can pass your
own loss function. Note that if a model returns a tuple of results,
these are passed as separate positional arguments to `loss_fn`.
devices (iterable of device IDs, optional): the GPU devices which the
compiled module will be run on. This determines the RNG state we
must save when running both compiled and uncompiled versions of the model.
"""
# TODO: In principle, we track device information in our trace, so it
# should be possible to check if our execution actually obeyed the 'devices'
# the user provided.
# TODO: Consider adding a utility function to torch.jit to test
# for this case
if not isinstance(model, torch._C.CompiledFunction): # type: ignore[attr-defined]
raise TypeError(
"Cannot verify an uncompiled module. Add @torch.jit.compile to compile it"
)
is_module = isinstance(model, Module)
if not isinstance(args, tuple):
args = (args,)
if is_module:
saved_state = copy.deepcopy(model.state_dict())
def run_fwd_bwd(args, force_trace=False, assert_compiled=False):
params = list(model.parameters()) if is_module else []
in_vars, _ = _flatten((args, params))
# We use a special API to reset the trace and compile it from scratch.
compiled_fn = model
if force_trace:
compiled_fn.clear_cache()
if assert_compiled:
hits = compiled_fn.hits
out = model(*args)
if assert_compiled and compiled_fn.hits == hits: # type: ignore[possibly-undefined]
raise RuntimeError("failed to use the compiled function")
if not isinstance(out, tuple):
out = (out,)
if loss_fn == torch.sum and len(out) != 1:
raise ValueError(
f"Model returns {len(out)} outputs, but default loss function "
"(torch.sum) can only handle a single output"
)
out_vars, _ = _flatten(out)
saved_outs = [
v.detach().clone(memory_format=torch.preserve_format) for v in out_vars
]
loss = loss_fn(*out)
grads = torch.autograd.grad([loss], in_vars)
# TODO: I'm not sure if the clone here is necessary but it is safer
saved_grads = [
v.detach().clone(memory_format=torch.preserve_format) for v in grads
]
return (saved_outs, saved_grads)
with torch.random.fork_rng(devices, _caller="torch.jit.verify"):
uncompiled_outs, uncompiled_grads = run_fwd_bwd(args, force_trace=True)
assert model.has_trace_for(*args)
if is_module:
model.load_state_dict(saved_state) # type: ignore[possibly-undefined]
compiled_outs, compiled_grads = run_fwd_bwd(args, assert_compiled=True)
_verify_equal(uncompiled_outs, compiled_outs)
_verify_equal(uncompiled_grads, compiled_grads)
def _verify_equal(xs, ys):
for x, y in zip(xs, ys):
if x.sub(y).abs().max() > 1e-6:
raise RuntimeError("JIT and real computation mismatch")
def indent(s):
return "\n".join(["\t" + line for line in s.splitlines()])
class TracingCheckError(Exception):
def __init__(self, graph_diff_error, tensor_compare_error, extra_msg=None):
self.message = "Tracing failed sanity checks!\n"
if extra_msg is not None:
self.message += extra_msg + "\n"
if graph_diff_error is not None:
self.message += "ERROR: Graphs differed across invocations!\n"
self.message += indent(graph_diff_error) + "\n"
if tensor_compare_error is not None:
self.message += (
"ERROR: Tensor-valued Constant nodes differed in value "
"across invocations. This often indicates that the tracer has"
" encountered untraceable code.\n"
)
self.message += indent(tensor_compare_error) + "\n"
super().__init__(self.message)
# Check the traced module against a set of user-provided validation inputs
@torch.no_grad()
def _check_trace(
check_inputs,
func,
traced_func,
check_tolerance,
strict,
force_outplace,
is_trace_module,
_module_class,
example_inputs_is_kwarg=False,
):
# Note: tracing is independent of optimizations, which consume the trace
for inputs in check_inputs:
if isinstance(inputs, torch.Tensor):
inputs = (inputs,)
if is_trace_module:
copied_dict = {}
for name, data in inputs.items():
copied_dict[name] = _clone_inputs(data)
check_mod = torch.jit.trace_module(
getattr(func, "__self__", func),
copied_dict,
check_trace=False,
strict=strict,
_force_outplace=force_outplace,
_module_class=_module_class,
_compilation_unit=torch._C.CompilationUnit(),
example_inputs_is_kwarg=example_inputs_is_kwarg,
_store_inputs=False,
)
check_mod_func = check_mod._c._get_method(traced_func.name)
inputs = inputs[traced_func.name]
if (
isinstance(inputs, (torch.Tensor))
or isinstance(inputs, dict)
and not example_inputs_is_kwarg
):
inputs = (inputs,)
else:
if example_inputs_is_kwarg:
check_mod = torch.jit.trace(
func,
check_trace=False,
strict=strict,
_force_outplace=force_outplace,
_module_class=_module_class,
example_kwarg_inputs=_clone_inputs(inputs),
_store_inputs=False,
)
else:
check_mod = torch.jit.trace(
func,
_clone_inputs(inputs),
check_trace=False,
strict=strict,
_force_outplace=force_outplace,
_module_class=_module_class,
_store_inputs=False,
)
check_mod_func = check_mod
def graph_diagnostic_info():
mod_canonicalized = torch._C._jit_pass_canonicalize(traced_func.graph)
torch._C._jit_pass_inline(mod_canonicalized)
torch._C._jit_pass_erase_shape_information(mod_canonicalized)
mod_str = str(mod_canonicalized)
mod_str = re.sub(r"___torch_mangle_[0-9]+\.", "", mod_str)
check_canonicalized = torch._C._jit_pass_canonicalize(check_mod_func.graph)
torch._C._jit_pass_inline(check_canonicalized)
torch._C._jit_pass_erase_shape_information(check_canonicalized)
check_str = str(check_canonicalized)
check_str = re.sub(r"___torch_mangle_[0-9]+\.", "", check_str)
graph_diff_errors = None
if mod_str != check_str:
import difflib
graph_diff = difflib.ndiff(
mod_str.splitlines(True), check_str.splitlines(True)
)
graph_diff_errors = "Graph diff:\n" + indent("".join(graph_diff)) + "\n"
for n_mod, n_check in zip(
mod_canonicalized.nodes(), check_canonicalized.nodes()
):
if str(n_mod) != str(n_check):
graph_diff_errors += "First diverging operator:\n"
node_diff = difflib.ndiff(
str(n_mod).splitlines(True), str(n_check).splitlines(True)
)
source_printout = (
"Node diff:\n" + indent("".join(node_diff)) + "\n"
)
mod_stack = n_mod.sourceRange()
if mod_stack:
source_printout += (
"Trace source location:\n" + indent(mod_stack) + "\n"
)
check_stack = n_check.sourceRange()
if check_stack:
source_printout += (
"Check source location:\n" + indent(check_stack) + "\n"
)
graph_diff_errors += source_printout
break # For now, only print out the first pair of nodes that diverges
tensor_compare_errors = None
# Check Tensor-valued constant nodes
for n_mod, n_check in zip(
mod_canonicalized.nodes(), check_canonicalized.nodes()
):
if n_mod.kind() != n_check.kind():
break # Graphs have already diverged
if n_mod.kind() == "prim::Constant" and not (
n_mod.mustBeNone() or n_check.mustBeNone()
):
if not n_mod.hasAttribute("value"):
continue
if n_mod.kindOf("value") != "t" or n_check.kindOf("value") != "t":
continue
mod_tensor_val = n_mod.t("value")
check_tensor_val = n_check.t("value")
try:
torch.testing.assert_close(
mod_tensor_val, check_tensor_val, equal_nan=True
)
except (RuntimeError, AssertionError) as e:
if tensor_compare_errors is None:
tensor_compare_errors = ""
tensor_compare_errors += "Node:\n" + indent(str(n_mod)) + "\n"
compare_stack = n_mod.sourceRange()
if compare_stack:
tensor_compare_errors += (
"Source Location:\n" + indent(compare_stack) + "\n"
)
tensor_compare_errors += "Comparison exception: " + indent(
str(e)
)
break # For now, only print the first diverging pair
return graph_diff_errors, tensor_compare_errors
def wrap_retval(x):
return x if isinstance(x, tuple) else (x,)
def run_mod_and_filter_tensor_outputs(mod, inputs, running_what):
try:
if isinstance(inputs, dict) and example_inputs_is_kwarg:
outs = wrap_retval(mod(**inputs))
else:
outs = wrap_retval(mod(*_clone_inputs(inputs)))
outs = [out for out in outs if isinstance(out, torch.Tensor)]
return outs
except Exception as e:
graph_diff_errors, tensor_compare_errors = graph_diagnostic_info()
msg = f"encountered an exception while running the {running_what} with test inputs.\nException:\n{indent(str(e))}"
raise TracingCheckError(
graph_diff_errors,
tensor_compare_errors,
extra_msg=msg,
) from e
has_warned = [False]
def maybe_warn_nondeterministic():
if has_warned[0]:
return
has_warned[0] = True
nondeterm_ops = [
op for op in traced_func.graph.nodes() if op.isNondeterministic()
]
if len(nondeterm_ops) > 0:
nondeterministic_ops_warning = "Trace had nondeterministic nodes. "
nondeterministic_ops_warning += (
"Did you forget call .eval() on your model? Nodes:\n"
)
nondeterministic_ops_warning += "\n".join(
[indent(str(op)) for op in nondeterm_ops][:20]
)
nondeterministic_ops_warning += (
"\nThis may cause errors in trace checking. To disable trace checking,"
" pass check_trace=False to torch.jit.trace()"
)
warnings.warn(
nondeterministic_ops_warning, category=TracerWarning, stacklevel=5
)
def compare_outputs(original, reference, match_what):
all_ok = True
for i, (orig, ref) in enumerate(zip(original, reference)):
try:
if orig.is_quantized:
orig = orig.dequantize()
if ref.is_quantized:
ref = ref.dequantize()
if orig.is_mkldnn:
orig = orig.to_dense()
if ref.is_mkldnn:
ref = ref.to_dense()
if ref.is_complex() or orig.is_complex():
torch.testing.assert_close(
orig.to(torch.cdouble),
ref.to(torch.cdouble),
rtol=check_tolerance,
atol=default_tolerances(orig, ref)[1],
equal_nan=True,
)
else:
if orig.is_mps or ref.is_mps:
torch.testing.assert_close(
orig.float(),
ref.float(),
rtol=check_tolerance,
atol=default_tolerances(orig, ref)[1],
equal_nan=True,
)
elif getattr(orig, "is_nested", None) or getattr(
ref, "is_nested", None
):
assert getattr(orig, "is_nested", None) == getattr(
ref, "is_nested", None
)
for t_orig, t_ref in zip(orig.unbind(), ref.unbind()):
torch.testing.assert_close(
t_orig.double(),
t_ref.double(),
rtol=check_tolerance,
atol=default_tolerances(t_orig, t_ref)[1],
equal_nan=True,
)
else:
torch.testing.assert_close(
orig.double(),
ref.double(),
rtol=check_tolerance,
atol=default_tolerances(orig, ref)[1],
equal_nan=True,
)
except AssertionError as e:
maybe_warn_nondeterministic()
warnings.warn(
"Output nr "
+ str(i + 1)
+ ". of the traced function does not match "
"the corresponding output of the "
+ match_what
+ ". Detailed error:\n"
+ str(e),
category=TracerWarning,
stacklevel=4,
)
all_ok = False
return all_ok
traced_outs = run_mod_and_filter_tensor_outputs(traced_func, inputs, "trace")
fn_outs = run_mod_and_filter_tensor_outputs(func, inputs, "Python function")
if compare_outputs(traced_outs, fn_outs, "Python function"):
check_outs = run_mod_and_filter_tensor_outputs(
check_mod_func, inputs, "repeated trace"
)
compare_outputs(traced_outs, check_outs, "repeated trace")
diag_info = graph_diagnostic_info()
if any(info is not None for info in diag_info):
raise TracingCheckError(*diag_info)
class TracerWarning(Warning):
@staticmethod
def ignore_lib_warnings():
# We ignore warnings from all submodules excluding the JIT, because we need them e.g. for _check_trace
warnings.filterwarnings(
"ignore", category=TracerWarning, module="torch.(?!jit)"
)
warnings.filterwarnings("ignore", "torch::jit::fuser::cuda")
# We ignore the tracer warnings coming from inside the library, because all our shape
# checks in nn will trigger them.
TracerWarning.ignore_lib_warnings()
torch._C._tracer_warn_use_python()
def make_tuple(example_inputs):
if isinstance(example_inputs, (torch.Tensor, dict)):
return (example_inputs,)
# done primarily so that weird iterables fail here and not pybind11 code
if not isinstance(example_inputs, tuple):
return tuple(example_inputs)
return example_inputs
def make_module(mod, _module_class, _compilation_unit):
if isinstance(mod, ScriptModule):
return mod
elif torch._jit_internal.module_has_exports(mod):
infer_methods_stubs_fn = torch.jit._recursive.make_stubs_from_exported_methods
return torch.jit._recursive.create_script_module(
mod, infer_methods_stubs_fn, share_types=False, is_tracing=True
)
else:
if _module_class is None:
_module_class = TopLevelTracedModule
return _module_class(mod, _compilation_unit=_compilation_unit)
def wrap_check_inputs(check_inputs):
if check_inputs is None:
return None
return [{"forward": c} for c in check_inputs]
def analyze_ts_result_with_export_result(export, trace):
import torch.utils._pytree as pytree
flat_export = pytree.tree_leaves(export)
flat_trace = pytree.tree_leaves(trace)
for orig, loaded in zip(flat_export, flat_trace):
if orig.layout != loaded.layout:
return False
# mkldnn is not supported for torch.allclose
if orig.layout == torch._mkldnn: # type: ignore[attr-defined]
return True
if type(orig) != type(loaded):
return False
if isinstance(orig, torch._subclasses.FakeTensor):
# Skip for FakeTensor.
return True
elif isinstance(orig, torch.Tensor):
if orig.dtype != loaded.dtype:
return False
if not torch.allclose(orig, loaded):
return False
else:
if orig != loaded:
return False
return True
def _trace_impl(
func,
example_inputs=None,
optimize=None,
check_trace=True,
check_inputs=None,
check_tolerance=1e-5,
strict=True,
_force_outplace=False,
_module_class=None,
_compilation_unit=_python_cu,
example_kwarg_inputs=None,
_store_inputs=True,
):
if isinstance(func, torch.jit.ScriptModule):
# it is hard to trace it because the forward method on ScriptModule is already defined, so it
# would result in an error.
warnings.warn(
"The input to trace is already a ScriptModule, tracing it is a no-op. Returning the object as is."
)
return func
if isinstance(func, torch.nn.Module):
if example_inputs is None:
if isinstance(example_kwarg_inputs, dict):
example_inputs = example_kwarg_inputs
else:
raise RuntimeError("example_kwarg_inputs should be a dict")
return trace_module(
func,
{"forward": example_inputs},
None,
check_trace,
wrap_check_inputs(check_inputs),
check_tolerance,
strict,
_force_outplace,
_module_class,
example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
_store_inputs=_store_inputs,
)
if (
hasattr(func, "__self__")
and isinstance(func.__self__, torch.nn.Module)
and func.__name__ == "forward"
):
if example_inputs is None:
if isinstance(example_kwarg_inputs, dict):
example_inputs = example_kwarg_inputs
else:
raise RuntimeError("example_kwarg_inputs should be a dict")
return trace_module(
func.__self__,
{"forward": example_inputs},
None,
check_trace,
wrap_check_inputs(check_inputs),
check_tolerance,
strict,
_force_outplace,
_module_class,
example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
_store_inputs=_store_inputs,
)
# Special case for common case of passing a single Tensor
if (
isinstance(example_inputs, (torch.Tensor, dict))
and example_kwarg_inputs is None
):
example_inputs = (example_inputs,)
# done primarily so that weird iterables fail here and not pybind11 code
elif example_kwarg_inputs is None and not isinstance(example_inputs, tuple):
example_inputs = tuple(example_inputs)
var_lookup_fn = _create_interpreter_name_lookup_fn(0)
if hasattr(func, "__self__") and isinstance(func.__self__, torch.nn.Module):
raise AttributeError(
"trace doesn't support compiling individual module's functions.\n"
"Please use trace_module"
)
name = _qualified_name(func)
if isinstance(example_kwarg_inputs, dict):
example_inputs = example_kwarg_inputs
traced = torch._C._create_function_from_trace_with_dict(
name,
func,
example_kwarg_inputs,
var_lookup_fn,
strict,
_force_outplace,
get_callable_argument_names(func),
)
else:
traced = torch._C._create_function_from_trace(
name,
func,
example_inputs,
var_lookup_fn,
strict,
_force_outplace,
get_callable_argument_names(func),
)
# Check the trace against new traces created from user-specified inputs
if check_trace:
if check_inputs is not None:
_check_trace(
check_inputs,
func,
traced,
check_tolerance,
strict,
_force_outplace,
False,
_module_class,
example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
)
else:
_check_trace(
[example_inputs],
func,
traced,
check_tolerance,
strict,
_force_outplace,
False,
_module_class,
example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
)
# Allow torch.compile() to inline
traced._torchdynamo_inline = func # type: ignore[attr-defined]
return traced
class _ExportType(str, Enum):
DIRECT_EXPORT = "DIRECT_EXPORT"
TRACE_AND_EXPORT = "TRACE_AND_EXPORT"
SOURCE_TO_SOURCE = "SOURCE_TO_SOURCE"
def __str__(self) -> str:
return self.value
class _ExportOutcome(str, Enum):
SUCCESS = "SUCCESS"
FAILED_TO_EXPORT = "FAILED_TO_EXPORT"
FAILED_TO_RUN = "FAILED_TO_RUN"
ACCURACY_ERROR = "ACCURACY_ERROR"
def __str__(self) -> str:
return self.value
def trace(
func,
example_inputs=None,
optimize=None,
check_trace=True,
check_inputs=None,
check_tolerance=1e-5,
strict=True,
_force_outplace=False,
_module_class=None,
_compilation_unit=_python_cu,
example_kwarg_inputs=None,
_store_inputs=True,
):
r"""
Trace a function and return an executable or :class:`ScriptFunction` that will be optimized using just-in-time compilation.
Tracing is ideal for code that operates only on
``Tensor``\\s and lists, dictionaries, and
tuples of ``Tensor``\\s.
Using `torch.jit.trace` and `torch.jit.trace_module`, you can turn an
existing module or Python function into a TorchScript
:class:`ScriptFunction` or :class:`ScriptModule`. You must provide example
inputs, and we run the function, recording the operations performed on all
the tensors.
* The resulting recording of a standalone function produces `ScriptFunction`.
* The resulting recording of `nn.Module.forward` or `nn.Module` produces
`ScriptModule`.
This module also contains any parameters that the original
module had as well.
Warning:
Tracing only correctly records functions and modules which are not data
dependent (e.g., do not have conditionals on data in tensors) and do not have
any untracked external dependencies (e.g., perform input/output or
access global variables). Tracing only records operations done when the given
function is run on the given tensors. Therefore, the returned
`ScriptModule` will always run the same traced graph on any input. This
has some important implications when your module is expected to run
different sets of operations, depending on the input and/or the module
state. For example,
* Tracing will not record any control-flow like if-statements or loops.
When this control-flow is constant across your module, this is fine
and it often inlines the control-flow decisions. But sometimes the
control-flow is actually part of the model itself. For instance, a
recurrent network is a loop over the (possibly dynamic) length of an
input sequence.
* In the returned :class:`ScriptModule`, operations that have different
behaviors in ``training`` and ``eval`` modes will always behave as if
it is in the mode it was in during tracing, no matter which mode the
`ScriptModule` is in.
In cases like these, tracing would not be appropriate and
:func:`scripting <torch.jit.script>` is a better choice. If you trace
such models, you may silently get incorrect results on subsequent
invocations of the model. The tracer will try to emit warnings when
doing something that may cause an incorrect trace to be produced.
Args:
func (callable or torch.nn.Module): A Python function or `torch.nn.Module`
that will be run with `example_inputs`. `func` arguments and return
values must be tensors or (possibly nested) tuples that contain
tensors. When a module is passed `torch.jit.trace`, only the
``forward`` method is run and traced (see :func:`torch.jit.trace
<torch.jit.trace_module>` for details).
Keyword arguments:
example_inputs (tuple or torch.Tensor or None, optional): A tuple of example
inputs that will be passed to the function while tracing.
Default: ``None``. Either this argument or ``example_kwarg_inputs``
should be specified. The resulting trace can be run with inputs of
different types and shapes assuming the traced operations support those
types and shapes. `example_inputs` may also be a single Tensor in which
case it is automatically wrapped in a tuple. When the value is None,
``example_kwarg_inputs`` should be specified.
check_trace (``bool``, optional): Check if the same inputs run through
traced code produce the same outputs. Default: ``True``. You might want
to disable this if, for example, your network contains non-
deterministic ops or if you are sure that the network is correct despite
a checker failure.
check_inputs (list of tuples, optional): A list of tuples of input
arguments that should be used to check the trace against what is
expected. Each tuple is equivalent to a set of input arguments that
would be specified in ``example_inputs``. For best results, pass in
a set of checking inputs representative of the space of shapes and
types of inputs you expect the network to see. If not specified,
the original ``example_inputs`` are used for checking
check_tolerance (float, optional): Floating-point comparison tolerance
to use in the checker procedure. This can be used to relax the
checker strictness in the event that results diverge numerically
for a known reason, such as operator fusion.
strict (``bool``, optional): run the tracer in a strict mode or not
(default: ``True``). Only turn this off when you want the tracer to
record your mutable container types (currently ``list``/``dict``)
and you are sure that the container you are using in your
problem is a ``constant`` structure and does not get used as
control flow (if, for) conditions.
example_kwarg_inputs (dict, optional): This parameter is a pack of keyword
arguments of example inputs that will be passed to the function while
tracing. Default: ``None``. Either this argument or ``example_inputs``
should be specified. The dict will be unpacked by the argument names
of the traced function. If the keys of the dict don't match the
traced function's argument names, a runtime exception will be raised.
Returns:
If `func` is `nn.Module` or ``forward`` of `nn.Module`, `trace` returns
a :class:`ScriptModule` object with a single ``forward`` method
containing the traced code. The returned `ScriptModule` will
have the same set of sub-modules and parameters as the original
``nn.Module``. If ``func`` is a standalone function, ``trace``
returns `ScriptFunction`.
Example (tracing a function):
.. testcode::
import torch
def foo(x, y):
return 2 * x + y
# Run `foo` with the provided inputs and record the tensor operations
traced_foo = torch.jit.trace(foo, (torch.rand(3), torch.rand(3)))
# `traced_foo` can now be run with the TorchScript interpreter or saved
# and loaded in a Python-free environment
Example (tracing an existing module)::
import torch
import torch.nn as nn
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, x):
return self.conv(x)
n = Net()
example_weight = torch.rand(1, 1, 3, 3)
example_forward_input = torch.rand(1, 1, 3, 3)
# Trace a specific method and construct `ScriptModule` with
# a single `forward` method
module = torch.jit.trace(n.forward, example_forward_input)
# Trace a module (implicitly traces `forward`) and construct a
# `ScriptModule` with a single `forward` method
module = torch.jit.trace(n, example_forward_input)
"""
if not _enabled:
return func
if optimize is not None:
warnings.warn(
"`optimize` is deprecated and has no effect. "
"Use `with torch.jit.optimized_execution()` instead",
FutureWarning,
stacklevel=2,
)
from torch._utils_internal import (
check_if_torch_exportable,
log_torch_jit_trace_exportability,
log_torchscript_usage,
)
traced_func = _trace_impl(
func,
example_inputs,
optimize,
check_trace,
check_inputs,
check_tolerance,
strict,
_force_outplace,
_module_class,
_compilation_unit,
example_kwarg_inputs,
_store_inputs,
)
log_torchscript_usage("trace", model_id=_get_model_id(traced_func))
if check_if_torch_exportable():
from torch._export.converter import TS2EPConverter
from torch.export._trace import (
_convert_ts_to_export_experimental,
_process_jit_trace_inputs_for_export,
)
traced_func_for_export = _trace_impl(
func,
example_inputs=example_inputs,
optimize=optimize,
check_trace=False,
check_inputs=check_inputs,
check_tolerance=check_tolerance,
strict=strict,
_force_outplace=_force_outplace,
_module_class=_module_class,
_compilation_unit=_compilation_unit,
example_kwarg_inputs=example_kwarg_inputs,
_store_inputs=_store_inputs,
)
export_args, _ = _process_jit_trace_inputs_for_export(
example_inputs, example_kwarg_inputs
)
def _log_exportability(func_to_export, export_func, export_args, export_type):
try:
traced_result = func_to_export(*export_args)
except Exception as e:
_ = e
log_torch_jit_trace_exportability(
"trace", str(export_type), str(_ExportOutcome.SUCCESS), "succeeded"
)
return
try:
ep_module = export_func(func_to_export, export_args)
except Exception as e:
log_torch_jit_trace_exportability(
"trace",
str(export_type),
str(_ExportOutcome.FAILED_TO_EXPORT),
str(e),
)
return
try:
export = ep_module(*export_args)
except Exception as e:
log_torch_jit_trace_exportability(
"trace", str(export_type), str(_ExportOutcome.FAILED_TO_RUN), str(e)
)
return
if not analyze_ts_result_with_export_result(export, traced_result):
log_torch_jit_trace_exportability(
"trace",
str(export_type),
str(_ExportOutcome.ACCURACY_ERROR),
"accuracy error",
)
return
log_torch_jit_trace_exportability(
"trace", str(export_type), str(_ExportOutcome.SUCCESS), "succeeded"
)
def _direct_export_and_lower(func, export_args):
return torch.export.export(func, export_args, strict=False).module()
def _convert_ts_to_export_source_to_source(func, export_args):
return TS2EPConverter(func, export_args).convert().module()
# torch.jit.trace is noop when the original module is torch.jit.ScriptModule
if not isinstance(traced_func_for_export, torch.jit.ScriptModule):
_log_exportability(
traced_func_for_export,
_direct_export_and_lower,
export_args,
_ExportType.DIRECT_EXPORT,
)
_log_exportability(
traced_func_for_export,
_convert_ts_to_export_experimental,
export_args,
_ExportType.TRACE_AND_EXPORT,
)
_log_exportability(
traced_func_for_export,
_convert_ts_to_export_source_to_source,
export_args,
_ExportType.SOURCE_TO_SOURCE,
)
return traced_func
_trace_module_map: Optional[dict[Any, Any]] = None
def trace_module(
mod,
inputs,
optimize=None,
check_trace=True,
check_inputs=None,
check_tolerance=1e-5,
strict=True,
_force_outplace=False,
_module_class=None,
_compilation_unit=_python_cu,
example_inputs_is_kwarg=False,
_store_inputs=True,
):
"""
Trace a module and return an executable :class:`ScriptModule` that will be optimized using just-in-time compilation.
When a module is passed to :func:`torch.jit.trace <torch.jit.trace>`, only
the ``forward`` method is run and traced. With ``trace_module``, you can specify a dictionary of
method names to example inputs to trace (see the ``inputs`` argument below).
See :func:`torch.jit.trace <torch.jit.trace>` for more information on tracing.
Args:
mod (torch.nn.Module): A ``torch.nn.Module`` containing methods whose names are
specified in ``inputs``. The given methods will be compiled
as a part of a single `ScriptModule`.
inputs (dict): A dict containing sample inputs indexed by method names in ``mod``.
The inputs will be passed to methods whose names correspond to inputs'
keys while tracing.
``{ 'forward' : example_forward_input, 'method2': example_method2_input}``
Keyword arguments:
check_trace (``bool``, optional): Check if the same inputs run through
traced code produce the same outputs. Default: ``True``. You might want
to disable this if, for example, your network contains non-
deterministic ops or if you are sure that the network is correct despite
a checker failure.
check_inputs (list of dicts, optional): A list of dicts of input arguments that should be used
to check the trace against what is expected. Each dict
is equivalent to a set of input arguments that would
be specified in ``inputs``. For best results, pass in a
set of checking inputs representative of the space of
shapes and types of inputs you expect the network to see.
If not specified, the original ``inputs`` are used for checking
check_tolerance (float, optional): Floating-point comparison tolerance to use in the checker procedure.
This can be used to relax the checker strictness in the event that
results diverge numerically for a known reason, such as operator fusion.
example_inputs_is_kwarg (``bool``, optional): This parameter indicates whether the example inputs are a
pack of keyword arguments. Default: ``False``.
Returns:
A :class:`ScriptModule` object with a single ``forward`` method containing the traced code.
When ``func`` is a ``torch.nn.Module``, the returned :class:`ScriptModule` will have the same set of
sub-modules and parameters as ``func``.
Example (tracing a module with multiple methods)::
import torch
import torch.nn as nn
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, x):
return self.conv(x)
def weighted_kernel_sum(self, weight):
return weight * self.conv.weight
n = Net()
example_weight = torch.rand(1, 1, 3, 3)
example_forward_input = torch.rand(1, 1, 3, 3)
# Trace a specific method and construct `ScriptModule` with
# a single `forward` method
module = torch.jit.trace(n.forward, example_forward_input)
# Trace a module (implicitly traces `forward`) and construct a
# `ScriptModule` with a single `forward` method
module = torch.jit.trace(n, example_forward_input)
# Trace specific methods on a module (specified in `inputs`), constructs
# a `ScriptModule` with `forward` and `weighted_kernel_sum` methods
inputs = {"forward": example_forward_input, "weighted_kernel_sum": example_weight}
module = torch.jit.trace_module(n, inputs)
"""
if not _enabled:
return mod
if optimize is not None:
warnings.warn(
"`optimize` is deprecated and has no effect. "
"Use `with torch.jit.optimized_execution()` instead",
FutureWarning,
stacklevel=2,
)
var_lookup_fn = _create_interpreter_name_lookup_fn(0)
if not isinstance(mod, torch.nn.Module):
raise AttributeError("expected torch.nn.Module as the first argument")
if not isinstance(inputs, dict):
raise AttributeError("expected a dictionary of (method_name, input) pairs")
old_module_map = torch.jit._trace._trace_module_map
try:
trace_module_map: dict[Any, Any] = {}
def register_submods(mod, prefix):
for name, child in mod.named_children():
submod_qualname = prefix + "." + name
trace_module_map[child] = submod_qualname
register_submods(child, submod_qualname)
trace_module_map["__module"] = mod
torch.jit._trace._trace_module_map = trace_module_map
register_submods(mod, "__module")
module = make_module(mod, _module_class, _compilation_unit)
for method_name, example_inputs in inputs.items():
if method_name == "forward":
# "forward" is a special case because we need to trace
# `Module.__call__`, which sets up some extra tracing, but uses
# argument names of the real `Module.forward` method.
func = mod
forward_method = getattr(mod, method_name)
argument_names = get_callable_argument_names(forward_method)
else:
func = getattr(mod, method_name)
argument_names = get_callable_argument_names(func)
if isinstance(example_inputs, dict) and example_inputs_is_kwarg:
# Raise an exception when the user-provided key names are not aligned with the forward() method's argument names.
for key in example_inputs:
if key not in argument_names:
valid_arguments = "[" + ",".join(argument_names) + "]"
raise NameError(
f"""'{key}' is not in forward() method's arguments,
valid arguments name are {valid_arguments}"""
)
module._c._create_method_from_trace_with_dict(
method_name,
func,
example_inputs,
var_lookup_fn,
strict,
_force_outplace,
argument_names,
_store_inputs,
)
else:
example_inputs = make_tuple(example_inputs)
module._c._create_method_from_trace(
method_name,
func,
example_inputs,
var_lookup_fn,
strict,
_force_outplace,
argument_names,
_store_inputs,
)
check_trace_method = module._c._get_method(method_name)
# Check the trace against new traces created from user-specified inputs
if check_trace:
if check_inputs is not None:
_check_trace(
check_inputs,
func,
check_trace_method,
check_tolerance,
strict,
_force_outplace,
True,
_module_class,
example_inputs_is_kwarg=example_inputs_is_kwarg,
)
else:
_check_trace(
[inputs],
func,
check_trace_method,
check_tolerance,
strict,
_force_outplace,
True,
_module_class,
example_inputs_is_kwarg=example_inputs_is_kwarg,
)
finally:
torch.jit._trace._trace_module_map = old_module_map
return module
def is_tracing():
"""Return a boolean value.
Returns ``True`` in tracing (if a function is called during the
tracing of code with ``torch.jit.trace``) and ``False`` otherwise.
"""
if is_scripting():
return False
return torch._C._is_tracing()
class TracedModule(ScriptModule):
_disable_script_meta = True
def __init__(self, orig, id_set=None, _compilation_unit=None):
# XXX: orig can be a nn.Module or a function!
super().__init__()
assert isinstance(orig, torch.nn.Module)
# Copy a subset of `orig` to a temporary nn.Module.
# This is a way to customize what will actually get compiled by create_script_module
id_set = set()
# This allows us to preserve the original module's qualified name by defining a new
# type with the attribute _jit_override_qualname. In torch._jit_internal._qualified_name
# we have a special case that will look up this attribute to override whatever qualname
# we would get from the python type system
class QualnameWrapper(torch.nn.Module):
pass
QualnameWrapper._jit_override_qualname = torch._jit_internal._qualified_name( # type: ignore[attr-defined]
type(orig)
)
tmp_module = QualnameWrapper()
def check_unique(param):
if param in id_set:
raise ValueError(
"TracedModules don't support parameter sharing between modules"
)
id_set.add(param)
tmp_module.training = orig.training
for name, param in orig._parameters.items():
if param is not None:
tmp_module._parameters[name] = param
check_unique(param)
for name, buf in orig._buffers.items():
if buf is not None:
tmp_module._buffers[name] = buf
check_unique(buf)
for name, val in orig.__dict__.items():
if (
torch._C._jit_is_script_object(val)
and name not in orig._parameters
and name not in orig._buffers
):
setattr(tmp_module, name, val)
if orig._backward_hooks:
raise ValueError(
"Modules that have backward hooks assigned can't be compiled: "
+ str(orig)
)
for name, submodule in orig._modules.items():
if submodule is None:
continue
tmp_module._modules[name] = make_module(
submodule, TracedModule, _compilation_unit=None
)
script_module = torch.jit._recursive.create_script_module(
tmp_module, lambda module: (), share_types=False, is_tracing=True
)
self.__dict__["_name"] = type(orig).__name__
self.__dict__["_actual_script_module"] = script_module
for name in ("_parameters", "_buffers", "_modules", "training"):
delattr(self, name)
def forward(self, *args, **kwargs):
raise RuntimeError("Trace submodules cannot be called.")
def __getattr__(self, attr):
if "_actual_script_module" not in self.__dict__:
return super().__getattr__(attr)
return getattr(self._actual_script_module, attr)
def __setattr__(self, attr, value):
if "_actual_script_module" not in self.__dict__:
return super().__setattr__(attr, value)
setattr(self._actual_script_module, attr, value)
def _get_name(self):
return self._name
def extra_repr(self):
return f"original_name={self._name}"
class TopLevelTracedModule(TracedModule):
forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment]
def _reconstruct(self, cpp_module):
"""
Re-construct an instance of TopLevelTracedModule using an instance of a C++ module.
Args:
cpp_module: The C++ module that this TopLevelTracedModule will be rebuilt around.
"""
self.__dict__["_actual_script_module"]._reconstruct(cpp_module)
def _script_if_tracing(fn: Callable[P, R]) -> Callable[P, R]:
@functools.wraps(fn)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
if not is_tracing():
# Not tracing, don't do anything
return fn(*args, **kwargs)
compiled_fn: Callable[P, R] = script(wrapper.__original_fn) # type: ignore[attr-defined]
return compiled_fn(*args, **kwargs)
wrapper.__original_fn = fn # type: ignore[attr-defined]
wrapper.__script_if_tracing_wrapper = True # type: ignore[attr-defined]
return wrapper
def _get_trace_graph(
f,
args=(),
kwargs=None,
strict=True,
_force_outplace=False,
return_inputs=False,
_return_inputs_states=False,
):
"""Return a tuple on tracing a function or model.
.. warning::
This function is internal-only and should only be used by the ONNX
exporter. If you are trying to get a graph through tracing, please go
through the public API instead::
trace = torch.jit.trace(nn.LSTMCell(), (input, hidden))
trace_graph = trace.graph
Trace a function or model, returning a tuple consisting of both the
*trace* of an execution and the original return value. If return_inputs is set,
the trace inputs are also returned as part of the tuple.
Tracing is guaranteed not to change the semantics of the function/module
that is traced.
Args:
f (torch.nn.Module or function): the function or module
to be traced.
args (tuple or Tensor): the positional arguments to pass to the
function/module to be traced. A non-tuple is assumed to
be a single positional argument to be passed to the model.
kwargs (dict): the keyword arguments to pass to the function/module
to be traced.
Example (trace a cell):
.. testcode::
trace = torch.jit.trace(nn.LSTMCell(), (input, hidden))
"""
if kwargs is None:
kwargs = {}
if not isinstance(args, tuple):
args = (args,)
outs = ONNXTracedModule(
f, strict, _force_outplace, return_inputs, _return_inputs_states
)(*args, **kwargs)
return outs
```
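A minimal usage sketch for the `_script_if_tracing` helper defined above. The decorated function, its shapes, and the `torch.jit._script_if_tracing` import path are illustrative assumptions: outside of tracing the helper runs as plain Python; when called during `torch.jit.trace` it is compiled with `torch.jit.script`, so data-dependent control flow survives in the trace.

```py
import torch

@torch.jit._script_if_tracing
def pad_to_length(x: torch.Tensor, length: int) -> torch.Tensor:
    # Data-dependent branch: the tracer alone would freeze it, but scripting
    # the helper during tracing keeps it dynamic.
    if x.size(0) < length:
        x = torch.cat([x, x.new_zeros(length - x.size(0))])
    return x

print(pad_to_length(torch.ones(3), 5).shape)  # plain Python call -> torch.Size([5])
```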
|
================================================================================================================
SOURCE CODE FILE: annotations.py
LINES: 11
SIZE: 17.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\annotations.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import ast
import builtins
import dis
import enum
import inspect
import re
import typing
import warnings
from textwrap import dedent
import torch
from torch._C import (
_GeneratorType,
AnyType,
AwaitType,
BoolType,
ComplexType,
DeviceObjType,
DictType,
EnumType,
FloatType,
FutureType,
InterfaceType,
IntType,
ListType,
NoneType,
NumberType,
OptionalType,
StreamObjType,
StringType,
TensorType,
TupleType,
UnionType,
)
from torch._jit_internal import ( # type: ignore[attr-defined]
_Await,
_qualified_name,
Any,
BroadcastingList1,
BroadcastingList2,
BroadcastingList3,
Dict,
Future,
is_await,
is_dict,
is_future,
is_ignored_fn,
is_list,
is_optional,
is_tuple,
is_union,
List,
Optional,
Tuple,
Union,
)
from torch._sources import get_source_lines_and_file
from ._state import _get_script_class
if torch.distributed.rpc.is_available():
from torch._C import RRefType
from torch._jit_internal import is_rref, RRef
from torch._ops import OpOverloadPacket
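# Lightweight namespace stand-in used by EvalEnv below so that dotted names such as
# "torch.Tensor" or "typing.Tuple" resolve when type comments are evaluated.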
class Module:
def __init__(self, name, members):
self.name = name
self.members = members
def __getattr__(self, name):
try:
return self.members[name]
except KeyError:
raise RuntimeError(
f"Module {self.name} has no member called {name}"
) from None
class EvalEnv:
env = {
"torch": Module("torch", {"Tensor": torch.Tensor}),
"Tensor": torch.Tensor,
"typing": Module("typing", {"Tuple": Tuple}),
"Tuple": Tuple,
"List": List,
"Dict": Dict,
"Optional": Optional,
"Union": Union,
"Future": Future,
"Await": _Await,
}
def __init__(self, rcb):
self.rcb = rcb
if torch.distributed.rpc.is_available():
self.env["RRef"] = RRef
def __getitem__(self, name):
if name in self.env:
return self.env[name]
if self.rcb is not None:
return self.rcb(name)
return getattr(builtins, name, None)
def get_signature(fn, rcb, loc, is_method):
if isinstance(fn, OpOverloadPacket):
signature = try_real_annotations(fn.op, loc)
else:
signature = try_real_annotations(fn, loc)
if signature is not None and is_method:
# If this is a method, then the signature will include a type for
# `self`, but type comments do not contain a `self`. So strip it
# away here so everything is consistent (`inspect.ismethod` does
# not work here since `fn` is unbound at this point)
param_types, return_type = signature
param_types = param_types[1:]
signature = (param_types, return_type)
if signature is None:
type_line, source = None, None
try:
source = dedent("".join(get_source_lines_and_file(fn)[0]))
type_line = get_type_line(source)
except TypeError:
pass
        # This might happen either because we failed to get the source of fn, or
        # because it didn't have any annotations.
if type_line is not None:
signature = parse_type_line(type_line, rcb, loc)
return signature
def is_function_or_method(the_callable):
# A stricter version of `inspect.isroutine` that does not pass for built-in
# functions
return inspect.isfunction(the_callable) or inspect.ismethod(the_callable)
def is_vararg(the_callable):
if not is_function_or_method(the_callable) and callable(the_callable): # noqa: B004
# If `the_callable` is a class, de-sugar the call so we can still get
# the signature
the_callable = the_callable.__call__
if is_function_or_method(the_callable):
return inspect.getfullargspec(the_callable).varargs is not None
else:
return False
def get_param_names(fn, n_args):
if isinstance(fn, OpOverloadPacket):
fn = fn.op
if (
not is_function_or_method(fn)
and callable(fn)
and is_function_or_method(fn.__call__)
): # noqa: B004
# De-sugar calls to classes
fn = fn.__call__
if is_function_or_method(fn):
if is_ignored_fn(fn):
fn = inspect.unwrap(fn)
return inspect.getfullargspec(fn).args
else:
# The `fn` was not a method or function (maybe a class with a __call__
# method, so use a default param name list)
return [str(i) for i in range(n_args)]
def check_fn(fn, loc):
# Make sure the function definition is not a class instantiation
try:
source = dedent("".join(get_source_lines_and_file(fn)[0]))
except (OSError, TypeError):
return
if source is None:
return
py_ast = ast.parse(source)
if len(py_ast.body) == 1 and isinstance(py_ast.body[0], ast.ClassDef):
raise torch.jit.frontend.FrontendError(
loc,
f"Cannot instantiate class '{py_ast.body[0].name}' in a script function",
)
if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef):
raise torch.jit.frontend.FrontendError(
loc, "Expected a single top-level function"
)
def _eval_no_call(stmt, glob, loc):
"""Evaluate statement as long as it does not contain any method/function calls."""
bytecode = compile(stmt, "", mode="eval")
for insn in dis.get_instructions(bytecode):
if "CALL" in insn.opname:
raise RuntimeError(
f"Type annotation should not contain calls, but '{stmt}' does"
)
return eval(bytecode, glob, loc) # type: ignore[arg-type] # noqa: P204
def parse_type_line(type_line, rcb, loc):
"""Parse a type annotation specified as a comment.
Example inputs:
# type: (Tensor, torch.Tensor) -> Tuple[Tensor]
# type: (Tensor, Tuple[Tensor, Tensor]) -> Tensor
"""
arg_ann_str, ret_ann_str = split_type_line(type_line)
try:
arg_ann = _eval_no_call(arg_ann_str, {}, EvalEnv(rcb))
except (NameError, SyntaxError) as e:
raise RuntimeError(
"Failed to parse the argument list of a type annotation"
) from e
if not isinstance(arg_ann, tuple):
arg_ann = (arg_ann,)
try:
ret_ann = _eval_no_call(ret_ann_str, {}, EvalEnv(rcb))
except (NameError, SyntaxError) as e:
raise RuntimeError(
"Failed to parse the return type of a type annotation"
) from e
arg_types = [ann_to_type(ann, loc) for ann in arg_ann]
return arg_types, ann_to_type(ret_ann, loc)
def get_type_line(source):
"""Try to find the line containing a comment with the type annotation."""
type_comment = "# type:"
lines = source.split("\n")
lines = list(enumerate(lines))
type_lines = list(filter(lambda line: type_comment in line[1], lines))
# `type: ignore` comments may be needed in JIT'ed functions for mypy, due
# to the hack in torch/_VF.py.
# An ignore type comment can be of following format:
# 1) type: ignore
# 2) type: ignore[rule-code]
# This ignore statement must be at the end of the line
# adding an extra backslash before the space, to avoid triggering
# one of the checks in .github/workflows/lint.yml
type_pattern = re.compile("# type:\\ ignore(\\[[a-zA-Z-]+\\])?$")
type_lines = list(filter(lambda line: not type_pattern.search(line[1]), type_lines))
if len(type_lines) == 0:
# Catch common typo patterns like extra spaces, typo in 'ignore', etc.
wrong_type_pattern = re.compile("#[\t ]*type[\t ]*(?!: ignore(\\[.*\\])?$):")
wrong_type_lines = list(
filter(lambda line: wrong_type_pattern.search(line[1]), lines)
)
if len(wrong_type_lines) > 0:
raise RuntimeError(
"The annotation prefix in line "
+ str(wrong_type_lines[0][0])
+ " is probably invalid.\nIt must be '# type:'"
+ "\nSee PEP 484 (https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code)" # noqa: B950
+ "\nfor examples"
)
return None
elif len(type_lines) == 1:
# Only 1 type line, quit now
return type_lines[0][1].strip()
# Parse split up argument types according to PEP 484
# https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code
return_line = None
parameter_type_lines = []
for line_num, line in type_lines:
if "# type: (...) -> " in line:
return_line = (line_num, line)
break
elif type_comment in line:
parameter_type_lines.append(line)
if return_line is None:
raise RuntimeError(
"Return type line '# type: (...) -> ...' not found on multiline "
"type annotation\nfor type lines:\n"
+ "\n".join([line[1] for line in type_lines])
+ "\n(See PEP 484 https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code)"
)
def get_parameter_type(line):
item_type = line[line.find(type_comment) + len(type_comment) :]
return item_type.strip()
types = map(get_parameter_type, parameter_type_lines)
parameter_types = ", ".join(types)
return return_line[1].replace("...", parameter_types)
def split_type_line(type_line):
"""Split the comment with the type annotation into parts for argument and return types.
For example, for an input of:
# type: (Tensor, torch.Tensor) -> Tuple[Tensor, Tensor]
This function will return:
("(Tensor, torch.Tensor)", "Tuple[Tensor, Tensor]")
"""
start_offset = len("# type:")
try:
arrow_pos = type_line.index("->")
except ValueError:
raise RuntimeError(
"Syntax error in type annotation (couldn't find `->`)"
) from None
return type_line[start_offset:arrow_pos].strip(), type_line[arrow_pos + 2 :].strip()
def try_real_annotations(fn, loc):
"""Try to use the Py3.5+ annotation syntax to get the type."""
try:
# Note: anything annotated as `Optional[T]` will automatically
# be returned as `Union[T, None]` per
# https://github.com/python/typing/blob/master/src/typing.py#L850
sig = inspect.signature(fn)
except ValueError:
return None
all_annots = [sig.return_annotation] + [
p.annotation for p in sig.parameters.values()
]
if all(ann is sig.empty for ann in all_annots):
return None
arg_types = [ann_to_type(p.annotation, loc) for p in sig.parameters.values()]
return_type = ann_to_type(sig.return_annotation, loc)
return arg_types, return_type
# Finds common type for enum values belonging to an Enum class. If not all
# values have the same type, AnyType is returned.
def get_enum_value_type(e: type[enum.Enum], loc):
enum_values: List[enum.Enum] = list(e)
if not enum_values:
raise ValueError(f"No enum values defined for: '{e.__class__}'")
types = {type(v.value) for v in enum_values}
ir_types = [try_ann_to_type(t, loc) for t in types]
# If Enum values are of different types, an exception will be raised here.
    # Even though Python supports this case, we chose not to implement it to
    # avoid overcomplicating the logic here for a rare use case. Please file a
    # feature request if you find it necessary.
res = torch._C.unify_type_list(ir_types)
if not res:
return AnyType.get()
return res
def is_tensor(ann):
if issubclass(ann, torch.Tensor):
return True
if issubclass(
ann,
(
torch.LongTensor,
torch.DoubleTensor,
torch.FloatTensor,
torch.IntTensor,
torch.ShortTensor,
torch.HalfTensor,
torch.CharTensor,
torch.ByteTensor,
torch.BoolTensor,
),
):
warnings.warn(
"TorchScript will treat type annotations of Tensor "
"dtype-specific subtypes as if they are normal Tensors. "
"dtype constraints are not enforced in compilation either."
)
return True
return False
def _fake_rcb(inp):
return None
def try_ann_to_type(ann, loc, rcb=None):
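    # Map a Python annotation to the corresponding TorchScript IR type; returns None when
    # the annotation cannot be resolved (ann_to_type below turns that into a ValueError).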
ann_args = typing.get_args(ann) # always returns a tuple!
if ann is inspect.Signature.empty:
return TensorType.getInferred()
if ann is None:
return NoneType.get()
if inspect.isclass(ann) and is_tensor(ann):
return TensorType.get()
if is_tuple(ann):
# Special case for the empty Tuple type annotation `Tuple[()]`
if len(ann_args) == 1 and ann_args[0] == ():
return TupleType([])
return TupleType([try_ann_to_type(a, loc) for a in ann_args])
if is_list(ann):
elem_type = try_ann_to_type(ann_args[0], loc)
if elem_type:
return ListType(elem_type)
if is_dict(ann):
key = try_ann_to_type(ann_args[0], loc)
value = try_ann_to_type(ann_args[1], loc)
# Raise error if key or value is None
if key is None:
raise ValueError(
f"Unknown type annotation: '{ann_args[0]}' at {loc.highlight()}"
)
if value is None:
raise ValueError(
f"Unknown type annotation: '{ann_args[1]}' at {loc.highlight()}"
)
return DictType(key, value)
if is_optional(ann):
if issubclass(ann_args[1], type(None)):
contained = ann_args[0]
else:
contained = ann_args[1]
valid_type = try_ann_to_type(contained, loc)
msg = "Unsupported annotation {} could not be resolved because {} could not be resolved. At\n{}"
assert valid_type, msg.format(repr(ann), repr(contained), repr(loc))
return OptionalType(valid_type)
if is_union(ann):
        # TODO: this is a hack to recognize NumberType
if set(ann_args) == {int, float, complex}:
return NumberType.get()
inner: List = []
# We need these extra checks because both `None` and invalid
# values will return `None`
# TODO: Determine if the other cases need to be fixed as well
for a in typing.get_args(ann):
if a is None:
inner.append(NoneType.get())
maybe_type = try_ann_to_type(a, loc)
msg = "Unsupported annotation {} could not be resolved because {} could not be resolved. At\n{}"
            assert maybe_type, msg.format(repr(ann), repr(a), repr(loc))
inner.append(maybe_type)
return UnionType(inner) # type: ignore[arg-type]
if torch.distributed.rpc.is_available() and is_rref(ann):
return RRefType(try_ann_to_type(ann_args[0], loc))
if is_future(ann):
return FutureType(try_ann_to_type(ann_args[0], loc))
if is_await(ann):
elementType = try_ann_to_type(ann_args[0], loc) if ann_args else AnyType.get()
return AwaitType(elementType)
if ann is float:
return FloatType.get()
if ann is complex:
return ComplexType.get()
if ann is int or ann is torch.SymInt:
return IntType.get()
if ann is str:
return StringType.get()
if ann is bool:
return BoolType.get()
if ann is Any:
return AnyType.get()
if ann is type(None):
return NoneType.get()
if inspect.isclass(ann) and hasattr(ann, "__torch_script_interface__"):
return InterfaceType(ann.__torch_script_interface__)
if ann is torch.device:
return DeviceObjType.get()
if ann is torch.Generator:
return _GeneratorType.get()
if ann is torch.Stream:
return StreamObjType.get()
if ann is torch.dtype:
return IntType.get() # dtype not yet bound in as its own type
if inspect.isclass(ann) and issubclass(ann, enum.Enum):
if _get_script_class(ann) is None:
scripted_class = torch.jit._script._recursive_compile_class(ann, loc)
name = scripted_class.qualified_name()
else:
name = _qualified_name(ann)
return EnumType(name, get_enum_value_type(ann, loc), list(ann))
if inspect.isclass(ann):
maybe_script_class = _get_script_class(ann)
if maybe_script_class is not None:
return maybe_script_class
if torch._jit_internal.can_compile_class(ann):
return torch.jit._script._recursive_compile_class(ann, loc)
# Maybe resolve a NamedTuple to a Tuple Type
if rcb is None:
rcb = _fake_rcb
return torch._C._resolve_type_from_object(ann, loc, rcb)
def ann_to_type(ann, loc, rcb=None):
the_type = try_ann_to_type(ann, loc, rcb)
if the_type is not None:
return the_type
raise ValueError(f"Unknown type annotation: '{ann}' at {loc.highlight()}")
__all__ = [
"Any",
"List",
"BroadcastingList1",
"BroadcastingList2",
"BroadcastingList3",
"Tuple",
"is_tuple",
"is_list",
"Dict",
"is_dict",
"is_optional",
"is_union",
"TensorType",
"TupleType",
"FloatType",
"ComplexType",
"IntType",
"ListType",
"StringType",
"DictType",
"AnyType",
"Module",
# TODO: Consider not exporting these during wildcard import (reserve
# that for the types; for idiomatic typing code.)
"get_signature",
"check_fn",
"get_param_names",
"parse_type_line",
"get_type_line",
"split_type_line",
"try_real_annotations",
"try_ann_to_type",
"ann_to_type",
]
```
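A small sketch of the type-comment helpers defined in this file (both are listed in ``__all__`` above); the source string is an illustrative assumption:

```py
from torch.jit.annotations import get_type_line, split_type_line

src = (
    "def add(x, y):\n"
    "    # type: (Tensor, Tensor) -> Tensor\n"
    "    return x + y\n"
)
type_line = get_type_line(src)     # "# type: (Tensor, Tensor) -> Tensor"
print(split_type_line(type_line))  # ("(Tensor, Tensor)", "Tensor")
```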
|
=============================================================================================================
SOURCE CODE FILE: frontend.py
LINES: 5
SIZE: 45.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\frontend.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import ast
import copy
import dataclasses
import inspect
import re
import string
from collections import namedtuple
from textwrap import dedent
import torch
import torch.jit.annotations
from torch import _jit_internal
from torch._C._jit_tree_views import (
Apply,
Assert,
Assign,
Attribute,
AugAssign,
BinOp,
Break,
ClassDef,
Const,
Continue,
Decl,
Def,
Delete,
DictComp,
DictLiteral,
Dots,
EmptyTypeAnnotation,
ExprStmt,
FalseLiteral,
For,
Ident,
If,
ListComp,
ListLiteral,
NoneLiteral,
Param,
Pass,
Property,
Raise,
Return,
Select,
SliceExpr,
Starred,
Stmt,
StringLiteral,
Subscript,
TernaryIf,
TrueLiteral,
TupleLiteral,
UnaryOp,
Var,
While,
With,
WithItem,
)
from torch._jit_internal import ( # noqa: F401
_is_drop_fn,
FunctionModifiers,
is_static_fn,
should_drop,
)
from torch._sources import (
get_source_lines_and_file,
make_source_context,
parse_def,
ParsedDef as _ParsedDef,
)
from torch.jit._dataclass_impls import DATACLASS_MAGIC_METHODS
from torch.jit._monkeytype_config import get_qualified_name, monkeytype_trace
_IS_ASTUNPARSE_INSTALLED = False
try:
import astunparse # type: ignore[import]
_IS_ASTUNPARSE_INSTALLED = True
except ImportError:
pass
# Borrowed from cPython implementation
# https://github.com/python/cpython/blob/561612d8456cfab5672c9b445521113b847bd6b3/Lib/textwrap.py#L411#
_reserved_prefix = "__jit"
_reserved_names = {"print"}
_identifier_chars = set(string.ascii_lowercase + string.ascii_uppercase + string.digits)
def is_reserved_name(name):
return name.startswith(_reserved_prefix) or name in _reserved_names
pretty_node_names = {
ast.FunctionDef: "function definitions",
ast.For: "for loops",
ast.Delete: "del statements",
ast.ClassDef: "class definitions",
ast.With: "with statements",
ast.Raise: "raise statements",
ast.Assert: "assertions",
ast.Import: "import statements",
ast.ImportFrom: "import statements",
ast.Global: "global variables",
ast.Break: "break statements",
ast.Continue: "continue statements",
}
node_start_tokens = {
ast.FunctionDef: "def",
ast.For: "for",
ast.Delete: "del",
ast.ClassDef: "class",
ast.With: "with",
ast.Raise: "raise",
ast.Assert: "assert",
ast.Import: "import",
ast.ImportFrom: "from",
ast.Global: "global",
ast.Break: "break",
ast.Continue: "continue",
}
pretty_node_names.update(
{
ast.AsyncFunctionDef: "async function definitions",
ast.AsyncFor: "async for loops",
ast.AsyncWith: "async with statements",
ast.Try: "try blocks",
ast.Nonlocal: "nonlocal variables",
}
)
node_start_tokens.update(
{
ast.AsyncFunctionDef: "async def",
ast.AsyncFor: "async for",
ast.AsyncWith: "async with",
ast.Try: "try",
ast.Nonlocal: "nonlocal",
}
)
pretty_node_names.update(
{
ast.AnnAssign: "annotated assignments",
}
)
# NB: no specific token for AnnAssign
class FrontendError(Exception):
def __init__(self, source_range, msg):
self.source_range = source_range
self.msg = msg
# This has to be instantiated here so the ErrorReport is accurate to the
# call stack when the FrontendError was raised
self.error_report = torch._C.ErrorReport(self.source_range)
def __str__(self):
return self.msg + self.error_report.what().lstrip()
class NotSupportedError(FrontendError):
pass
class UnsupportedNodeError(NotSupportedError):
def __init__(self, ctx, offending_node, reason=""):
# If we don't have a specific token, we default to length of 1
node_type = type(offending_node)
range_len = len(node_start_tokens.get(node_type, " "))
source_range = ctx.make_range(
offending_node.lineno,
offending_node.col_offset,
offending_node.col_offset + range_len,
)
feature_name = pretty_node_names.get(node_type, node_type.__name__)
msg = f"{feature_name} {reason + ' ' if reason else ''}aren't supported"
super().__init__(source_range, msg)
class FrontendTypeError(FrontendError):
pass
def build_withitems(ctx, items):
items = [build_withitem(ctx, i) for i in items]
return list(items)
def build_stmts(ctx, stmts):
stmts = [build_stmt(ctx, s) for s in stmts]
return list(filter(None, stmts))
def get_class_properties(cls, self_name):
"""
Get a list of Property objects representing the properties of a class.
Args:
cls: The class to get properties of.
self_name: The name of the class that the properties should belong to.
Returns:
A list of Property objects corresponding to the properties of cls. Property
here refers to the subclass of TreeView.
"""
props = inspect.getmembers(cls, predicate=lambda m: isinstance(m, property))
    # Any property that should not be compiled must be in this list on the Module.
unused_properties = getattr(cls, "__jit_unused_properties__", [])
# Create Property TreeView objects from inspected property objects.
properties = []
for prop in props:
if prop[0] not in unused_properties and not should_drop(prop[1].fget):
getter = get_jit_def(
prop[1].fget, f"__{prop[0]}_getter", self_name=self_name
)
setter = (
get_jit_def(prop[1].fset, f"__{prop[0]}_setter", self_name=self_name)
if prop[1].fset
else None
)
properties.append(
Property(getter.range(), Ident(getter.range(), prop[0]), getter, setter)
)
return properties
def get_class_assigns(ctx, cls_ast):
assigns = []
def maybe_build_assign(builder, entry):
nonlocal assigns
try:
assigns.append(builder(ctx, entry))
except NotSupportedError:
pass
for entry in cls_ast.body:
if isinstance(entry, ast.Assign):
maybe_build_assign(StmtBuilder.build_Assign, entry)
elif isinstance(entry, ast.AnnAssign):
maybe_build_assign(StmtBuilder.build_AnnAssign, entry)
return assigns
def get_jit_class_def(cls, self_name):
"""Get definitions for each method within the current class independently.
Args:
cls: The class to get definition of.
self_name: The name of the class that the properties should belong to.
Returns:
torch._C._jit_tree_views.ClassDef: A representation of the class,
the methods in the class and their definition as a tree.
"""
# TODO: proper overriding analysis when implementing class inheritance
methods = inspect.getmembers(
cls,
predicate=lambda m: (inspect.ismethod(m) or inspect.isfunction(m))
and not is_static_fn(cls, m.__name__)
and m.__name__ in cls.__dict__
and not _is_drop_fn(m),
)
def is_classmethod(fn):
return inspect.ismethod(fn) and getattr(fn, "__self__", None) == cls
# Get and parse the source code for this class
sourcelines, file_lineno, filename = get_source_lines_and_file(
cls, torch._C.ErrorReport.call_stack()
)
source = "".join(sourcelines)
dedent_src = dedent(source)
py_ast = ast.parse(dedent_src)
class_ast = py_ast.body[0]
assert isinstance(class_ast, ast.ClassDef)
# Special case for dataclasses. In general we need access to the source code for
# an object in order to JIT compile it. But the dataclasses module dynamically synthesizes
# magic methods for classes, and we can't get the source code for these methods. As a
# workaround, we synthesize TorchScript-friendly implementations ourselves.
if dataclasses.is_dataclass(cls):
# Detect whether the user manually implemented any of the magic methods. If they did,
# we don't want to synthesize/override them.
overrides = {
method.name
for method in class_ast.body
if isinstance(method, ast.FunctionDef)
and method.name in DATACLASS_MAGIC_METHODS
}
for i, (name, _) in enumerate(methods):
# Is this a magic method we can synthesize?
synthesizer_fn = DATACLASS_MAGIC_METHODS.get(name)
if synthesizer_fn and name not in overrides:
parsed_def = synthesizer_fn(cls)
methods[i] = name, parsed_def
func = getattr(cls, name)
_jit_internal.loader.cache(func, parsed_def.source)
method_defs = [
get_jit_def(obj, name, self_name=self_name, is_classmethod=is_classmethod(obj))
for (name, obj) in methods
]
properties = get_class_properties(cls, self_name)
leading_whitespace_len = len(source.split("\n", 1)[0]) - len(
dedent_src.split("\n", 1)[0]
)
ctx = make_source_context(
source, filename, file_lineno, leading_whitespace_len, False
)
assigns = get_class_assigns(ctx, class_ast)
return build_class_def(ctx, class_ast, method_defs, properties, self_name, assigns)
def get_jit_def(fn, def_name, self_name=None, is_classmethod=False):
"""
Build a JIT AST (TreeView) from the given function.
Args:
fn: A function object to compile or a pre-parsed ParsedDef object
def_name: The name to give to the resulting AST object. This is not
always the same as `fn.__name__`, for example:
def _forward(self):
...
forward = _forward
In this case, the `__name__` attribute of the function object is "_forward",
but we want the result AST to have the name "forward".
self_name: If this function is a method, what the type name of `self` is.
"""
parsed_def = parse_def(fn) if not isinstance(fn, _ParsedDef) else fn
type_line = torch.jit.annotations.get_type_line(parsed_def.source)
fn_def = parsed_def.ast.body[0]
if is_classmethod:
arg_name = fn_def.args.args[0].arg # type:ignore[union-attr]
# Insert a statement that assigns the first argument to the class
assign_stmt = ast.parse(f"{arg_name} = {self_name}").body[0]
fn_def.body.insert(0, assign_stmt) # type:ignore[union-attr]
# Swap out the function signature and body if it is unused
if should_drop(fn):
unused_fn_def = ast.parse(
'def unused_fn(self: Any):\n\traise RuntimeError("Cannot call @unused methods")'
)
if len(unused_fn_def.body) != 1 or not isinstance(
unused_fn_def.body[0], ast.FunctionDef
):
raise RuntimeError(
f"Expected a single top-level function: {parsed_def.filename}:{parsed_def.file_lineno}"
)
unused_def = unused_fn_def.body[0]
fn_def.body = unused_def.body # type:ignore[union-attr]
# kwarg/vararg not supported by `build_def`
fn_def.args.kwarg = fn_def.args.vararg = None # type:ignore[union-attr]
for arg in fn_def.args.args + fn_def.args.kwonlyargs: # type:ignore[union-attr]
# Replace potentially unsupported type annotations by "Any"
arg.annotation = unused_def.args.args[0].annotation
if _is_drop_fn(fn):
# Dropping potentially unsupported return type annotation for jit._drop
fn_def.returns = None # type:ignore[union-attr]
fn_def.type_comment = None # type:ignore[union-attr]
# If MonkeyType is installed, get all the consolidated type traces
# for the arguments from type_trace_db
type_trace_db = torch.jit._script._get_type_trace_db()
pdt_arg_types = None
if monkeytype_trace and not isinstance(fn, _ParsedDef): # type: ignore[truthy-function]
qualname = get_qualified_name(fn)
pdt_arg_types = type_trace_db.get_args_types(qualname)
return build_def(
parsed_def.ctx,
fn_def,
type_line,
def_name,
self_name=self_name,
pdt_arg_types=pdt_arg_types,
)
# TODO: more robust handling of recognizing ignore context manager
def is_torch_jit_ignore_context_manager(stmt):
# checks if the statement is torch.jit.ignore context manager
if isinstance(stmt.items[0].context_expr, ast.Call):
# extract torch part
function = stmt.items[0].context_expr.func
if isinstance(function, ast.Attribute):
attr_name = function.attr
attr_value = function.value
if attr_name == "_IgnoreContextManager" and isinstance(
attr_value, ast.Attribute
):
                # there should be at most two nested attributes (e.g. torch.jit._IgnoreContextManager)
if attr_value.attr == "jit" and isinstance(attr_value.value, ast.Name):
if attr_value.value.id == "torch":
return True
return False
class Builder:
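    # Dispatches an AST node to the matching ``build_<NodeName>`` method on the subclass,
    # raising UnsupportedNodeError if no such method exists.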
def __call__(self, ctx, node):
method = getattr(self, "build_" + node.__class__.__name__, None)
if method is None:
raise UnsupportedNodeError(ctx, node)
return method(ctx, node)
def build_class_def(ctx, py_def, methods, properties, self_name, assigns):
r = ctx.make_range(
py_def.lineno, py_def.col_offset, py_def.col_offset + len("class")
)
return ClassDef(
Ident(r, self_name), [Stmt(method) for method in methods], properties, assigns
)
def build_def(ctx, py_def, type_line, def_name, self_name=None, pdt_arg_types=None):
body = py_def.body
r = ctx.make_range(py_def.lineno, py_def.col_offset, py_def.col_offset + len("def"))
param_list = build_param_list(ctx, py_def.args, self_name, pdt_arg_types)
return_type = None
if getattr(py_def, "returns", None) is not None:
return_type = build_expr(ctx, py_def.returns)
decl = Decl(r, param_list, return_type)
is_method = self_name is not None
if type_line is not None:
type_comment_decl = torch._C.parse_type_comment(type_line)
decl = torch._C.merge_type_from_type_comment(decl, type_comment_decl, is_method)
return Def(Ident(r, def_name), decl, build_stmts(ctx, body))
_vararg_kwarg_err = (
"Compiled functions can't take variable number of arguments "
"or use keyword-only arguments with defaults"
)
def build_param_list(ctx, py_args, self_name, pdt_arg_types=None):
if py_args.kwarg is not None:
expr = py_args.kwarg
ctx_range = ctx.make_range(
expr.lineno, expr.col_offset - 1, expr.col_offset + len(expr.arg)
)
raise NotSupportedError(ctx_range, _vararg_kwarg_err)
if py_args.vararg is not None:
expr = py_args.vararg
ctx_range = ctx.make_range(
expr.lineno, expr.col_offset - 1, expr.col_offset + len(expr.arg)
)
raise NotSupportedError(ctx_range, _vararg_kwarg_err)
if len(py_args.kw_defaults) > 0:
# kw_defaults is a list of the values for the kwargs (which default to None),
# so they don't actually have line numbers.
for arg in py_args.kw_defaults:
if arg is not None:
ctx_range = build_expr(ctx, arg).range()
raise NotSupportedError(ctx_range, _vararg_kwarg_err)
    # List of (arg, type) tuples as inferred by profile-directed typing
arg_and_types = [
(
arg,
pdt_arg_types[arg.arg]
if pdt_arg_types and bool(pdt_arg_types[arg.arg])
else None,
)
for arg in py_args.args
]
arg_and_types_kwonlyargs = [
(
arg,
pdt_arg_types[arg.arg]
if pdt_arg_types and bool(pdt_arg_types[arg.arg])
else None,
)
for arg in py_args.kwonlyargs
]
result = [
build_param(ctx, arg, self_name, kwarg_only=False, pdt_arg_type=arg_type)
for arg, arg_type in arg_and_types
]
result += [
build_param(ctx, arg, self_name, kwarg_only=True, pdt_arg_type=arg_type)
for arg, arg_type in arg_and_types_kwonlyargs
]
return result
def build_param(ctx, py_arg, self_name, kwarg_only, pdt_arg_type=None):
# NB: In Python3 py_arg is a pair of (str arg, expr? annotation)
name = py_arg.arg
r = ctx.make_range(py_arg.lineno, py_arg.col_offset, py_arg.col_offset + len(name))
if getattr(py_arg, "annotation", None) is not None:
annotation_expr = build_expr(ctx, py_arg.annotation)
elif pdt_arg_type:
annotation_expr = Var(Ident(r, pdt_arg_type))
elif self_name is not None and name == "self":
annotation_expr = Var(Ident(r, self_name))
else:
annotation_expr = EmptyTypeAnnotation(r)
return Param(annotation_expr, Ident(r, name), kwarg_only)
def build_ignore_context_manager(ctx, stmt):
InputType = namedtuple("InputType", ["name", "ann"])
OutputType = namedtuple("OutputType", ["name", "ann"])
def process_ins_outs(args):
# parse the context manager to figure out inputs and outputs
# with their annotated types
# TODO: add input, output validator
inputs = []
outputs = []
for arg in args:
var_name = arg.arg
var_ann = arg.value.value
var_decl_type, var_ann = var_ann.split(":")
if var_decl_type == "inp":
inputs.append(InputType(var_name, var_ann))
if var_decl_type == "out":
outputs.append(OutputType(var_name, var_ann))
return inputs, outputs
def create_unique_name_ext(ctx, stmt):
        # the extension will be based on the full path filename plus
        # the line number of the original context manager
fn = re.sub(r"[^a-zA-Z0-9_]", "_", ctx.filename)
return f"{fn}_{stmt.lineno}"
def build_return_ann_stmt(outputs):
return_type_ann = ""
return_statement_str = "return "
if len(outputs) == 0:
return_type_ann += " -> None"
if len(outputs) == 1:
return_type_ann = " -> " + outputs[0].ann
return_statement_str += outputs[0].name
if len(outputs) > 1:
return_type_ann = " -> tuple"
return_type_ann += "[" + ", ".join([var.ann for var in outputs]) + "]"
return_statement_str += ", ".join([var.name for var in outputs])
return return_type_ann, return_statement_str
def build_args(args):
return ", ".join([arg.name for arg in args])
inputs, outputs = process_ins_outs(stmt.items[0].context_expr.keywords)
# build the replacement function str with given inputs and outputs
ignore_function_name = "func_ignore_" + create_unique_name_ext(ctx, stmt)
ignore_function_str = "\ndef " + ignore_function_name
ignore_function_str += (
"(" + ", ".join([var.name + " :" + var.ann for var in inputs]) + ")"
)
return_ann, return_stmt = build_return_ann_stmt(outputs)
ignore_function_str += return_ann + ": pass"
# first create the functionDef object from just declaration
ignore_function = ast.parse(ignore_function_str).body[0]
# dump the body of context manager to dummy function
ignore_function.body = stmt.body # type: ignore[attr-defined]
# insert return statement to the function
return_stmt = ast.parse(return_stmt).body[0]
ignore_function.body.append(return_stmt) # type: ignore[attr-defined]
ignore_func_str = f"""\
# Backward compat: These used to be imported into the outer global scope so some
# code may still expect them.
from typing import List, Dict, Tuple
@torch.jit.ignore
{astunparse.unparse(ignore_function)}
"""
g = copy.copy(globals())
exec(ignore_func_str, g) # noqa: P204
# registers the custom function in the global context
globals()[ignore_function_name] = g[ignore_function_name]
# build the statements as:
# <out_1>, <out_2>, ... = torch.jit.frontend.<func>(<in_1>, <in_2>)
assign_str_lhs = build_args(outputs)
# this function will be registered in torch.jit.frontend module by default
assign_str_rhs = (
f"torch.jit.frontend.{ignore_function_name}(" + build_args(inputs) + ")"
)
if len(outputs) > 0:
assign_str = assign_str_lhs + " = " + assign_str_rhs
else:
assign_str = assign_str_rhs
assign_ast = ast.parse(assign_str).body[0]
return assign_ast
def get_default_args(fn):
"""
Get a dictionary of default arguments for a function.
Args:
fn: Callable - The function to inspect for default arguments.
Returns:
(Dict[str, Any]): mapping argument names to their default values if
:attr:`fn` is not None, else empty dictionary.
"""
if fn is None:
return {}
signature = inspect.signature(fn)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
def get_default_args_for_class(cls):
"""
Get default arguments for all methods in a class (except for static methods).
Args:
cls: type - The class type to inspect for default arguments.
Returns:
A Dict[str, Dict[str, Any]] which maps each method name to a Dict[str, Any]
that maps each argument name to its default value.
"""
# Get methods (except static methods because those are compiled separately as
# if they were independent script functions).
methods = inspect.getmembers(
cls,
predicate=lambda m: (inspect.ismethod(m) or inspect.isfunction(m))
and not is_static_fn(cls, m.__name__)
and m.__name__ in cls.__dict__,
)
# Get method defaults. Property defaults do not need to be considered
# because setters cannot be invoked without a value.
defaults = {
method_name: get_default_args(method_impl)
for method_name, method_impl in methods
}
return defaults
class WithItemBuilder(Builder):
@staticmethod
def build_withitem(ctx, item):
lineno = item.context_expr.lineno
start = item.context_expr.col_offset
end = start + len(pretty_node_names[ast.With])
op_vars = item.optional_vars
r = ctx.make_range(lineno, start, end)
return WithItem(
r,
build_expr(ctx, item.context_expr),
build_expr(ctx, op_vars) if op_vars else None,
)
class StmtBuilder(Builder):
augassign_map = {
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.Mod: "%",
ast.BitOr: "|",
ast.BitAnd: "&",
ast.BitXor: "^",
ast.LShift: "<<",
ast.RShift: ">>",
ast.Pow: "**",
}
@staticmethod
def build_Expr(ctx, stmt):
value = stmt.value
if value.__class__.__name__ == "Str":
# If a statement is a string literal expression,
# then it is a docstring. Just ignore it.
return None
else:
return ExprStmt(build_expr(ctx, value))
@staticmethod
def build_Assign(ctx, stmt):
rhs = build_expr(ctx, stmt.value)
lhs = [build_expr(ctx, x) for x in stmt.targets]
return Assign(lhs, rhs)
@staticmethod
def build_AnnAssign(ctx, stmt):
if stmt.value is None:
raise UnsupportedNodeError(ctx, stmt, reason="without assigned value")
# Disallow type annotations on instance attributes outside of __init__
if (
type(stmt.target) == ast.Attribute
and stmt.target.value.id == "self" # type: ignore[attr-defined]
and ctx.funcname != "__init__"
):
start = stmt.col_offset
end = start + len(f"self.{stmt.target.attr}")
if hasattr(stmt.annotation, "id"):
end += len(f": {stmt.annotation.id}")
sr = ctx.make_range(stmt.lineno, start, end)
raise ValueError(
"Type annotations on instance attributes must be declared in "
f"__init__, not '{ctx.funcname}': {sr}"
)
rhs = build_expr(ctx, stmt.value)
lhs = build_expr(ctx, stmt.target)
the_type = build_expr(ctx, stmt.annotation)
return Assign([lhs], rhs, the_type)
@staticmethod
def build_Delete(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("del"))
return Delete(r, [build_expr(ctx, target) for target in stmt.targets])
@staticmethod
def build_Return(ctx, stmt):
r = ctx.make_range(
stmt.lineno, stmt.col_offset, stmt.col_offset + len("return")
)
return Return(r, None if stmt.value is None else build_expr(ctx, stmt.value))
@staticmethod
def build_Raise(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("raise"))
expr = build_expr(ctx, stmt.exc)
return Raise(r, expr)
@staticmethod
def build_Assert(ctx, stmt):
r = ctx.make_range(
stmt.lineno, stmt.col_offset, stmt.col_offset + len("assert")
)
test = build_expr(ctx, stmt.test)
msg = build_expr(ctx, stmt.msg) if stmt.msg is not None else None
return Assert(r, test, msg)
@staticmethod
def build_AugAssign(ctx, stmt):
lhs = build_expr(ctx, stmt.target)
rhs = build_expr(ctx, stmt.value)
op = type(stmt.op)
if op in StmtBuilder.augassign_map:
op_token = StmtBuilder.augassign_map[op]
else:
raise NotSupportedError(
find_before(ctx, rhs.range().start, "=", offsets=(-1, 0)),
"unsupported kind of augmented assignment: " + op.__name__,
)
return AugAssign(lhs, op_token, rhs)
@staticmethod
def build_While(ctx, stmt):
if stmt.orelse:
# TODO: try to recover the location of else:? Python doesn't give us useful
# annotations in this case
raise NotSupportedError(
None, "else branches of while loops aren't supported"
)
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("while"))
return While(r, build_expr(ctx, stmt.test), build_stmts(ctx, stmt.body))
@staticmethod
def build_For(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("for"))
if stmt.orelse:
raise NotSupportedError(r, "else branches of for loops aren't supported")
return For(
r,
[build_expr(ctx, stmt.target)],
[build_expr(ctx, stmt.iter)],
build_stmts(ctx, stmt.body),
)
@staticmethod
def build_If(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("if"))
return If(
r,
build_expr(ctx, stmt.test),
build_stmts(ctx, stmt.body),
build_stmts(ctx, stmt.orelse),
)
@staticmethod
def build_Print(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("print"))
if stmt.dest:
raise NotSupportedError(
r, "print statements with non-default destinations aren't supported"
)
args = [build_expr(ctx, val) for val in stmt.values]
return ExprStmt(Apply(Var(Ident(r, "print")), args, []))
@staticmethod
def build_Pass(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("pass"))
return Pass(r)
@staticmethod
def build_Break(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("break"))
return Break(r)
@staticmethod
def build_Continue(ctx, stmt):
r = ctx.make_range(
stmt.lineno, stmt.col_offset, stmt.col_offset + len("continue")
)
return Continue(r)
@staticmethod
def build_With(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("with"))
# Handle ignore context manager
if is_torch_jit_ignore_context_manager(stmt):
if not _IS_ASTUNPARSE_INSTALLED:
raise RuntimeError(
"torch.jit._IgnoreContextManager requires installing Python library `astunparse`, \
please install it in your Python environment"
)
assign_ast = build_ignore_context_manager(ctx, stmt)
return build_stmt(ctx, assign_ast)
return With(r, build_withitems(ctx, stmt.items), build_stmts(ctx, stmt.body))
class ExprBuilder(Builder):
binop_map = {
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.Pow: "**",
ast.Mod: "%",
ast.FloorDiv: "//",
ast.BitAnd: "&",
ast.BitXor: "^",
ast.BitOr: "|",
ast.LShift: "<<",
ast.RShift: ">>",
}
binop_map[ast.MatMult] = "@"
unop_map = {
ast.Not: "not",
ast.USub: "-",
ast.Invert: "~",
}
boolop_map = {
ast.And: "and",
ast.Or: "or",
}
cmpop_map = {
ast.Eq: "==",
ast.NotEq: "!=",
ast.LtE: "<=",
ast.Lt: "<",
ast.GtE: ">=",
ast.Gt: ">",
ast.Is: "is",
ast.IsNot: "is not",
ast.In: "in",
ast.NotIn: "not in",
}
@staticmethod
def build_Attribute(ctx, expr):
base = build_expr(ctx, expr.value)
        # expr.attr is just a string, so it's not annotated in any way;
        # we have to build the range manually
source = ctx.source.encode("utf-8")
def get_char(index):
return chr(source[index])
start_pos = base.range().end + 1
while get_char(start_pos) in string.whitespace: # Skip whitespace
start_pos += 1
end_pos = start_pos + len(expr.attr)
name_range = ctx.make_raw_range(start_pos, end_pos)
return Select(base, Ident(name_range, expr.attr))
@staticmethod
def build_Call(ctx, expr):
func = build_expr(ctx, expr.func)
args = [build_expr(ctx, py_arg) for py_arg in expr.args]
if hasattr(expr, "starargs") and expr.starargs:
stararg_expr = build_expr(ctx, expr.starargs)
args += [Starred(stararg_expr.range(), stararg_expr)]
kwargs = []
for kw in expr.keywords:
kw_expr = build_expr(ctx, kw.value)
# XXX: we could do a better job at figuring out the range for the name here
if not kw.arg:
raise NotSupportedError(
kw_expr.range(), "keyword-arg expansion is not supported"
)
kwargs.append(Attribute(Ident(kw_expr.range(), kw.arg), kw_expr))
return Apply(func, args, kwargs)
@staticmethod
def build_Ellipsis(ctx, expr):
r = ctx.make_range(
expr.lineno, expr.col_offset, expr.col_offset + 3
) # len("...") == 3
return Dots(r)
@staticmethod
def build_Name(ctx, expr):
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(expr.id))
if expr.id.startswith(_reserved_prefix):
raise NotSupportedError(
r,
"names of variables used in JIT-ed functions "
"can't start with " + _reserved_prefix,
)
if expr.id == "True":
return TrueLiteral(r)
elif expr.id == "False":
return FalseLiteral(r)
elif expr.id == "None":
return NoneLiteral(r)
elif expr.id == "Ellipsis":
return Dots(r)
return Var(Ident(r, expr.id))
@staticmethod
def build_NameConstant(ctx, expr):
r = ctx.make_range(
expr.lineno, expr.col_offset, expr.col_offset + len(str(expr.value))
)
if expr.value is True:
return TrueLiteral(r)
elif expr.value is False:
return FalseLiteral(r)
elif expr.value is None:
return NoneLiteral(r)
elif expr.value == Ellipsis:
return Dots(r)
else:
raise ValueError("Name constant value unsupported: " + str(expr.value))
@staticmethod
def build_BinOp(ctx, expr):
lhs = build_expr(ctx, expr.left)
rhs = build_expr(ctx, expr.right)
op = type(expr.op)
if op == ast.Div and not ctx.uses_true_division:
err_range = ctx.make_raw_range(lhs.range().end, rhs.range().start)
raise FrontendError(
err_range,
"Division of ints in TorchScript uses Python 3 true "
"division semantics. Please put `from __future__ "
"import division` at the top of your file",
)
op_token = ExprBuilder.binop_map.get(op)
if op_token is None:
err_range = ctx.make_raw_range(lhs.range().end, rhs.range().start)
raise NotSupportedError(
err_range, "unsupported binary operator: " + op.__name__
)
return BinOp(op_token, lhs, rhs)
@staticmethod
def build_UnaryOp(ctx, expr):
sub_expr = build_expr(ctx, expr.operand)
op = type(expr.op)
op_token = ExprBuilder.unop_map.get(op)
if op_token is None:
raise NotSupportedError(
expr.range(), "unsupported unary operator: " + op.__name__
)
r = ctx.make_range(
expr.lineno, expr.col_offset, expr.col_offset + len(op_token)
)
return UnaryOp(r, op_token, sub_expr)
@staticmethod
def build_BoolOp(ctx, expr):
if len(expr.values) < 2:
raise AssertionError(
"expected at least 2 values in BoolOp, but got " + str(len(expr.values))
)
sub_exprs = [build_expr(ctx, sub_expr) for sub_expr in expr.values]
op = type(expr.op)
op_token = ExprBuilder.boolop_map.get(op)
if op_token is None:
err_range = ctx.make_raw_range(
sub_exprs[0].range().end, sub_exprs[1].range().start
)
raise NotSupportedError(
err_range, "unsupported boolean operator: " + op.__name__
)
lhs = sub_exprs[0]
for rhs in sub_exprs[1:]:
lhs = BinOp(op_token, lhs, rhs)
return lhs
@staticmethod
def build_IfExp(ctx, expr):
return TernaryIf(
build_expr(ctx, expr.test),
build_expr(ctx, expr.body),
build_expr(ctx, expr.orelse),
)
@staticmethod
def build_Compare(ctx, expr):
operands = [build_expr(ctx, e) for e in [expr.left] + list(expr.comparators)]
result = None
for lhs, op_, rhs in zip(operands, expr.ops, operands[1:]):
op = type(op_)
op_token = ExprBuilder.cmpop_map.get(op)
r = ctx.make_raw_range(lhs.range().end, rhs.range().start)
if op_token is None:
raise NotSupportedError(
r, "unsupported comparison operator: " + op.__name__
)
if op == ast.NotIn:
                # NB: `not in` is just `not( in )`, so we don't introduce a new tree view
# but just make it a nested call in our tree view structure
in_expr = BinOp("in", lhs, rhs)
cmp_expr = UnaryOp(r, "not", in_expr)
else:
cmp_expr = BinOp(op_token, lhs, rhs)
if result is None:
result = cmp_expr
else:
result = BinOp("and", result, cmp_expr)
return result
@staticmethod
def build_Subscript(ctx, expr):
def build_SliceExpr(ctx, base, slice_expr):
lower = (
build_expr(ctx, slice_expr.lower)
if slice_expr.lower is not None
else None
)
upper = (
build_expr(ctx, slice_expr.upper)
if slice_expr.upper is not None
else None
)
step = (
build_expr(ctx, slice_expr.step)
if slice_expr.step is not None
else None
)
return SliceExpr(base.range(), lower, upper, step)
def build_Index(ctx, base, index_expr):
if isinstance(index_expr.value, ast.Tuple):
raise NotSupportedError(
base.range(),
"slicing multiple dimensions with tuples not supported yet",
)
return build_expr(ctx, index_expr.value)
def build_ExtSlice(ctx, base, extslice):
sub_exprs = []
for expr in extslice.dims:
sub_type = type(expr)
if sub_type is ast.Index:
sub_exprs.append(build_Index(ctx, base, expr))
elif sub_type is ast.Slice:
sub_exprs.append(build_SliceExpr(ctx, base, expr))
elif sub_type is ast.Constant and expr.value is Ellipsis:
sub_exprs.append(Dots(base.range()))
else:
raise NotSupportedError(
base.range(),
f"slicing multiple dimensions with {sub_type} not supported",
)
return sub_exprs
base = build_expr(ctx, expr.value)
sub_type = type(expr.slice)
if sub_type is ast.Index:
if isinstance(expr.slice.value, ast.Tuple):
# N-dimensional indexing using Tuple: x[(i, j, k)] is equivalent to x[i, j, k]
# XXX: Indexing using a list is **different**! It triggers advanced indexing.
indices = [
build_expr(ctx, index_expr) for index_expr in expr.slice.value.elts
]
if not indices:
# `col_offset` is an int, but `end_col_offset` is
# `Optional[int]`. The magic number is here to make
# sure we can parse `()` on any machine
r = ctx.make_range(
expr.lineno,
expr.slice.value.col_offset,
expr.slice.value.col_offset + 2,
)
tup = TupleLiteral(r, [])
indices.append(tup)
return Subscript(base, indices)
else:
return Subscript(base, [build_expr(ctx, expr.slice.value)])
elif sub_type is ast.Slice:
return Subscript(base, [build_SliceExpr(ctx, base, expr.slice)])
elif sub_type is ast.ExtSlice:
return Subscript(base, build_ExtSlice(ctx, base, expr.slice))
        else:  # In Python 3.9 array indices are not wrapped in ast.Index
if sub_type is ast.Tuple:
# N-dimensional indexing using Tuple: x[(i, j, k)] is equivalent to x[i, j, k]
indices = []
for index_expr in expr.slice.elts:
if isinstance(index_expr, ast.Slice):
indices.append(build_SliceExpr(ctx, base, index_expr))
else:
indices.append(build_expr(ctx, index_expr))
# Special-case logic for `typing.Tuple[()]`
if not indices:
# See note above r.e. magic number
r = ctx.make_range(
expr.lineno, expr.slice.col_offset, expr.slice.col_offset + 2
)
tup = TupleLiteral(r, [])
indices.append(tup)
return Subscript(base, indices)
return Subscript(base, [build_expr(ctx, expr.slice)])
@staticmethod
def build_List(ctx, expr):
return ListLiteral(
ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1),
[build_expr(ctx, e) for e in expr.elts],
)
@staticmethod
def build_Tuple(ctx, expr):
return TupleLiteral(
ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1),
[build_expr(ctx, e) for e in expr.elts],
)
@staticmethod
def build_Dict(ctx, expr):
range = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1)
if expr.keys and not expr.keys[0]:
raise NotSupportedError(
range, "Dict expansion (e.g. `{**dict}`) is not supported"
)
return DictLiteral(
range,
[build_expr(ctx, e) for e in expr.keys],
[build_expr(ctx, e) for e in expr.values],
)
@staticmethod
def build_Num(ctx, expr):
value = str(expr.value)
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(value))
return Const(r, value)
@staticmethod
def build_Constant(ctx, expr):
value = expr.value
if value is None or isinstance(value, bool):
# NB: this check has to happen before the int check because bool is
# a subclass of int
return ExprBuilder.build_NameConstant(ctx, expr)
if isinstance(value, (int, float, complex)):
return ExprBuilder.build_Num(ctx, expr)
elif isinstance(value, str):
return ExprBuilder.build_Str(ctx, expr)
elif isinstance(value, type(Ellipsis)):
return ExprBuilder.build_Ellipsis(ctx, expr)
else:
error_range = ctx.make_range(
expr.lineno, expr.col_offset, expr.col_offset + len(str(value))
)
raise FrontendError(error_range, "Unknown Constant expression type")
@staticmethod
def build_Str(ctx, expr):
value = str(expr.value)
r = ctx.make_range(
expr.lineno, expr.col_offset, expr.col_offset + len(value) + 1
)
return StringLiteral(r, value)
@staticmethod
def build_JoinedStr(ctx, expr):
s = ""
args = []
for value in expr.values:
r = ctx.make_range(value.lineno, value.col_offset, value.col_offset + 1)
if isinstance(value, ast.FormattedValue):
if value.conversion != -1:
raise NotSupportedError(r, "Don't support conversion in JoinedStr")
if value.format_spec is not None:
raise NotSupportedError(r, "Don't support formatting in JoinedStr")
s += "{}"
args.append(build_expr(ctx, value.value))
elif isinstance(value, ast.Constant):
s += value.value
else:
raise NotSupportedError(r, "Unsupported value in JoinedStr")
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1)
return Apply(Select(StringLiteral(r, s), Ident(r, "format")), args, [])
@staticmethod
def build_ListComp(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset)
if len(stmt.generators) != 1:
raise NotSupportedError(r, "Only a single generator is currently supported")
if len(stmt.generators[0].ifs) != 0:
raise NotSupportedError(r, "Comprehension ifs are not supported yet")
elt_expr = build_expr(ctx, stmt.elt)
target_expr = build_expr(ctx, stmt.generators[0].target)
iter_expr = build_expr(ctx, stmt.generators[0].iter)
return ListComp(r, elt_expr, target_expr, iter_expr)
@staticmethod
def build_GeneratorExp(ctx, stmt):
# Convert Generator expression to ListComp
return ExprBuilder.build_ListComp(ctx, stmt)
@staticmethod
def build_DictComp(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset)
if len(stmt.generators) != 1:
raise NotSupportedError(r, "Only a single generator is currently supported")
if len(stmt.generators[0].ifs) != 0:
raise NotSupportedError(r, "Comprehension ifs are not supported yet")
key_expr = build_expr(ctx, stmt.key)
value_expr = build_expr(ctx, stmt.value)
target_expr = build_expr(ctx, stmt.generators[0].target)
iter_expr = build_expr(ctx, stmt.generators[0].iter)
return DictComp(r, key_expr, value_expr, target_expr, iter_expr)
@staticmethod
def build_Starred(ctx, expr):
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1)
return Starred(r, build_expr(ctx, expr.value))
build_expr = ExprBuilder()
build_stmt = StmtBuilder()
build_withitem = WithItemBuilder()
def find_before(ctx, pos, substr, offsets=(0, 0)):
new_pos = ctx.source[:pos].rindex(substr)
return ctx.make_raw_range(new_pos + offsets[0], new_pos + len(substr) + offsets[1])
```
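A brief sketch of two helpers from this file. The example function is an assumption, and ``get_jit_def`` requires that the function's source be retrievable (e.g. it is defined in a regular ``.py`` file rather than a REPL):

```py
import torch
from torch.jit.frontend import get_default_args, get_jit_def

def scale(x: torch.Tensor, factor: float = 2.0, bias: float = 0.0) -> torch.Tensor:
    return x * factor + bias

print(get_default_args(scale))  # {'factor': 2.0, 'bias': 0.0}

# Build the TorchScript frontend AST (a torch._C._jit_tree_views.Def) for the function.
jit_ast = get_jit_def(scale, "scale")
print(type(jit_ast).__name__)   # Def
```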
|
======================================================================================================================
SOURCE CODE FILE: generate_bytecode.py
LINES: 1
SIZE: 1.05 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\generate_bytecode.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from torch._C import _compile_graph_to_code_table, _generate_upgraders_graph
def format_bytecode(table):
# given a nested tuple, convert it to nested list
def listify(content):
if not isinstance(content, tuple):
return content
return [listify(i) for i in content]
formatted_table = {}
for entry in table:
identifier = entry[0]
content = entry[1]
content = listify(content)
formatted_table[identifier] = content
return formatted_table
def generate_upgraders_bytecode() -> list:
yaml_content = []
upgraders_graph_map = _generate_upgraders_graph()
for upgrader_name, upgrader_graph in upgraders_graph_map.items():
bytecode_table = _compile_graph_to_code_table(upgrader_name, upgrader_graph)
entry = {upgrader_name: format_bytecode(bytecode_table)}
yaml_content.append(entry)
return yaml_content
if __name__ == "__main__":
raise RuntimeError("This file is not meant to be run directly")
```
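A toy illustration of ``format_bytecode`` above. The table contents are made up for the example; real tables come from ``_compile_graph_to_code_table``:

```py
from torch.jit.generate_bytecode import format_bytecode

# A small (identifier, content) table in the nested-tuple form the C++ side produces.
table = (
    ("instructions", (("STOREN", 1, 2), ("DROPR", 1, 0))),
    ("register_size", 2),
)
print(format_bytecode(table))
# {'instructions': [['STOREN', 1, 2], ['DROPR', 1, 0]], 'register_size': 2}
```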
|
====================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 8.54 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\mobile\__init__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import os
import torch
from torch.jit._serialization import validate_map_location
def _load_for_lite_interpreter(f, map_location=None):
r"""
Load a :class:`LiteScriptModule` saved with :func:`torch.jit._save_for_lite_interpreter`.
Args:
f: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
map_location: a string or torch.device used to dynamically remap
storages to an alternative set of devices.
Returns:
A :class:`LiteScriptModule` object.
Example:
.. testcode::
import torch
import io
# Load LiteScriptModule from saved file path
torch.jit._load_for_lite_interpreter('lite_script_module.pt')
# Load LiteScriptModule from io.BytesIO object
with open('lite_script_module.pt', 'rb') as f:
buffer = io.BytesIO(f.read())
# Load all tensors to the original device
torch.jit.mobile._load_for_lite_interpreter(buffer)
"""
if isinstance(f, (str, os.PathLike)):
if not os.path.exists(f):
raise ValueError(f"The provided filename {f} does not exist")
if os.path.isdir(f):
raise ValueError(f"The provided filename {f} is a directory")
map_location = validate_map_location(map_location)
if isinstance(f, (str, os.PathLike)):
cpp_module = torch._C._load_for_lite_interpreter(os.fspath(f), map_location)
else:
cpp_module = torch._C._load_for_lite_interpreter_from_buffer(
f.read(), map_location
)
return LiteScriptModule(cpp_module)
class LiteScriptModule:
def __init__(self, cpp_module):
self._c = cpp_module
super().__init__()
def __call__(self, *input):
return self._c.forward(input)
def find_method(self, method_name):
return self._c.find_method(method_name)
def forward(self, *input):
return self._c.forward(input)
def run_method(self, method_name, *input):
return self._c.run_method(method_name, input)
def _export_operator_list(module: LiteScriptModule):
r"""Return a set of root operator names (with overload name) that are used by any method in this mobile module."""
return torch._C._export_operator_list(module._c)
def _get_model_bytecode_version(f_input) -> int:
    r"""Take a file-like object and return the model's bytecode version as an integer.
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
Returns:
version: An integer. If the integer is -1, the version is invalid. A warning
will show in the log.
Example:
.. testcode::
from torch.jit.mobile import _get_model_bytecode_version
# Get bytecode version from a saved file path
version = _get_model_bytecode_version("path/to/model.ptl")
"""
if isinstance(f_input, (str, os.PathLike)):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if isinstance(f_input, (str, os.PathLike)):
return torch._C._get_model_bytecode_version(os.fspath(f_input))
else:
return torch._C._get_model_bytecode_version_from_buffer(f_input.read())
def _get_mobile_model_contained_types(f_input) -> set:
    r"""Take a file-like object and return a set of strings, like {"int", "Optional"}.
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
Returns:
        type_list: A set of strings, like {"int", "Optional"}. These are the types used in the bytecode.
Example:
.. testcode::
from torch.jit.mobile import _get_mobile_model_contained_types
# Get type list from a saved file path
type_list = _get_mobile_model_contained_types("path/to/model.ptl")
"""
if isinstance(f_input, (str, os.PathLike)):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if isinstance(f_input, (str, os.PathLike)):
return torch._C._get_mobile_model_contained_types(os.fspath(f_input))
else:
return torch._C._get_mobile_model_contained_types_from_buffer(f_input.read())
def _backport_for_mobile(f_input, f_output, to_version):
r"""Take a input string containing a file name (file-like object) and a new destination to return a boolean.
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
f_output: path to new model destination
to_version: the expected output model bytecode version
Returns:
        success: A boolean. True if the backport succeeded, otherwise False.
"""
if isinstance(f_input, (str, os.PathLike)):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if (isinstance(f_input, (str, os.PathLike))) and (
isinstance(f_output, (str, os.PathLike))
):
return torch._C._backport_for_mobile(
os.fspath(f_input), os.fspath(f_output), to_version
)
else:
return torch._C._backport_for_mobile_from_buffer(
f_input.read(), str(f_output), to_version
)
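# Illustrative sketch (editor's addition): backport a model to an older bytecode version
# so it can run on an older runtime. The file names and target version are hypothetical;
# the current version is checked first with _get_model_bytecode_version.
def _example_backport(input_path="model.ptl", output_path="model_v5.ptl", to_version=5):
    current = _get_model_bytecode_version(input_path)
    if current != -1 and current <= to_version:
        return True  # already at or below the requested version; nothing to do
    return _backport_for_mobile(input_path, output_path, to_version)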
def _backport_for_mobile_to_buffer(f_input, to_version):
r"""Take a string containing a file name (file-like object).
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
"""
if isinstance(f_input, (str, os.PathLike)):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if isinstance(f_input, (str, os.PathLike)):
return torch._C._backport_for_mobile_to_buffer(os.fspath(f_input), to_version)
else:
return torch._C._backport_for_mobile_from_buffer_to_buffer(
f_input.read(), to_version
)
def _get_model_ops_and_info(f_input):
r"""Retrieve the root (top level) operators of a model and their corresponding compatibility info.
    These root operators can call other operators within them (traced ops), and a root op
    can call many different traced ops depending on the internal code paths it takes. The
    traced ops are not returned by this function: they are abstracted into the runtime as an
    implementation detail (and may themselves call further operators), which makes them hard
    to retrieve and of little value here, since they differ depending on which runtime version
    the model is run on. Because of this, there is a false positive this API cannot prevent in
    a compatibility use case: all of a model's root ops may be present in a target runtime
    while some of its traced ops are not, which still prevents the model from running.
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
Returns:
Operators and info: A Dictionary mapping strings (the qualified names of the root operators)
of the model to their OperatorInfo structs.
Example:
.. testcode::
from torch.jit.mobile import _get_model_ops_and_info
        # Get root operators and their compatibility info from a saved file path
ops_and_info = _get_model_ops_and_info("path/to/model.ptl")
"""
if isinstance(f_input, (str, os.PathLike)):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if isinstance(f_input, (str, os.PathLike)):
return torch._C._get_model_ops_and_info(os.fspath(f_input))
else:
return torch._C._get_model_ops_and_info(f_input.read())
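# Illustrative sketch (editor's addition): a simple compatibility check built on top of
# _get_model_ops_and_info. As the docstring above explains, only root operators are covered,
# so a passing check can still be a false positive when traced ops are missing from the
# target runtime. "model.ptl" and `runtime_ops` are hypothetical.
def _example_check_root_ops(path="model.ptl", runtime_ops=frozenset()):
    ops_and_info = _get_model_ops_and_info(path)
    missing = [name for name in ops_and_info if name not in runtime_ops]
    return missing  # an empty list means every root op is available in the target runtime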
```
|
==============================================================================================================
SOURCE CODE FILE: quantized.py
LINES: 1
SIZE: 3.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\quantized.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
class QuantizedLinear(torch.jit.ScriptModule):
def __init__(self, other):
raise RuntimeError(
"torch.jit.QuantizedLinear is no longer supported. Please use "
"torch.ao.nn.quantized.dynamic.Linear instead."
)
# FP16 weights
class QuantizedLinearFP16(torch.jit.ScriptModule):
def __init__(self, other):
super().__init__()
raise RuntimeError(
"torch.jit.QuantizedLinearFP16 is no longer supported. "
"Please use the torch.ao.nn.quantized.dynamic.Linear instead."
)
# Quantized RNN cell implementations
class QuantizedRNNCellBase(torch.jit.ScriptModule):
def __init__(self, other):
raise RuntimeError(
"torch.jit.QuantizedRNNCellBase is no longer supported. "
"Please use the torch.ao.nn.quantized.dynamic.RNNCell instead."
)
class QuantizedRNNCell(QuantizedRNNCellBase):
def __init__(self, other):
raise RuntimeError(
"torch.jit.QuantizedRNNCell is no longer supported. "
"Please use the torch.ao.nn.quantized.dynamic.RNNCell instead."
)
class QuantizedLSTMCell(QuantizedRNNCellBase):
def __init__(self, other):
super().__init__(other)
raise RuntimeError(
"torch.jit.QuantizedLSTMCell is no longer supported. "
"Please use the torch.ao.nn.quantized.dynamic.LSTMCell instead."
)
class QuantizedGRUCell(QuantizedRNNCellBase):
def __init__(self, other):
super().__init__(other)
raise RuntimeError(
"torch.jit.QuantizedGRUCell is no longer supported. "
"Please use the torch.ao.nn.quantized.dynamic.GRUCell instead."
)
class QuantizedRNNBase(torch.jit.ScriptModule):
def __init__(self, other, dtype=torch.int8):
raise RuntimeError(
"torch.jit.QuantizedRNNBase is no longer supported. "
"Please use the torch.ao.nn.quantized.dynamic instead."
)
class QuantizedLSTM(QuantizedRNNBase):
def __init__(self, other, dtype):
raise RuntimeError(
"torch.jit.QuantizedLSTM is no longer supported. "
"Please use the torch.ao.nn.quantized.dynamic.LSTM instead."
)
class QuantizedGRU(QuantizedRNNBase):
def __init__(self, *args, **kwargs):
raise RuntimeError(
"torch.jit.QuantizedGRU is no longer supported. "
"Please use the torch.ao.nn.quantized.dynamic.GRU instead."
)
def quantize_rnn_cell_modules(module):
raise RuntimeError(
"quantize_rnn_cell_modules function is no longer supported. "
"Please use torch.ao.quantization.quantize_dynamic API instead."
)
def quantize_linear_modules(module, dtype=torch.int8):
raise RuntimeError(
"quantize_linear_modules function is no longer supported. "
"Please use torch.ao.quantization.quantize_dynamic API instead."
)
def quantize_rnn_modules(module, dtype=torch.int8):
raise RuntimeError(
"quantize_rnn_modules function is no longer supported. "
"Please use torch.ao.quantization.quantize_dynamic API instead."
)
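# Illustrative migration sketch (editor's addition): every stub in this file points at the
# torch.ao replacements. A typical substitute for the removed dynamic-quantization helpers
# looks like the following; `model` is any hypothetical float module.
def _example_dynamic_quantization(model):
    import torch.ao.quantization

    return torch.ao.quantization.quantize_dynamic(
        model,
        {torch.nn.Linear, torch.nn.LSTM},  # module types to quantize dynamically
        dtype=torch.qint8,
    )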
```
|
==================================================================================================================
SOURCE CODE FILE: supported_ops.py
LINES: 20
SIZE: 10.36 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\supported_ops.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import inspect
import textwrap
import torch.jit
from torch.jit._builtins import _find_builtin
# this file is for generating documentation using sphinx autodoc
# > help(torch.jit.supported_ops) will also give a nice listing of the
# supported ops programmatically
def _hidden(name):
return name.startswith("_") and not name.startswith("__")
def _emit_type(type):
return str(type)
def _emit_arg(indent, i, arg):
v = f"{arg.name} : {_emit_type(arg.type)}"
default = arg.default_value
if default is not None:
v = f"{v}={str(default)}"
if i > 0:
v = f"\n{' ' * indent}{v}"
return v
def _emit_args(indent, arguments):
return ",".join(_emit_arg(indent, i, arg) for i, arg in enumerate(arguments))
def _emit_ret(ret):
return _emit_type(ret.type)
def _emit_rets(returns):
if len(returns) == 1:
return _emit_ret(returns[0])
return f"Tuple[{', '.join(_emit_ret(r) for r in returns)}]"
def _emit_schema(mod, name, schema, arg_start=0, padding=4):
if mod is None:
qualified_name = name
else:
qualified_name = f"{mod}.{name}"
schema_str = (
f"{qualified_name}"
f"({_emit_args(len(qualified_name) + 1 + padding, schema.arguments[arg_start:])}) "
f"-> {_emit_rets(schema.returns)}"
)
return schema_str
def _get_tensor_ops():
def is_tensor_method(schema):
if len(schema.arguments) == 0:
return False
self = schema.arguments[0]
if self.name != "self":
return False
if not self.type.isSubtypeOf(torch._C.TensorType.get()):
return False
return True
methods = []
# discover methods
for elem in dir(torch.Tensor):
if not _hidden(elem):
schemas = torch._C._jit_get_schemas_for_operator("aten::" + elem)
for schema in schemas:
if is_tensor_method(schema):
methods.append(_emit_schema("Tensor", elem, schema, arg_start=1))
return "Supported Tensor Methods", methods
def _get_nn_functional_ops():
functions = []
# Iterate over torch.nn.functional
mod = torch.nn.functional
name = mod.__name__
for elem in dir(torch.nn.functional):
attr = getattr(mod, elem)
if not inspect.isfunction(attr) or _hidden(elem[0]):
# Ignore non-functions and internal methods
continue
attr_module = inspect.getmodule(attr)
if not attr_module:
raise RuntimeError(f"Module for {attr} not found")
if "torch.nn.functional" not in attr_module.__name__:
# Ignore functions from outside torch.nn.functional
continue
try:
# compile fn, get schema
scripted = torch.jit.script(attr)
scripted_schema = scripted.schema
functions.append(_emit_schema(name, elem, scripted_schema))
except: # noqa: B001,E722
# Skip interpolate / boolean dispatched things
pass
# Iterate over modules that we know contain a lot of builtins
for mod in torch.jit._builtins._modules_containing_builtins:
name = mod.__name__
for elem in dir(mod):
builtin = _find_builtin(getattr(mod, elem))
if builtin is not None:
schemas = torch._C._jit_get_schemas_for_operator(builtin)
for schema in schemas:
# remove _tan but not __and__
if not _hidden(elem):
functions.append(_emit_schema(name, elem, schema))
return "Supported PyTorch Functions", functions
def _get_builtins_helper():
builtins = []
for fn, _builtin_name in torch.jit._builtins._builtin_ops:
mod = inspect.getmodule(fn)
if not hasattr(fn, "__name__"):
# typing classes
continue
if not mod:
continue
if _hidden(fn.__name__) or _hidden(fn.__qualname__) or _hidden(mod.__name__):
# skip internal-only methods
continue
if "torch._C" in mod.__name__:
continue
builtins.append((fn, _builtin_name))
return builtins
def _is_math_fn(fn):
mod = inspect.getmodule(fn)
if not mod:
raise RuntimeError(f"Module for {fn} not found")
return mod.__name__ == "math"
def _get_torchscript_builtins():
functions = []
builtins = filter(lambda fn: not _is_math_fn(fn[0]), _get_builtins_helper())
builtins_list = list(builtins)
# Iterate over the specially added builtins
for fn, _builtin_name in builtins_list:
mod = inspect.getmodule(fn)
if not mod:
raise RuntimeError(f"Module for {fn} not found")
builtin = _find_builtin(fn)
if builtin is not None:
schemas = torch._C._jit_get_schemas_for_operator(builtin)
for schema in schemas:
functions.append(_emit_schema(mod.__name__, fn.__name__, schema))
return "TorchScript Builtin Functions", functions
def _get_math_builtins():
functions = []
builtins = filter(lambda fn: _is_math_fn(fn[0]), _get_builtins_helper())
builtins_list = list(builtins)
# Iterate over the specially added builtins
for fn, _builtin_name in builtins_list:
mod = inspect.getmodule(fn)
if not mod:
raise RuntimeError(f"Module for {fn} not found")
builtin = _find_builtin(fn)
if builtin is not None:
schemas = torch._C._jit_get_schemas_for_operator(builtin)
for schema in schemas:
schema_str = _emit_schema(mod.__name__, fn.__name__, schema)
if "Tensor" in schema_str:
# Skip Tensor ops that have the same name as math functions
# (they will show up in the tensor methods section)
continue
                functions.append(schema_str)
return "``math`` Module", functions
def _get_global_builtins():
# Taken from the 'globals' map in torch/csrc/jit/frontend/ir_emitter.cpp
supported_builtins = [
"print",
"tuple",
"float",
"complex",
"int",
"bool",
"str",
"getattr",
"hasattr",
"isinstance",
"len",
"hex",
"oct",
"round",
"hash",
"min",
"max",
"abs",
"all",
"divmod",
"list",
"ord",
"chr",
"bin",
"range",
"zip",
"enumerate",
"sorted",
]
op_renames = {
"bool": "aten::Bool",
"int": "aten::Int",
"float": "aten::Float",
"complex": "aten::Complex",
"abs": "prim::abs",
"max": "prim::max",
"min": "prim::min",
"range": "fake::does_not_exist",
}
schemaless_op_explanations = {
"print": "Print any value",
"tuple": "Lists cannot be converted to tuples with this method since their size is not statically known",
"getattr": "Attribute name must be a literal string",
"hasattr": "Attribute name must be a literal string",
"isinstance": "Result is static",
"zip": "Arguments must be iterable. See :ref:`Iterables <jit_iterables>` for details.",
"enumerate": "Arguments must be iterable. See :ref:`Iterables <jit_iterables>` for details.",
"range": "Can only be used as an iterator in a for loop",
}
magic_methods = [
("complex", "__complex__"),
("float", "__float__"),
("int", "__int__"),
("bool", "__bool__"),
("str", "__str__"),
("len", "__len__"),
("hex", "__hex__"),
("oct", "__oct__"),
]
magic_methods_rows = []
for fn, magic_method in magic_methods:
magic_methods_rows.append(f'"{fn}", "``{magic_method}``"')
schematized_ops = []
schemaless_ops = []
for fn in supported_builtins:
op_name = f"aten::{fn}"
if fn in op_renames:
op_name = op_renames[fn]
schemas = torch._C._jit_get_schemas_for_operator(op_name)
for s in schemas:
schematized_ops.append(_emit_schema(None, fn, s, padding=0))
if len(schemas) > 0:
schematized_ops.append("")
else:
table_row = (
f'":external+python:py:obj:`{fn}`", "{schemaless_op_explanations[fn]}"'
)
schemaless_ops.append(table_row)
schematized_ops_str = "\n".join(schematized_ops)
schemaless_ops_str = "\n".join(schemaless_ops)
magic_methods_rows_str = "\n".join(magic_methods_rows)
schematized_ops_str = textwrap.indent(schematized_ops_str, "\t")
schemaless_ops_str = textwrap.indent(schemaless_ops_str, "\t")
magic_methods_rows_str = textwrap.indent(magic_methods_rows_str, "\t")
section = f"""
The functions in the following table are supported but do not have a static schema
.. csv-table::
:header: "Function", "Note"
{schemaless_ops_str}
The following functions will use the corresponding magic method on :any:`TorchScript classes`
.. csv-table::
:header: "Function", "Magic Method"
{magic_methods_rows_str}
These built-in functions use the schema
.. rst-class:: codeblock-height-limiter
::
{schematized_ops_str}
"""
return "Python Built-in Functions", section
def _list_supported_ops():
def emit_block(decls):
return "\n.. rst-class:: codeblock-height-limiter\n\n::\n\n{}\n".format(
"".join(f" {d}\n\n" for d in decls)
)
body = ""
op_gathering_fns = (
_get_tensor_ops,
_get_nn_functional_ops,
_get_torchscript_builtins,
_get_global_builtins,
_get_math_builtins,
)
for fn in op_gathering_fns:
header, items = fn()
link_target = header.replace("`", "").replace("-", "").lower().replace(" ", "-")
if isinstance(items, str):
section = f"{header}\n{'~' * len(header)}\n{items}\n"
else:
section = f"{header}\n{'~' * len(header)}\n{emit_block(items)}"
section = f".. _{link_target}:" + "\n\n" + section
body += section
return body
__doc__ = _list_supported_ops()
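# Illustrative sketch (editor's addition): since __doc__ is generated above, the listing can
# be inspected programmatically as well as through `help(torch.jit.supported_ops)`.
def _example_print_supported_ops(limit=1000):
    print(__doc__[:limit] if __doc__ else "")  # show only the first `limit` characters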
```
|
===========================================================================================================================
SOURCE CODE FILE: unsupported_tensor_ops.py
LINES: 7
SIZE: 2.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\jit\unsupported_tensor_ops.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from textwrap import dedent
from typing import Any
import torch.jit
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def _gen_unsupported_methods_properties():
tensor_attrs = set(filter(lambda x: x[0] != "_", dir(torch.Tensor)))
tensor = torch.tensor([2])
funcs_template = dedent(
"""
def func(x):
return x.{op}()
"""
)
deprecated_apis = {
"volatile",
"resize",
"reinforce",
"new",
"name",
"map2_",
"has_names",
"grad_fn",
"resize_as",
}
tensor_attrs = tensor_attrs - deprecated_apis
properties = []
methods = []
sorted_tensor_attrs = sorted(tensor_attrs, key=lambda x: x.lower())
for attr in sorted_tensor_attrs:
funcs_str = funcs_template.format(op=attr)
scope: dict[str, Any] = {}
execWrapper(funcs_str, globals(), scope)
try:
torch.jit.CompilationUnit(funcs_str)
except Exception as e:
if "nonexistent attribute" not in repr(e):
continue
attr_repr = repr(getattr(tensor, attr))
if "bound method" in attr_repr or "built-in method" in attr_repr:
methods.append(attr)
else:
properties.append(attr)
mapped_methods = ("\t* :meth:`~torch.Tensor." + x + r"`" for x in methods)
mapped_properties = ("\t* :attr:`~torch.Tensor." + x + r"`" for x in properties)
return "\n".join(mapped_methods), "\n".join(mapped_properties)
def _list_unsupported_tensor_ops():
header = """\n\n
Unsupported Tensor Methods
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
methods, properties = _gen_unsupported_methods_properties()
return (
header
+ "\n"
+ methods
+ """
Unsupported Tensor Properties
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
+ "\n"
+ properties
)
__doc__ = _list_unsupported_tensor_ops()
```
|
========================================================================================================
SOURCE CODE FILE: library.py
LINES: 1
SIZE: 60.95 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\library.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import contextlib
import functools
import inspect
import re
import sys
import traceback
import weakref
from collections.abc import Sequence
from typing import (
Any,
Callable,
Literal,
Optional,
overload,
TYPE_CHECKING,
TypeVar,
Union,
)
from typing_extensions import deprecated, ParamSpec
import torch
import torch._library as _library
from torch._library.custom_ops import (
_cast,
_maybe_get_opdef,
custom_op,
CustomOpDef,
device_types_t,
)
from torch._library.infer_schema import infer_schema # noqa: F401
from torch._library.triton import triton_op, wrap_triton
from torch._ops import OpOverload
from torch.types import _dtype
__all__ = [
"Library",
"impl",
"define",
"fallthrough_kernel",
"impl_abstract",
"register_autocast",
"register_fake",
"register_torch_dispatch",
"register_vmap",
"get_ctx",
"custom_op",
"triton_op",
"wrap_triton",
"infer_schema",
]
_T = TypeVar("_T")
_P = ParamSpec("_P")
# Set containing the combination of (namespace, operator, DispatchKey) for which a new kernel has been registered
# The keys in the set are of the form `namespace + "/" + op_name + "/" + dispatch_key`.
# This set is maintained to ensure that two libraries don't try to override the exact same functionality to avoid
# libraries calling into kernels not intended to be called.
_impls: set[str] = set()
_defs: set[str] = set()
# prim is reserved by TorchScript interpreter
_reserved_namespaces = ["prim"]
def fallthrough_kernel():
"""
A dummy function to pass to ``Library.impl`` in order to register a fallthrough.
"""
raise NotImplementedError("fallthrough_kernel() should never be called.")
class Library:
"""
A class to create libraries that can be used to register new operators or
override operators in existing libraries from Python.
A user can optionally pass in a dispatch keyname if they only want to register
kernels corresponding to only one specific dispatch key.
To create a library to override operators in an existing library (with name ns), set the kind to "IMPL".
To create a new library (with name ns) to register new operators, set the kind to "DEF".
To create a fragment of a possibly existing library to register operators (and bypass
the limitation that there is only one library for a given namespace), set the kind to
"FRAGMENT".
Args:
ns: library name
kind: "DEF", "IMPL" (default: "IMPL"), "FRAGMENT"
dispatch_key: PyTorch dispatch key (default: "")
"""
def __init__(self, ns, kind, dispatch_key=""):
if kind not in ("IMPL", "DEF", "FRAGMENT"):
raise ValueError("Unsupported kind: ", kind)
if ns in _reserved_namespaces and (kind == "DEF" or kind == "FRAGMENT"):
raise ValueError(
ns,
" is a reserved namespace. Please try creating a library with another name.",
)
if torch._running_with_deploy():
_library.utils.warn_deploy()
return
frame = traceback.extract_stack(limit=3)[0]
filename, lineno = frame.filename, frame.lineno
self.m: Optional[Any] = torch._C._dispatch_library(
kind, ns, dispatch_key, filename, lineno
)
self.ns = ns
self._op_defs: set[str] = set()
self._op_impls: set[str] = set()
self._registration_handles: list[torch._library.utils.RegistrationHandle] = []
self.kind = kind
self.dispatch_key = dispatch_key
# Use a finalizer to setup the "destructor" instead of __del__.
# Python __del__ can lead to weird things (globals and locals may already
# be gone when __del__ actually gets called!). finalizers help the
# situation because it lets us capture references and keeps them alive
weakref.finalize(
self,
_del_library,
_impls,
self._op_impls,
_defs,
self._op_defs,
self._registration_handles,
)
def __repr__(self):
return f"Library(kind={self.kind}, ns={self.ns}, dispatch_key={self.dispatch_key})>"
def define(self, schema, alias_analysis="", *, tags=()):
r"""Defines a new operator and its semantics in the ns namespace.
Args:
schema: function schema to define a new operator.
alias_analysis (optional): Indicates if the aliasing properties of the operator arguments can be
inferred from the schema (default behavior) or not ("CONSERVATIVE").
tags (Tag | Sequence[Tag]): one or more torch.Tag to apply to this
operator. Tagging an operator changes the operator's behavior
under various PyTorch subsystems; please read the docs for the
torch.Tag carefully before applying it.
Returns:
name of the operator as inferred from the schema.
Example::
>>> my_lib = Library("mylib", "DEF")
>>> my_lib.define("sum(Tensor self) -> Tensor")
"""
if torch._running_with_deploy():
_library.utils.warn_deploy()
return
# This is added because we also want to disallow PURE_FUNCTION alias analysis which is a valid
# AliasAnalysis type in C++
if alias_analysis not in ["", "FROM_SCHEMA", "CONSERVATIVE"]:
raise RuntimeError(f"Invalid alias_analysis type {alias_analysis}")
assert self.m is not None
if isinstance(tags, torch.Tag):
tags = (tags,)
name = schema.split("(")[0]
packet_name = name.split(".")[0] if "." in name else name
has_preexisting_packet = hasattr(torch.ops, self.ns) and hasattr(
getattr(torch.ops, self.ns), packet_name
)
result = self.m.define(schema, alias_analysis, tuple(tags))
name = schema.split("(")[0]
qualname = self.ns + "::" + name
# If the OpOverloadPacket exists already, then this means we're adding a
# new OpOverload for it. Refresh the packet to include the new OpOverload.
if has_preexisting_packet:
ns = getattr(torch.ops, self.ns)
packet = getattr(ns, packet_name)
torch._ops._refresh_packet(packet)
self._op_defs.add(qualname)
_defs.add(qualname)
return result
def _register_fake(self, op_name, fn, _stacklevel=1):
r"""Registers the fake impl for an operator defined in the library."""
if torch._running_with_deploy():
_library.utils.warn_deploy()
return
source = torch._library.utils.get_source(_stacklevel + 1)
frame = sys._getframe(_stacklevel)
caller_module = inspect.getmodule(frame)
# Can be none if you call register_fake from somewhere there isn't a module
# (e.g. __main__)
caller_module_name = None if caller_module is None else caller_module.__name__
# TODO(rzou): We're gonna need to stage this change with torchvision,
# since torchvision is github first.
if caller_module_name is not None and caller_module_name.startswith(
"torchvision."
):
caller_module_name = None
qualname = f"{self.ns}::{op_name}"
entry = torch._library.simple_registry.singleton.find(qualname)
if caller_module_name is not None:
func_to_register = _check_pystubs_once(fn, qualname, caller_module_name)
else:
func_to_register = fn
handle = entry.fake_impl.register(func_to_register, source)
self._registration_handles.append(handle)
def _register_torch_dispatch_rule(self, op_name, torch_dispatch_class, fn):
r"""Registers a torch_dispatch rule for the given operator and torch_dispatch_class.
This allows for open registration to specify the behavior between the operator
and the torch_dispatch_class without needing to modify the torch_dispatch_class
or the operator directly.
The torch_dispatch_class is either a Tensor subclass with `__torch_dispatch__` or a
TorchDispatchMode.
If it is a Tensor subclass, we expect fn to have the following signature:
(cls, func: OpOverload, types: Tuple[type, ...], args, kwargs) -> Any
If it is a TorchDispatchMode, we expect fn to have the following signature:
(mode, func: OpOverload, types: Tuple[type, ...], args, kwargs) -> Any
"""
if torch._running_with_deploy():
_library.utils.warn_deploy()
return
qualname = f"{self.ns}::{op_name}"
entry = torch._library.simple_registry.singleton.find(qualname)
handle = entry.torch_dispatch_rules.register(torch_dispatch_class, fn)
self._registration_handles.append(handle)
def _impl_with_aoti_compile(self, op_name, dispatch_key=""):
r"""Register the operator to use the AOTI-compiled implementation.
Args:
op_name: operator name (along with the overload) or OpOverload object.
dispatch_key: dispatch key that the input function should be registered for. By default, it uses
the dispatch key that the library was created with.
Example::
>>> my_lib = Library("aten", "IMPL")
>>> my_lib._impl_with_aoti_compile("div.Tensor", "CPU")
"""
if torch._running_with_deploy():
_library.utils.warn_deploy()
return
if dispatch_key == "":
dispatch_key = self.dispatch_key
assert torch.DispatchKeySet(dispatch_key).has(torch._C.DispatchKey.Dense)
if isinstance(op_name, str):
name = op_name
elif isinstance(op_name, OpOverload):
name = op_name._schema.name
overload_name = op_name._schema.overload_name
if overload_name != "":
name = name + "." + overload_name
else:
raise RuntimeError(
"_impl_with_aoti_compile should be passed either a name or an OpOverload object "
"as the first argument"
)
key = self.ns + "/" + name.split("::")[-1] + "/" + dispatch_key
if key in _impls:
# TODO: in future, add more info about where the existing function is registered (this info is
# today already returned by the C++ warning when _impl_with_aoti_compile is called but we error out before that)
raise RuntimeError(
"This is not allowed since there's already a kernel registered from python overriding {}"
"'s behavior for {} dispatch key and {} namespace.".format(
name.split("::")[-1], dispatch_key, self.ns
)
)
assert self.m is not None
impl_fn: Callable = self.m.impl_with_aoti_compile
impl_fn(self.ns, name.split("::")[-1], dispatch_key)
_impls.add(key)
self._op_impls.add(key)
def impl(self, op_name, fn, dispatch_key="", *, with_keyset=False):
r"""Registers the function implementation for an operator defined in the library.
Args:
op_name: operator name (along with the overload) or OpOverload object.
fn: function that's the operator implementation for the input dispatch key or :func:`~fallthrough_kernel`
to register a fallthrough.
dispatch_key: dispatch key that the input function should be registered for. By default, it uses
the dispatch key that the library was created with.
with_keyset: flag controlling if the current dispatcher call keyset should be passed as the first argument
to :attr:`fn` when calling. This should be used to create the appropriate keyset for redispatch calls.
Example::
>>> my_lib = Library("aten", "IMPL")
>>> def div_cpu(self, other):
>>> return self * (1 / other)
>>> my_lib.impl("div.Tensor", div_cpu, "CPU")
"""
if torch._running_with_deploy():
_library.utils.warn_deploy()
return
if not callable(fn):
raise TypeError(
f"Input function is required to be a callable but found type {type(fn)}"
)
if dispatch_key == "":
dispatch_key = self.dispatch_key
if isinstance(op_name, str):
name = op_name
elif isinstance(op_name, OpOverload):
name = op_name._schema.name
overload_name = op_name._schema.overload_name
if overload_name != "":
name = name + "." + overload_name
else:
raise RuntimeError(
"impl should be passed either a name or an OpOverload object as the first argument"
)
key = self.ns + "/" + name.split("::")[-1] + "/" + dispatch_key
if key in _impls:
# TODO: in future, add more info about where the existing function is registered (this info is
# today already returned by the C++ warning when impl is called but we error out before that)
raise RuntimeError(
"This is not allowed since there's already a kernel registered from python overriding {}"
"'s behavior for {} dispatch key and {} namespace.".format(
name.split("::")[-1], dispatch_key, self.ns
)
)
if dispatch_key == "Meta":
dispatcher_op_name = name
if "::" not in dispatcher_op_name:
dispatcher_op_name = f"{self.ns}::{dispatcher_op_name}"
# Internally, we shouldn't be registering meta kernels for any operators that
# have CompositeImplicitAutograd kernels.
# Instead, we should be letting those decompositions run, and writing meta kernels
# only for the base operators.
if torch._C._dispatch_has_kernel_for_dispatch_key(
dispatcher_op_name, "CompositeImplicitAutograd"
):
raise RuntimeError(
f"We should not register a meta kernel directly to the operator '{name}',"
" because it has a CompositeImplicitAutograd kernel in core."
" Instead we should let the operator decompose, and ensure that we have meta kernels"
" for the base ops that it decomposes into."
)
assert self.m is not None
self.m.impl(
name,
dispatch_key if dispatch_key != "" else "CompositeImplicitAutograd",
fn,
with_keyset,
)
_impls.add(key)
self._op_impls.add(key)
def fallback(self, fn, dispatch_key="", *, with_keyset=False):
r"""Registers the function implementation as the fallback for the given key.
This function only works for a library with global namespace ("_").
Args:
fn: function used as fallback for the given dispatch key or :func:`~fallthrough_kernel`
to register a fallthrough.
dispatch_key: dispatch key that the input function should be registered for. By default, it uses
the dispatch key that the library was created with.
with_keyset: flag controlling if the current dispatcher call keyset should be passed as the first argument
to :attr:`fn` when calling. This should be used to create the appropriate keyset for redispatch calls.
Example::
>>> my_lib = Library("_", "IMPL")
>>> def fallback_kernel(op, *args, **kwargs):
>>> # Handle all autocast ops generically
>>> # ...
>>> my_lib.fallback(fallback_kernel, "Autocast")
"""
if torch._running_with_deploy():
_library.utils.warn_deploy()
return
if dispatch_key == "":
dispatch_key = self.dispatch_key
if self.ns != "_":
raise RuntimeError(
f"""Fallback can only be registered using libary fragment on the global namespace "_" but it is {self.ns}"""
)
assert dispatch_key != ""
assert self.m is not None
self.m.fallback(dispatch_key, fn, with_keyset)
def _destroy(self):
if self.m is not None:
self.m.reset()
self.m = None
for handle in self._registration_handles:
handle.destroy()
self._registration_handles.clear()
global _impls
_impls -= self._op_impls
for name in self._op_defs:
# Delete the cached torch.ops.ns.foo if it was registered.
# Otherwise, accessing it leads to a segfault.
# It's possible that we only registered an overload in this Library
# and another library owns an alive overload.
# That's OK - the next time torch.ops.ns.foo gets called, it'll be
# recomputed to point at the right collection of overloads.
ns, name_with_overload = name.split("::")
name = name_with_overload.split(".")[0]
if not hasattr(torch.ops, ns):
continue
namespace = getattr(torch.ops, ns)
if not hasattr(namespace, name):
continue
delattr(namespace, name)
namespace._dir.remove(name)
def _del_library(
captured_impls,
op_impls,
captured_defs,
op_defs,
registration_handles,
):
captured_impls -= op_impls
captured_defs -= op_defs
for handle in registration_handles:
handle.destroy()
@contextlib.contextmanager
def _scoped_library(*args, **kwargs):
try:
lib = Library(*args, **kwargs)
yield lib
finally:
lib._destroy()
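# Illustrative sketch (editor's addition): _scoped_library is convenient in tests, where a
# registration should not outlive the test. The "mylib" namespace, the operator name, and
# its schema are hypothetical.
def _example_scoped_registration():
    with _scoped_library("mylib", "FRAGMENT") as lib:
        lib.define("editor_example_add_one(Tensor x) -> Tensor")
        lib.impl("editor_example_add_one", lambda x: x + 1, "CompositeExplicitAutograd")
        result = torch.ops.mylib.editor_example_add_one(torch.zeros(3))
    # The library (and every registration made through it) is destroyed on exit.
    return result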
_keep_alive: list[Library] = []
NAMELESS_SCHEMA = re.compile(r"\(.*\) -> .*")
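# Illustrative note (editor's addition): the schema passed to torch.library.define below must
# be "nameless" -- it starts at the parenthesized argument list, because the operator name is
# supplied separately through `qualname`.
assert NAMELESS_SCHEMA.fullmatch("(Tensor x) -> Tensor") is not None
assert NAMELESS_SCHEMA.fullmatch("mylib::sin(Tensor x) -> Tensor") is None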
@functools.singledispatch
def define(qualname, schema, *, lib=None, tags=()):
r"""Defines a new operator.
    In PyTorch, defining an op (short for "operator") is a two-step process:
- we need to define the op (by providing an operator name and schema)
- we need to implement behavior for how the operator interacts with
various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc.
    This entrypoint defines the custom operator (the first step);
you must then perform the second step by calling various
``impl_*`` APIs, like :func:`torch.library.impl` or
:func:`torch.library.register_fake`.
Args:
qualname (str): The qualified name for the operator. Should be
a string that looks like "namespace::name", e.g. "aten::sin".
Operators in PyTorch need a namespace to
avoid name collisions; a given operator may only be created once.
If you are writing a Python library, we recommend the namespace to
be the name of your top-level module.
schema (str): The schema of the operator. E.g. "(Tensor x) -> Tensor"
for an op that accepts one Tensor and returns one Tensor. It does
not contain the operator name (that is passed in ``qualname``).
lib (Optional[Library]): If provided, the lifetime of this operator
will be tied to the lifetime of the Library object.
tags (Tag | Sequence[Tag]): one or more torch.Tag to apply to this
operator. Tagging an operator changes the operator's behavior
under various PyTorch subsystems; please read the docs for the
torch.Tag carefully before applying it.
Example::
>>> import torch
>>> import numpy as np
>>>
>>> # Define the operator
>>> torch.library.define("mylib::sin", "(Tensor x) -> Tensor")
>>>
>>> # Add implementations for the operator
>>> @torch.library.impl("mylib::sin", "cpu")
>>> def f(x):
>>> return torch.from_numpy(np.sin(x.numpy()))
>>>
>>> # Call the new operator from torch.ops.
>>> x = torch.randn(3)
>>> y = torch.ops.mylib.sin(x)
>>> assert torch.allclose(y, x.sin())
"""
if not isinstance(qualname, str):
raise ValueError(
f"define(qualname, schema): expected qualname "
f"to be instance of str, got {type(qualname)}"
)
namespace, name = torch._library.utils.parse_namespace(qualname)
if lib is None:
lib = Library(namespace, "FRAGMENT")
_keep_alive.append(lib)
if not NAMELESS_SCHEMA.fullmatch(schema):
raise ValueError(
f"define(qualname, schema, ...): expected schema "
f'to look like e.g. "(Tensor x) -> Tensor" but '
f'got "{schema}"'
)
lib.define(name + schema, alias_analysis="", tags=tags)
@define.register
def _(lib: Library, schema, alias_analysis=""):
"""The old torch.library.define.
We're keeping this around for BC reasons
"""
def wrap(f):
name = lib.define(schema, alias_analysis)
lib.impl(name, f)
return f
return wrap
@overload
def impl(
qualname: str,
types: Union[str, Sequence[str]],
func: Literal[None] = None,
*,
lib: Optional[Library] = None,
) -> Callable[[Callable[..., object]], None]: ...
@overload
def impl(
qualname: str,
types: Union[str, Sequence[str]],
func: Callable[..., object],
*,
lib: Optional[Library] = None,
) -> None: ...
# Deprecated BC API
@overload
def impl(
lib: Library,
name: str,
dispatch_key: str = "",
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]: ...
@functools.singledispatch
def impl(
qualname: str,
types: Union[str, Sequence[str]],
func: Optional[Callable[_P, _T]] = None,
*,
lib: Optional[Library] = None,
) -> object:
"""Register an implementation for a device type for this operator.
You may pass "default" for ``types`` to register this implementation as the
default implementation for ALL device types.
Please only use this if the implementation truly supports all device types;
for example, this is true if it is a composition of built-in PyTorch operators.
This API may be used as a decorator. You can use nested decorators
with this API provided they return a function and are placed inside
this API (see Example 2).
Some valid types are: "cpu", "cuda", "xla", "mps", "ipu", "xpu".
Args:
qualname (str): Should be a string that looks like "namespace::operator_name".
types (str | Sequence[str]): The device types to register an impl to.
lib (Optional[Library]): If provided, the lifetime of this registration
will be tied to the lifetime of the Library object.
Examples:
>>> import torch
>>> import numpy as np
>>> # Example 1: Register function.
>>> # Define the operator
>>> torch.library.define("mylib::mysin", "(Tensor x) -> Tensor")
>>>
>>> # Add implementations for the cpu device
>>> @torch.library.impl("mylib::mysin", "cpu")
>>> def f(x):
>>> return torch.from_numpy(np.sin(x.numpy()))
>>>
>>> x = torch.randn(3)
>>> y = torch.ops.mylib.mysin(x)
>>> assert torch.allclose(y, x.sin())
>>>
>>> # Example 2: Register function with decorator.
>>> def custom_decorator(func):
>>> def wrapper(*args, **kwargs):
>>> return func(*args, **kwargs) + 1
>>> return wrapper
>>>
>>> # Define the operator
>>> torch.library.define("mylib::sin_plus_one", "(Tensor x) -> Tensor")
>>>
>>> # Add implementations for the operator
>>> @torch.library.impl("mylib::sin_plus_one", "cpu")
>>> @custom_decorator
>>> def f(x):
>>> return torch.from_numpy(np.sin(x.numpy()))
>>>
>>> # Call the new operator from torch.ops.
>>> x = torch.randn(3)
>>>
>>> y1 = torch.ops.mylib.sin_plus_one(x)
>>> y2 = torch.sin(x) + 1
>>> assert torch.allclose(y1, y2)
"""
return _impl(qualname, types, func, lib=lib, disable_dynamo=False)
if not TYPE_CHECKING:
@impl.register
def _(
lib: Library, name: str, dispatch_key: str = ""
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
"""Legacy torch.library.impl API. Kept around for BC"""
def wrap(f: Callable[_P, _T]) -> Callable[_P, _T]:
lib.impl(name, f, dispatch_key)
return f
return wrap
@overload
def _impl(
qualname: str,
types: Union[str, Sequence[str]],
func: Literal[None] = None,
*,
lib: Optional[Library] = None,
disable_dynamo: bool = False,
) -> Callable[[Callable[..., object]], None]: ...
@overload
def _impl(
qualname: str,
types: Union[str, Sequence[str]],
func: Callable[..., object],
*,
lib: Optional[Library] = None,
disable_dynamo: bool = False,
) -> None: ...
def _impl(
qualname: str,
types: Union[str, Sequence[str]],
func: Optional[Callable[..., object]] = None,
*,
lib: Optional[Library] = None,
disable_dynamo: bool = False,
) -> Optional[Callable[[Callable[..., object]], None]]:
# See impl()
if isinstance(types, str):
types = (types,)
keys = set({})
for typ in types:
is_dispatch_key = torch._C._parse_dispatch_key(typ)
if is_dispatch_key:
# We also support passing a DispatchKey to impl. Please prefer using
# the higher-level torch.library APIs and only pass DispatchKey to
# torch.library.impl with caution (or even better, don't use this
# option and file an issue on GitHub for what you need).
# We don't advertise this to users because
# it is very easy to shoot yourself in the foot.
keys.add(typ)
else:
keys.add(_device_type_to_key(typ))
def register_(func: Callable[..., object]) -> None:
namespace, _ = torch._library.utils.parse_namespace(qualname)
if lib is None:
use_lib = Library(namespace, "FRAGMENT")
_keep_alive.append(use_lib)
else:
use_lib = lib
if disable_dynamo:
@torch._disable_dynamo
def func_no_dynamo(*args, **kwargs):
return func(*args, **kwargs)
for key in keys:
use_lib.impl(qualname, func_no_dynamo, key)
else:
for key in keys:
use_lib.impl(qualname, func, key)
if func is None:
return register_
else:
register_(func)
return None
def _device_type_to_key(device_type: str) -> str:
if device_type == "default":
# This is technically not correct, because although all device_type
# DispatchKeys are included in CompositeExplicitAutograd,
# not everything in CompositeExplicitAutograd is associated with a
# device_type. I don't really care that much about the difference.
return "CompositeExplicitAutograd"
return torch._C._dispatch_key_for_device(device_type)
@deprecated(
"`torch.library.impl_abstract` was renamed to `torch.library.register_fake`. Please use that "
"instead; we will remove `torch.library.impl_abstract` in a future version of PyTorch.",
category=FutureWarning,
)
def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):
r"""This API was renamed to :func:`torch.library.register_fake` in PyTorch 2.4.
Please use that instead.
"""
if func is not None:
_stacklevel = _stacklevel + 1
return register_fake(qualname, func, lib=lib, _stacklevel=_stacklevel)
_op_identifier = Union[
str, "torch._ops.OpOverload", "torch._library.custom_ops.CustomOpDef"
]
def register_kernel(
op: _op_identifier,
device_types: device_types_t,
func: Optional[Callable] = None,
/,
*,
lib: Optional[Library] = None,
):
"""Register an implementation for a device type for this operator.
Some valid device_types are: "cpu", "cuda", "xla", "mps", "ipu", "xpu".
This API may be used as a decorator.
Args:
op (str | OpOverload): The operator to register an impl to.
device_types (None | str | Sequence[str]): The device_types to register an impl to.
If None, we will register to all device types -- please only use
this option if your implementation is truly device-type-agnostic.
func (Callable): The function to register as the implementation for
the given device types.
        lib (Optional[Library]): If provided, the lifetime of this registration
            will be tied to the lifetime of the Library object.
Examples::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
>>> import torch
>>> from torch import Tensor
>>> from torch.library import custom_op
>>> import numpy as np
>>>
>>> # Create a custom op that works on cpu
>>> @custom_op("mylib::numpy_sin", mutates_args=(), device_types="cpu")
>>> def numpy_sin(x: Tensor) -> Tensor:
>>> x_np = x.numpy()
>>> y_np = np.sin(x_np)
>>> return torch.from_numpy(y_np)
>>>
>>> # Add implementations for the cuda device
>>> @torch.library.register_kernel("mylib::numpy_sin", "cuda")
>>> def _(x):
>>> x_np = x.cpu().numpy()
>>> y_np = np.sin(x_np)
>>> return torch.from_numpy(y_np).to(device=x.device)
>>>
>>> x_cpu = torch.randn(3)
>>> x_cuda = x_cpu.cuda()
>>> assert torch.allclose(numpy_sin(x_cpu), x_cpu.sin())
>>> assert torch.allclose(numpy_sin(x_cuda), x_cuda.sin())
"""
if not isinstance(
op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef)
):
raise ValueError(
f"register_kernel({op}): got unexpected type for op: {type(op)}"
)
if isinstance(op, torch._ops.OpOverload):
op = op._name
opdef = _maybe_get_opdef(op)
if opdef is not None:
return opdef.register_kernel(device_types, func)
assert isinstance(op, str)
if device_types is None:
device_types = "CompositeExplicitAutograd"
return _impl(op, device_types, func, lib=lib, disable_dynamo=True)
def register_autocast(
op: _op_identifier,
device_type: str,
cast_inputs: _dtype,
/,
*,
lib: Optional[Library] = None,
):
r"""Register an autocast dispatch rule for this custom op.
    Valid `device_type` values are "cpu" and "cuda".
Args:
op (str | OpOverload): The operator to register an autocast dispatch rule to.
        device_type (str): Device type to use. 'cuda' or 'cpu'.
The type is the same as the `type` attribute of a :class:`torch.device`.
Thus, you may obtain the device type of a tensor using `Tensor.device.type`.
cast_inputs (:class:`torch.dtype`): When custom op runs in an autocast-enabled region,
casts incoming floating-point Tensors to the target dtype (non-floating-point Tensors
are not affected), then executes custom op with autocast disabled.
        lib (Optional[Library]): If provided, the lifetime of this registration
            will be tied to the lifetime of the Library object.
Examples::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
>>> import torch
>>> from torch import Tensor
>>> from torch.library import custom_op
>>>
>>> # Create a custom op that works on cuda
>>> @torch.library.custom_op("mylib::my_sin", mutates_args=())
>>> def my_sin(x: Tensor) -> Tensor:
>>> return torch.sin(x)
>>>
>>> # Register autocast dispatch rule for the cuda device
>>> torch.library.register_autocast("mylib::my_sin", "cuda", torch.float16)
>>>
>>> x = torch.randn(3, dtype=torch.float32, device="cuda")
>>> with torch.autocast("cuda", dtype=torch.float16):
>>> y = torch.ops.mylib.my_sin(x)
>>> assert y.dtype == torch.float16
"""
if not isinstance(
op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef)
):
raise ValueError(
f"register_autocast({op}): got unexpected type for op: {type(op)}"
)
if device_type not in ["cpu", "cuda"]:
raise ValueError(f"Unknown device type: {device_type}")
if isinstance(op, torch._ops.OpOverload):
op = op._name
opdef = _maybe_get_opdef(op)
if opdef is not None:
return opdef.register_autocast(device_type, cast_inputs)
assert isinstance(op, str)
qualname = op
_op = torch._library.utils.lookup_op(qualname)
namespace, opname = torch._library.utils.parse_namespace(qualname)
if lib is None:
lib = Library(namespace, "FRAGMENT")
_keep_alive.append(lib)
def kernel(_, *args, **kwargs):
assert len(kwargs) == 0, "Custom ops do not support kwargs yet."
autocast_keyset = torch._C.DispatchKeySet(
torch._C.DispatchKey.AutocastCPU
) | torch._C.DispatchKeySet(torch._C.DispatchKey.AutocastCUDA)
with torch._C._ExcludeDispatchKeyGuard(autocast_keyset):
return _op(*_cast(args, device_type, cast_inputs))
if device_type == "cuda":
return lib.impl(opname, kernel, "AutocastCUDA", with_keyset=True)
else:
# device_type is "cpu"
return lib.impl(opname, kernel, "AutocastCPU", with_keyset=True)
def register_fake(
op: _op_identifier,
func: Optional[Callable] = None,
/,
*,
lib: Optional[Library] = None,
_stacklevel: int = 1,
):
r"""Register a FakeTensor implementation ("fake impl") for this operator.
    Also sometimes known as a "meta kernel" or "abstract impl".
    A "FakeTensor implementation" specifies the behavior of this operator on
Tensors that carry no data ("FakeTensor"). Given some input Tensors with
certain properties (sizes/strides/storage_offset/device), it specifies
what the properties of the output Tensors are.
The FakeTensor implementation has the same signature as the operator.
It is run for both FakeTensors and meta tensors. To write a FakeTensor
implementation, assume that all Tensor inputs to the operator are
regular CPU/CUDA/Meta tensors, but they do not have storage, and
you are trying to return regular CPU/CUDA/Meta tensor(s) as output.
The FakeTensor implementation must consist of only PyTorch operations
(and may not directly access the storage or data of any input or
intermediate Tensors).
This API may be used as a decorator (see examples).
For a detailed guide on custom ops, please see
https://pytorch.org/tutorials/advanced/custom_ops_landing_page.html
Examples:
>>> import torch
>>> import numpy as np
>>> from torch import Tensor
>>>
>>> # Example 1: an operator without data-dependent output shape
>>> @torch.library.custom_op("mylib::custom_linear", mutates_args=())
>>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
>>> raise NotImplementedError("Implementation goes here")
>>>
>>> @torch.library.register_fake("mylib::custom_linear")
>>> def _(x, weight, bias):
>>> assert x.dim() == 2
>>> assert weight.dim() == 2
>>> assert bias.dim() == 1
>>> assert x.shape[1] == weight.shape[1]
>>> assert weight.shape[0] == bias.shape[0]
>>> assert x.device == weight.device
>>>
>>> return (x @ weight.t()) + bias
>>>
>>> with torch._subclasses.fake_tensor.FakeTensorMode():
>>> x = torch.randn(2, 3)
>>> w = torch.randn(3, 3)
>>> b = torch.randn(3)
>>> y = torch.ops.mylib.custom_linear(x, w, b)
>>>
>>> assert y.shape == (2, 3)
>>>
>>> # Example 2: an operator with data-dependent output shape
>>> @torch.library.custom_op("mylib::custom_nonzero", mutates_args=())
>>> def custom_nonzero(x: Tensor) -> Tensor:
>>> x_np = x.numpy(force=True)
>>> res = np.stack(np.nonzero(x_np), axis=1)
>>> return torch.tensor(res, device=x.device)
>>>
>>> @torch.library.register_fake("mylib::custom_nonzero")
>>> def _(x):
>>> # Number of nonzero-elements is data-dependent.
>>> # Since we cannot peek at the data in an fake impl,
>>> # we use the ctx object to construct a new symint that
>>> # represents the data-dependent size.
>>> ctx = torch.library.get_ctx()
>>> nnz = ctx.new_dynamic_size()
>>> shape = [nnz, x.dim()]
>>> result = x.new_empty(shape, dtype=torch.int64)
>>> return result
>>>
>>> from torch.fx.experimental.proxy_tensor import make_fx
>>>
>>> x = torch.tensor([0, 1, 2, 3, 4, 0])
>>> trace = make_fx(torch.ops.mylib.custom_nonzero, tracing_mode="symbolic")(x)
>>> trace.print_readable()
>>>
>>> assert torch.allclose(trace(x), torch.ops.mylib.custom_nonzero(x))
"""
if not isinstance(
op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef)
):
raise ValueError(f"register_fake({op}): got unexpected type for op: {type(op)}")
if isinstance(op, torch._ops.OpOverload):
op = op._name
opdef = _maybe_get_opdef(op)
if opdef is not None:
if func is None:
return opdef.register_fake
else:
return opdef.register_fake(func)
assert isinstance(op, str)
stacklevel = _stacklevel
def register(func):
namespace, op_name = torch._library.utils.parse_namespace(op)
if lib is None:
use_lib = Library(namespace, "FRAGMENT")
_keep_alive.append(use_lib)
else:
use_lib = lib
use_lib._register_fake(op_name, func, _stacklevel=stacklevel + 1)
return func
if func is None:
return register
else:
stacklevel += 1
return register(func)
def register_autograd(
op: _op_identifier,
backward: Callable,
/,
*,
setup_context: Optional[Callable] = None,
lib=None,
) -> None:
r"""Register a backward formula for this custom op.
In order for an operator to work with autograd, you need to register
a backward formula:
1. You must tell us how to compute gradients during the backward pass
by providing us a "backward" function.
2. If you need any values from the forward to compute gradients, you can
use `setup_context` to save values for backward.
``backward`` runs during the backward pass. It accepts ``(ctx, *grads)``:
- ``grads`` is one or more gradients. The number of gradients matches
the number of outputs of the operator.
The ``ctx`` object is `the same ctx object <context_method_mixins>`_ used by
:class:`torch.autograd.Function`. The semantics of ``backward_fn`` are the
same as :meth:`torch.autograd.Function.backward`.
``setup_context(ctx, inputs, output)`` runs during the forward pass.
Please save quantities needed for backward onto the ``ctx`` object via
either :meth:`torch.autograd.function.FunctionCtx.save_for_backward`
or assigning them as attributes of ``ctx``. If your custom op has
kwarg-only arguments, we expect the signature of ``setup_context``
to be ``setup_context(ctx, inputs, keyword_only_inputs, output)``.
Both ``setup_context_fn`` and ``backward_fn`` must be traceable. That is,
they may not directly access :meth:`torch.Tensor.data_ptr` and they must
not depend on or mutate global state. If you need a non-traceable backward,
you can make it a separate custom_op that you call inside ``backward_fn``.
If you need different autograd behavior on different devices, then we
recommend creating two different custom operators, one for each device
that needs different behavior, and switching between them at runtime.
Examples:
>>> import torch
>>> import numpy as np
>>> from torch import Tensor
>>>
>>> @torch.library.custom_op("mylib::numpy_sin", mutates_args=())
>>> def numpy_sin(x: Tensor) -> Tensor:
>>> x_np = x.cpu().numpy()
>>> y_np = np.sin(x_np)
>>> return torch.from_numpy(y_np).to(device=x.device)
>>>
>>> def setup_context(ctx, inputs, output) -> Tensor:
>>> x, = inputs
>>> ctx.save_for_backward(x)
>>>
>>> def backward(ctx, grad):
>>> x, = ctx.saved_tensors
>>> return grad * x.cos()
>>>
>>> torch.library.register_autograd(
... "mylib::numpy_sin", backward, setup_context=setup_context
... )
>>>
>>> x = torch.randn(3, requires_grad=True)
>>> y = numpy_sin(x)
>>> (grad_x,) = torch.autograd.grad(y, x, torch.ones_like(y))
>>> assert torch.allclose(grad_x, x.cos())
>>>
>>> # Example with a keyword-only arg
>>> @torch.library.custom_op("mylib::numpy_mul", mutates_args=())
>>> def numpy_mul(x: Tensor, *, val: float) -> Tensor:
>>> x_np = x.cpu().numpy()
>>> y_np = x_np * val
>>> return torch.from_numpy(y_np).to(device=x.device)
>>>
>>> def setup_context(ctx, inputs, keyword_only_inputs, output) -> Tensor:
>>> ctx.val = keyword_only_inputs["val"]
>>>
>>> def backward(ctx, grad):
>>> return grad * ctx.val
>>>
>>> torch.library.register_autograd(
... "mylib::numpy_mul", backward, setup_context=setup_context
... )
>>>
>>> x = torch.randn(3, requires_grad=True)
>>> y = numpy_mul(x, val=3.14)
>>> (grad_x,) = torch.autograd.grad(y, x, torch.ones_like(y))
>>> assert torch.allclose(grad_x, torch.full_like(x, 3.14))
"""
if not isinstance(
op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef)
):
raise ValueError(
f"register_autograd({op}): got unexpected type for op: {type(op)}"
)
if isinstance(op, torch._ops.OpOverload):
op = op._name
opdef = _maybe_get_opdef(op)
if opdef is not None:
opdef.register_autograd(backward, setup_context=setup_context)
return
assert isinstance(op, str)
qualname = op
op = torch._library.utils.lookup_op(qualname)
schema = op._schema
if not _library.utils.is_functional_schema(schema):
raise RuntimeError(
f"Cannot register autograd formula for non-functional operator "
f"{op} with schema {schema}. Please create "
f"a functional operator and register an autograd formula for that."
)
if _library.utils.has_kwarg_only_tensors(schema):
raise NotImplementedError(
f"register_autograd with kwarg-only Tensor args. In the original "
f"definition of the op, please make your tensors not kwarg-only. "
f"Got: {schema}"
)
info = _library.autograd.Info(backward, setup_context)
autograd_kernel = _library.autograd.make_autograd_impl(op, info)
namespace, opname = torch._library.utils.parse_namespace(qualname)
if lib is None:
lib = Library(namespace, "FRAGMENT")
_keep_alive.append(lib)
lib.impl(opname, autograd_kernel, "Autograd", with_keyset=True)
def register_torch_dispatch(
op: _op_identifier,
torch_dispatch_class: Any,
func: Optional[Callable] = None,
/,
*,
lib: Optional[Library] = None,
):
r"""Registers a torch_dispatch rule for the given operator and ``torch_dispatch_class``.
This allows for open registration to specify the behavior between the operator
and the ``torch_dispatch_class`` without needing to modify the ``torch_dispatch_class``
or the operator directly.
The ``torch_dispatch_class`` is either a Tensor subclass with ``__torch_dispatch__`` or a
TorchDispatchMode.
If it is a Tensor subclass, we expect ``func`` to have the following signature:
``(cls, func: OpOverload, types: Tuple[type, ...], args, kwargs) -> Any``
If it is a TorchDispatchMode, we expect ``func`` to have the following signature:
``(mode, func: OpOverload, types: Tuple[type, ...], args, kwargs) -> Any``
``args`` and ``kwargs`` will have been normalized the same way they are
in ``__torch_dispatch__`` (see :ref:`torch-dispatch-calling-convention`).
Examples:
>>> import torch
>>>
>>> @torch.library.custom_op("mylib::foo", mutates_args={})
>>> def foo(x: torch.Tensor) -> torch.Tensor:
>>> return x.clone()
>>>
>>> class MyMode(torch.utils._python_dispatch.TorchDispatchMode):
>>> def __torch_dispatch__(self, func, types, args=(), kwargs=None):
>>> return func(*args, **kwargs)
>>>
>>> @torch.library.register_torch_dispatch("mylib::foo", MyMode)
>>> def _(mode, func, types, args, kwargs):
>>> x, = args
>>> return x + 1
>>>
>>> x = torch.randn(3)
>>> y = foo(x)
>>> assert torch.allclose(y, x)
>>>
>>> with MyMode():
>>> y = foo(x)
>>> assert torch.allclose(y, x + 1)
"""
if not isinstance(
op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef)
):
raise ValueError(
f"register_torch_dispatch({op}): got unexpected type for op: {type(op)}"
)
if isinstance(op, torch._ops.OpOverload):
op = op._name
opdef = _maybe_get_opdef(op)
if opdef is not None:
return opdef.register_torch_dispatch(torch_dispatch_class, func)
assert isinstance(op, str)
def register(func):
namespace, op_name = torch._library.utils.parse_namespace(op)
if lib is None:
use_lib = Library(namespace, "FRAGMENT")
_keep_alive.append(use_lib)
else:
use_lib = lib
use_lib._register_torch_dispatch_rule(op_name, torch_dispatch_class, func)
return func
if func is None:
return register
else:
return register(func)
def register_vmap(
op: _op_identifier,
func: Optional[Callable] = None,
/,
*,
lib=None,
):
r"""Register a vmap implementation to support :func:`torch.vmap` for this custom op.
This API may be used as a decorator (see examples).
In order for an operator to work with :func:`torch.vmap`, you may need to register a
    vmap implementation with the following signature:
``vmap_func(info, in_dims: Tuple[Optional[int]], *args, **kwargs)``,
where ``*args`` and ``**kwargs`` are the arguments and kwargs for ``op``.
We do not support kwarg-only Tensor args.
    It specifies how we compute the batched version of ``op`` given inputs with an additional
dimension (specified by ``in_dims``).
For each arg in ``args``, ``in_dims`` has a corresponding ``Optional[int]``. It is ``None``
if the arg is not a Tensor or if the arg is not being vmapped over, otherwise, it is an integer
specifying what dimension of the Tensor is being vmapped over.
``info`` is a collection of additional metadata that may be helpful:
``info.batch_size`` specifies the size of the dimension being vmapped over, while
``info.randomness`` is the ``randomness`` option that was passed to :func:`torch.vmap`.
The return of the function ``func`` is a tuple of ``(output, out_dims)``. Similar to ``in_dims``,
``out_dims`` should be of the same structure as ``output`` and contain one ``out_dim``
per output that specifies if the output has the vmapped dimension and what index it is in.
Examples:
>>> import torch
>>> import numpy as np
>>> from torch import Tensor
>>> from typing import Tuple
>>>
>>> def to_numpy(tensor):
>>> return tensor.cpu().numpy()
>>>
>>> lib = torch.library.Library("mylib", "FRAGMENT")
>>> @torch.library.custom_op("mylib::numpy_cube", mutates_args=())
>>> def numpy_cube(x: Tensor) -> Tuple[Tensor, Tensor]:
>>> x_np = to_numpy(x)
>>> dx = torch.tensor(3 * x_np ** 2, device=x.device)
>>> return torch.tensor(x_np ** 3, device=x.device), dx
>>>
>>> def numpy_cube_vmap(info, in_dims, x):
>>> result = numpy_cube(x)
>>> return result, (in_dims[0], in_dims[0])
>>>
>>> torch.library.register_vmap(numpy_cube, numpy_cube_vmap)
>>>
>>> x = torch.randn(3)
>>> torch.vmap(numpy_cube)(x)
>>>
>>> @torch.library.custom_op("mylib::numpy_mul", mutates_args=())
>>> def numpy_mul(x: Tensor, y: Tensor) -> Tensor:
>>> return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)
>>>
>>> @torch.library.register_vmap("mylib::numpy_mul")
>>> def numpy_mul_vmap(info, in_dims, x, y):
>>> x_bdim, y_bdim = in_dims
>>> x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
>>> y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1)
>>> result = x * y
>>> result = result.movedim(-1, 0)
>>> return result, 0
>>>
>>>
>>> x = torch.randn(3)
>>> y = torch.randn(3)
>>> torch.vmap(numpy_mul)(x, y)
.. note::
The vmap function should aim to preserve the semantics of the entire custom operator.
That is, ``grad(vmap(op))`` should be replaceable with a ``grad(map(op))``.
If your custom operator has any custom behavior in the backward pass, please
keep this in mind.
"""
if not isinstance(
op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef)
):
raise ValueError(f"register_vmap({op}): got unexpected type for op: {type(op)}")
if isinstance(op, torch._ops.OpOverload):
op = op._name
opdef = _maybe_get_opdef(op)
if opdef is not None:
return opdef.register_vmap(func)
assert isinstance(op, str)
qualname = op
op = torch._library.utils.lookup_op(qualname)
schema = op._schema
if _library.utils.has_kwarg_only_tensors(schema):
raise NotImplementedError(
f"register_vmap with kwarg-only Tensor args. In the original "
f"definition of the op, please make your tensors not kwarg-only. "
f"Got: {schema}"
)
def register(func):
nonlocal op, lib
namespace, opname = torch._library.utils.parse_namespace(qualname)
if lib is None:
lib = Library(namespace, "FRAGMENT")
_keep_alive.append(lib)
from torch._functorch.autograd_function import custom_function_call_vmap_helper
from torch._functorch.pyfunctorch import retrieve_current_functorch_interpreter
def wrapped_func(keyset, *args, **kwargs):
interpreter = retrieve_current_functorch_interpreter()
return custom_function_call_vmap_helper(
interpreter, func, op, *args, **kwargs
)
lib.impl(opname, wrapped_func, "FuncTorchBatched", with_keyset=True)
if func is None:
return register
else:
return register(func)
# If the op was defined in C++, then we want to make sure there was an
# m.set_python_module(module, ...) call and that the module is the
# same as the module that called torch.library.register_fake.
def _check_pystubs_once(func, qualname, actual_module_name):
checked = False
def inner(*args, **kwargs):
nonlocal checked
if checked:
return func(*args, **kwargs)
op = torch._library.utils.lookup_op(qualname)
if op._defined_in_python:
checked = True
return func(*args, **kwargs)
maybe_pystub = torch._C._dispatch_pystub(
op._schema.name, op._schema.overload_name
)
if maybe_pystub is None:
if torch._library.utils.requires_set_python_module():
namespace = op.namespace
cpp_filename = op._handle.debug()
raise RuntimeError(
f"Operator '{qualname}' was defined in C++ and has a Python "
f"fake impl. In this situation, we require there to also be a "
f'companion C++ `m.set_python_module("{actual_module_name}")` '
f"call, but we could not find one. Please add that to "
f"to the top of the C++ TORCH_LIBRARY({namespace}, ...) block the "
f"operator was registered in ({cpp_filename})"
)
else:
pystub_module = maybe_pystub[0]
if actual_module_name != pystub_module:
cpp_filename = op._handle.debug()
raise RuntimeError(
f"Operator '{qualname}' specified that its python fake impl "
f"is in the Python module '{pystub_module}' but it was actually found "
f"in '{actual_module_name}'. Please either move the fake impl "
f"or correct the m.set_python_module call ({cpp_filename})"
)
checked = True
return func(*args, **kwargs)
return inner
# NOTE [ctx inside the fake implementation]
# If a user has an operator with data-dependent output shape, then when writing
# a fake implementation they must query the current ctx and use methods on the
# ctx to construct a new unbacked symint.
#
# This is done via us setting the global_ctx_getter function every time a fake
# implementation is invoked.
def get_ctx() -> "torch._library.fake_impl.FakeImplCtx":
"""get_ctx() returns the current AbstractImplCtx object.
    Calling ``get_ctx()`` is only valid inside of a fake impl
    (see :func:`torch.library.register_fake` for more usage details).
"""
return torch._library.fake_impl.global_ctx_getter()
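# Editor's note: a minimal, hypothetical sketch (not part of the original file)
# of how a fake impl for an operator with data-dependent output shape might use
# ``get_ctx()``. The op name ``mylib::custom_nonzero`` is made up for this
# sketch; ``ctx.new_dynamic_size()`` allocates a new unbacked symbolic integer
# to stand in for the unknown number of rows.
#
#     @torch.library.register_fake("mylib::custom_nonzero")
#     def _(x):
#         ctx = torch.library.get_ctx()
#         nnz = ctx.new_dynamic_size()  # data-dependent dimension
#         return x.new_empty(nnz, x.dim(), dtype=torch.long)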
_OPCHECK_DEFAULT_UTILS = (
"test_schema",
"test_autograd_registration",
"test_faketensor",
"test_aot_dispatch_dynamic",
)
def opcheck(
op: Union[torch._ops.OpOverload, torch._ops.OpOverloadPacket, CustomOpDef],
args: tuple[Any, ...],
kwargs: Optional[dict[str, Any]] = None,
*,
test_utils: Union[str, Sequence[str]] = _OPCHECK_DEFAULT_UTILS,
raise_exception: bool = True,
atol=None,
rtol=None,
) -> dict[str, str]:
"""Given an operator and some sample arguments, tests if the operator is
registered correctly.
That is, when you use the torch.library/TORCH_LIBRARY APIs to create a
custom op, you specified metadata (e.g. mutability info) about the custom op
and these APIs require that the functions you pass them satisfy certain
    properties (e.g. no data pointer access in the fake/meta/abstract kernel);
    ``opcheck`` tests this metadata and these properties.
Concretely, we test the following:
- test_schema: If the schema matches the implementation of
the operator. For example: if the schema specifies a Tensor is mutated,
then we check the implementation mutates the Tensor. If the schema
specifies that we return a new Tensor, then we check that the
implementation returns a new Tensor (instead of an existing one or
a view of an existing one).
- test_autograd_registration: If the operator supports training
(autograd): we check that its autograd formula is registered via
torch.library.register_autograd or a manual registration to one
or more DispatchKey::Autograd keys. Any other DispatchKey-based
registrations may lead to undefined behavior.
- test_faketensor: If the operator has a FakeTensor kernel
      (and if it is correct). The FakeTensor kernel is necessary
      (but not sufficient) for the operator to work with PyTorch compilation
APIs (torch.compile/export/FX). We check that a FakeTensor kernel
(also sometimes known as a meta kernel) was registered for the
operator and that it is correct. This test takes the result of
running the operator on real tensors and the result of running
the operator on FakeTensors and checks that they have the same
Tensor metadata (sizes/strides/dtype/device/etc).
- test_aot_dispatch_dynamic: If the operator has correct behavior
with PyTorch compilation APIs (torch.compile/export/FX).
This checks that the outputs (and gradients, if applicable) are the
same under eager-mode PyTorch and torch.compile.
This test is a superset of ``test_faketensor`` and is an e2e test;
other things it tests are that the operator supports
functionalization and that the backward pass (if it exists) also
supports FakeTensor and functionalization.
For best results, please call ``opcheck`` multiple times with a
representative set of inputs. If your operator supports
autograd, please use ``opcheck`` with inputs with ``requires_grad = True``;
if your operator supports multiple devices (e.g. CPU and CUDA), please
use ``opcheck`` with inputs on all supported devices.
Args:
op: The operator. Must either be a function decorated with
:func:`torch.library.custom_op` or an OpOverload/OpOverloadPacket
found in torch.ops.* (e.g. torch.ops.aten.sin, torch.ops.mylib.foo)
args: The args to the operator
kwargs: The kwargs to the operator
test_utils: Tests that we should run. Default: all of them.
Example: ("test_schema", "test_faketensor")
raise_exception: If we should raise an exception on the first
error. If False, we will return a dict with information
on if each test passed or not.
rtol (Optional[float]): Relative tolerance for floating point comparisons.
If specified ``atol`` must also be specified.
If omitted, default values based on the ``dtype`` are selected
(see the table in :func:`torch.testing.assert_close`).
atol (Optional[float]): Absolute tolerance for floating point comparisons.
If specified ``rtol`` must also be specified.
If omitted, default values based on the ``dtype`` are selected
(see the table in :func:`torch.testing.assert_close`).
.. warning::
opcheck and :func:`torch.autograd.gradcheck` test different things;
opcheck tests if your usage of torch.library APIs is correct while
:func:`torch.autograd.gradcheck` tests if your autograd formula is
mathematically correct. Use both to test custom ops that support
gradient computation.
Example:
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
>>> @torch.library.custom_op("mylib::numpy_mul", mutates_args=())
>>> def numpy_mul(x: Tensor, y: float) -> Tensor:
>>> x_np = x.numpy(force=True)
>>> z_np = x_np * y
>>> return torch.from_numpy(z_np).to(x.device)
>>>
>>> @numpy_mul.register_fake
>>> def _(x, y):
>>> return torch.empty_like(x)
>>>
>>> def setup_context(ctx, inputs, output):
>>> y, = inputs
>>> ctx.y = y
>>>
>>> def backward(ctx, grad):
>>> return grad * ctx.y, None
>>>
>>> numpy_mul.register_autograd(backward, setup_context=setup_context)
>>>
>>> sample_inputs = [
>>> (torch.randn(3), 3.14),
>>> (torch.randn(2, 3, device='cuda'), 2.718),
>>> (torch.randn(1, 10, requires_grad=True), 1.234),
>>> (torch.randn(64, 64, device='cuda', requires_grad=True), 90.18),
>>> ]
>>>
>>> for args in sample_inputs:
>>> torch.library.opcheck(numpy_mul, args)
"""
import torch.testing._internal.optests as optests
return optests.opcheck(
op,
args,
kwargs,
test_utils=test_utils,
raise_exception=raise_exception,
rtol=rtol,
atol=atol,
)
```
|
================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 6
SIZE: 114.93 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\linalg\__init__.py
ENCODING: utf-8
```py
from torch._C import ( # type: ignore[attr-defined]
_add_docstr,
_linalg,
_LinAlgError as LinAlgError,
)
common_notes = {
"experimental_warning": """This function is "experimental" and it may change in a future PyTorch release.""",
"sync_note": "When inputs are on a CUDA device, this function synchronizes that device with the CPU.",
"sync_note_ex": r"When the inputs are on a CUDA device, this function synchronizes only when :attr:`check_errors`\ `= True`.",
"sync_note_has_ex": (
"When inputs are on a CUDA device, this function synchronizes that device with the CPU. "
"For a version of this function that does not synchronize, see :func:`{}`."
),
}
# Note: This not only adds doc strings for functions in the linalg namespace, but
# also connects the torch.linalg Python namespace to the torch._C._linalg builtins.
cross = _add_docstr(
_linalg.linalg_cross,
r"""
linalg.cross(input, other, *, dim=-1, out=None) -> Tensor
Computes the cross product of two 3-dimensional vectors.
Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
of vectors, for which it computes the product along the dimension :attr:`dim`.
It broadcasts over the batch dimensions.
Args:
input (Tensor): the first input tensor.
other (Tensor): the second input tensor.
dim (int, optional): the dimension along which to take the cross-product. Default: `-1`.
Keyword args:
out (Tensor, optional): the output tensor. Ignored if `None`. Default: `None`.
Example:
>>> a = torch.randn(4, 3)
>>> a
tensor([[-0.3956, 1.1455, 1.6895],
[-0.5849, 1.3672, 0.3599],
[-1.1626, 0.7180, -0.0521],
[-0.1339, 0.9902, -2.0225]])
>>> b = torch.randn(4, 3)
>>> b
tensor([[-0.0257, -1.4725, -1.2251],
[-1.1479, -0.7005, -1.9757],
[-1.3904, 0.3726, -1.1836],
[-0.9688, -0.7153, 0.2159]])
>>> torch.linalg.cross(a, b)
tensor([[ 1.0844, -0.5281, 0.6120],
[-2.4490, -1.5687, 1.9792],
[-0.8304, -1.3037, 0.5650],
[-1.2329, 1.9883, 1.0551]])
>>> a = torch.randn(1, 3) # a is broadcast to match shape of b
>>> a
tensor([[-0.9941, -0.5132, 0.5681]])
>>> torch.linalg.cross(a, b)
tensor([[ 1.4653, -1.2325, 1.4507],
[ 1.4119, -2.6163, 0.1073],
[ 0.3957, -1.9666, -1.0840],
[ 0.2956, -0.3357, 0.2139]])
""",
)
cholesky = _add_docstr(
_linalg.linalg_cholesky,
r"""
linalg.cholesky(A, *, upper=False, out=None) -> Tensor
Computes the Cholesky decomposition of a complex Hermitian or real symmetric positive-definite matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **Cholesky decomposition** of a complex Hermitian or real symmetric positive-definite matrix
:math:`A \in \mathbb{K}^{n \times n}` is defined as
.. math::
A = LL^{\text{H}}\mathrlap{\qquad L \in \mathbb{K}^{n \times n}}
where :math:`L` is a lower triangular matrix with real positive diagonal (even in the complex case) and
:math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, and the transpose when :math:`L` is real-valued.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
"""
+ rf"""
.. note:: {common_notes["sync_note_has_ex"].format("torch.linalg.cholesky_ex")}
"""
+ r"""
.. seealso::
:func:`torch.linalg.cholesky_ex` for a version of this operation that
skips the (slow) error checking by default and instead returns the debug
information. This makes it a faster way to check if a matrix is
positive-definite.
:func:`torch.linalg.eigh` for a different decomposition of a Hermitian matrix.
        The eigenvalue decomposition gives more information about the matrix but it is
        slower to compute than the Cholesky decomposition.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of symmetric or Hermitian positive-definite matrices.
Keyword args:
upper (bool, optional): whether to return an upper triangular matrix.
The tensor returned with upper=True is the conjugate transpose of the tensor
returned with upper=False.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if the :attr:`A` matrix or any matrix in a batched :attr:`A` is not Hermitian
(resp. symmetric) positive-definite. If :attr:`A` is a batch of matrices,
the error message will include the batch index of the first matrix that fails
to meet this condition.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> A = A @ A.T.conj() + torch.eye(2) # creates a Hermitian positive-definite matrix
>>> A
tensor([[2.5266+0.0000j, 1.9586-2.0626j],
[1.9586+2.0626j, 9.4160+0.0000j]], dtype=torch.complex128)
>>> L = torch.linalg.cholesky(A)
>>> L
tensor([[1.5895+0.0000j, 0.0000+0.0000j],
[1.2322+1.2976j, 2.4928+0.0000j]], dtype=torch.complex128)
>>> torch.dist(L @ L.T.conj(), A)
tensor(4.4692e-16, dtype=torch.float64)
>>> A = torch.randn(3, 2, 2, dtype=torch.float64)
>>> A = A @ A.mT + torch.eye(2) # batch of symmetric positive-definite matrices
>>> L = torch.linalg.cholesky(A)
>>> torch.dist(L @ L.mT, A)
tensor(5.8747e-16, dtype=torch.float64)
""",
)
cholesky_ex = _add_docstr(
_linalg.linalg_cholesky_ex,
r"""
linalg.cholesky_ex(A, *, upper=False, check_errors=False, out=None) -> (Tensor, Tensor)
Computes the Cholesky decomposition of a complex Hermitian or real
symmetric positive-definite matrix.
This function skips the (slow) error checking and error message construction
of :func:`torch.linalg.cholesky`, instead directly returning the LAPACK
error codes as part of a named tuple ``(L, info)``. This makes this function
a faster way to check if a matrix is positive-definite, and it provides an
opportunity to handle decomposition errors more gracefully or performantly
than :func:`torch.linalg.cholesky` does.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
If :attr:`A` is not a Hermitian positive-definite matrix, or if it's a batch of matrices
and one or more of them is not a Hermitian positive-definite matrix,
then ``info`` stores a positive integer for the corresponding matrix.
The positive integer indicates the order of the leading minor that is not positive-definite,
and the decomposition could not be completed.
``info`` filled with zeros indicates that the decomposition was successful.
If ``check_errors=True`` and ``info`` contains positive integers, then a RuntimeError is thrown.
"""
+ rf"""
.. note:: {common_notes["sync_note_ex"]}
.. warning:: {common_notes["experimental_warning"]}
"""
+ r"""
.. seealso::
:func:`torch.linalg.cholesky` is a NumPy compatible variant that always checks for errors.
Args:
A (Tensor): the Hermitian `n \times n` matrix or the batch of such matrices of size
`(*, n, n)` where `*` is one or more batch dimensions.
Keyword args:
upper (bool, optional): whether to return an upper triangular matrix.
The tensor returned with upper=True is the conjugate transpose of the tensor
returned with upper=False.
check_errors (bool, optional): controls whether to check the content of ``infos``. Default: `False`.
out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> A = A @ A.t().conj() # creates a Hermitian positive-definite matrix
>>> L, info = torch.linalg.cholesky_ex(A)
>>> A
tensor([[ 2.3792+0.0000j, -0.9023+0.9831j],
[-0.9023-0.9831j, 0.8757+0.0000j]], dtype=torch.complex128)
>>> L
tensor([[ 1.5425+0.0000j, 0.0000+0.0000j],
[-0.5850-0.6374j, 0.3567+0.0000j]], dtype=torch.complex128)
>>> info
tensor(0, dtype=torch.int32)
""",
)
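# Editor's note: an illustrative sketch (not part of the original file) of how
# ``info`` from ``cholesky_ex`` reports a failure; the zero matrix used here is
# an arbitrary example of a matrix that is not positive-definite.
#
#     A = torch.zeros(2, 2)                  # not positive-definite
#     L, info = torch.linalg.cholesky_ex(A)
#     # info is now tensor(1, dtype=torch.int32): the leading minor of order 1
#     # is not positive-definite, so the decomposition could not be completed.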
inv = _add_docstr(
_linalg.linalg_inv,
r"""
linalg.inv(A, *, out=None) -> Tensor
Computes the inverse of a square matrix if it exists.
Throws a `RuntimeError` if the matrix is not invertible.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
for a matrix :math:`A \in \mathbb{K}^{n \times n}`,
its **inverse matrix** :math:`A^{-1} \in \mathbb{K}^{n \times n}` (if it exists) is defined as
.. math::
A^{-1}A = AA^{-1} = \mathrm{I}_n
where :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
The inverse matrix exists if and only if :math:`A` is `invertible`_. In this case,
the inverse is unique.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices
then the output has the same batch dimensions.
"""
+ rf"""
.. note:: {common_notes["sync_note_has_ex"].format("torch.linalg.inv_ex")}
"""
+ r"""
.. note::
Consider using :func:`torch.linalg.solve` if possible for multiplying a matrix on the left by
the inverse, as::
linalg.solve(A, B) == linalg.inv(A) @ B # When B is a matrix
It is always preferred to use :func:`~solve` when possible, as it is faster and more
numerically stable than computing the inverse explicitly.
.. seealso::
:func:`torch.linalg.pinv` computes the pseudoinverse (Moore-Penrose inverse) of matrices
of any shape.
:func:`torch.linalg.solve` computes :attr:`A`\ `.inv() @ \ `:attr:`B` with a
numerically stable algorithm.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of invertible matrices.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if the matrix :attr:`A` or any matrix in the batch of matrices :attr:`A` is not invertible.
Examples::
>>> A = torch.randn(4, 4)
>>> Ainv = torch.linalg.inv(A)
>>> torch.dist(A @ Ainv, torch.eye(4))
tensor(1.1921e-07)
>>> A = torch.randn(2, 3, 4, 4) # Batch of matrices
>>> Ainv = torch.linalg.inv(A)
>>> torch.dist(A @ Ainv, torch.eye(4))
tensor(1.9073e-06)
>>> A = torch.randn(4, 4, dtype=torch.complex128) # Complex matrix
>>> Ainv = torch.linalg.inv(A)
>>> torch.dist(A @ Ainv, torch.eye(4))
tensor(7.5107e-16, dtype=torch.float64)
.. _invertible:
https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
""",
)
solve_ex = _add_docstr(
_linalg.linalg_solve_ex,
r"""
linalg.solve_ex(A, B, *, left=True, check_errors=False, out=None) -> (Tensor, Tensor)
A version of :func:`~solve` that does not perform error checks unless :attr:`check_errors`\ `= True`.
It also returns the :attr:`info` tensor returned by `LAPACK's getrf`_.
"""
+ rf"""
.. note:: {common_notes["sync_note_ex"]}
.. warning:: {common_notes["experimental_warning"]}
"""
+ r"""
Args:
    A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
    B (Tensor): right-hand side tensor of shape `(*, n, k)`.
Keyword args:
left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`.
check_errors (bool, optional): controls whether to check the content of ``infos`` and raise
an error if it is non-zero. Default: `False`.
out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(result, info)`.
Examples::
    >>> A = torch.randn(3, 3)
    >>> B = torch.randn(3, 2)
    >>> X, info = torch.linalg.solve_ex(A, B)
    >>> torch.dist(X, torch.linalg.solve(A, B))
    tensor(0.)
>>> info
tensor(0, dtype=torch.int32)
.. _LAPACK's getrf:
https://www.netlib.org/lapack/explore-html/dd/d9a/group__double_g_ecomputational_ga0019443faea08275ca60a734d0593e60.html
""",
)
inv_ex = _add_docstr(
_linalg.linalg_inv_ex,
r"""
linalg.inv_ex(A, *, check_errors=False, out=None) -> (Tensor, Tensor)
Computes the inverse of a square matrix if it is invertible.
Returns a namedtuple ``(inverse, info)``. ``inverse`` contains the result of
inverting :attr:`A` and ``info`` stores the LAPACK error codes.
If :attr:`A` is not an invertible matrix, or if it's a batch of matrices
and one or more of them is not an invertible matrix,
then ``info`` stores a positive integer for the corresponding matrix.
The positive integer indicates the diagonal element of the LU decomposition of
the input matrix that is exactly zero.
``info`` filled with zeros indicates that the inversion was successful.
If ``check_errors=True`` and ``info`` contains positive integers, then a RuntimeError is thrown.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
"""
+ rf"""
.. note:: {common_notes["sync_note_ex"]}
.. warning:: {common_notes["experimental_warning"]}
"""
+ r"""
.. seealso::
:func:`torch.linalg.inv` is a NumPy compatible variant that always checks for errors.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of square matrices.
check_errors (bool, optional): controls whether to check the content of ``info``. Default: `False`.
Keyword args:
out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(3, 3)
>>> Ainv, info = torch.linalg.inv_ex(A)
>>> torch.dist(torch.linalg.inv(A), Ainv)
tensor(0.)
>>> info
tensor(0, dtype=torch.int32)
""",
)
det = _add_docstr(
_linalg.linalg_det,
r"""
linalg.det(A, *, out=None) -> Tensor
Computes the determinant of a square matrix.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
.. seealso::
:func:`torch.linalg.slogdet` computes the sign and natural logarithm of the absolute
value of the determinant of square matrices.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(3, 3)
>>> torch.linalg.det(A)
tensor(0.0934)
>>> A = torch.randn(3, 2, 2)
>>> torch.linalg.det(A)
tensor([1.1990, 0.4099, 0.7386])
""",
)
slogdet = _add_docstr(
_linalg.linalg_slogdet,
r"""
linalg.slogdet(A, *, out=None) -> (Tensor, Tensor)
Computes the sign and natural logarithm of the absolute value of the determinant of a square matrix.
For complex :attr:`A`, it returns the sign and the natural logarithm of the modulus of the
determinant, that is, a logarithmic polar decomposition of the determinant.
The determinant can be recovered as `sign * exp(logabsdet)`.
When a matrix has a determinant of zero, it returns `(0, -inf)`.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
.. seealso::
:func:`torch.linalg.det` computes the determinant of square matrices.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
Keyword args:
out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(sign, logabsdet)`.
`sign` will have the same dtype as :attr:`A`.
`logabsdet` will always be real-valued, even when :attr:`A` is complex.
Examples::
>>> A = torch.randn(3, 3)
>>> A
tensor([[ 0.0032, -0.2239, -1.1219],
[-0.6690, 0.1161, 0.4053],
[-1.6218, -0.9273, -0.0082]])
>>> torch.linalg.det(A)
tensor(-0.7576)
>>> torch.logdet(A)
tensor(nan)
>>> torch.linalg.slogdet(A)
torch.return_types.linalg_slogdet(sign=tensor(-1.), logabsdet=tensor(-0.2776))
""",
)
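# Editor's note: an illustrative sketch (not part of the original file) showing
# how the determinant can be recovered from ``slogdet`` via
# ``det = sign * exp(logabsdet)``, as stated in the docstring above.
#
#     A = torch.randn(3, 3)
#     sign, logabsdet = torch.linalg.slogdet(A)
#     torch.testing.assert_close(sign * torch.exp(logabsdet), torch.linalg.det(A))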
eig = _add_docstr(
_linalg.linalg_eig,
r"""
linalg.eig(A, *, out=None) -> (Tensor, Tensor)
Computes the eigenvalue decomposition of a square matrix if it exists.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **eigenvalue decomposition** of a square matrix
:math:`A \in \mathbb{K}^{n \times n}` (if it exists) is defined as
.. math::
A = V \operatorname{diag}(\Lambda) V^{-1}\mathrlap{\qquad V \in \mathbb{C}^{n \times n}, \Lambda \in \mathbb{C}^n}
This decomposition exists if and only if :math:`A` is `diagonalizable`_.
This is the case when all its eigenvalues are different.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The returned eigenvalues are not guaranteed to be in any specific order.
.. note:: The eigenvalues and eigenvectors of a real matrix may be complex.
"""
+ rf"""
.. note:: {common_notes["sync_note"]}
"""
+ r"""
.. warning:: This function assumes that :attr:`A` is `diagonalizable`_ (for example, when all the
eigenvalues are different). If it is not diagonalizable, the returned
eigenvalues will be correct but :math:`A \neq V \operatorname{diag}(\Lambda)V^{-1}`.
.. warning:: The returned eigenvectors are normalized to have norm `1`.
Even then, the eigenvectors of a matrix are not unique, nor are they continuous with respect to
:attr:`A`. Due to this lack of uniqueness, different hardware and software may compute
different eigenvectors.
             This non-uniqueness is caused by the fact that multiplying an eigenvector by
             :math:`e^{i \phi}, \phi \in \mathbb{R}` produces another set of valid eigenvectors
of the matrix. For this reason, the loss function shall not depend on the phase of the
eigenvectors, as this quantity is not well-defined.
This is checked when computing the gradients of this function. As such,
when inputs are on a CUDA device, the computation of the gradients
of this function synchronizes that device with the CPU.
.. warning:: Gradients computed using the `eigenvectors` tensor will only be finite when
:attr:`A` has distinct eigenvalues.
Furthermore, if the distance between any two eigenvalues is close to zero,
the gradient will be numerically unstable, as it depends on the eigenvalues
:math:`\lambda_i` through the computation of
:math:`\frac{1}{\min_{i \neq j} \lambda_i - \lambda_j}`.
.. seealso::
:func:`torch.linalg.eigvals` computes only the eigenvalues.
Unlike :func:`torch.linalg.eig`, the gradients of :func:`~eigvals` are always
numerically stable.
:func:`torch.linalg.eigh` for a (faster) function that computes the eigenvalue decomposition
for Hermitian and symmetric matrices.
:func:`torch.linalg.svd` for a function that computes another type of spectral
decomposition that works on matrices of any shape.
:func:`torch.linalg.qr` for another (much faster) decomposition that works on matrices of
any shape.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of diagonalizable matrices.
Keyword args:
out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(eigenvalues, eigenvectors)` which corresponds to :math:`\Lambda` and :math:`V` above.
`eigenvalues` and `eigenvectors` will always be complex-valued, even when :attr:`A` is real. The eigenvectors
will be given by the columns of `eigenvectors`.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> A
tensor([[ 0.9828+0.3889j, -0.4617+0.3010j],
[ 0.1662-0.7435j, -0.6139+0.0562j]], dtype=torch.complex128)
>>> L, V = torch.linalg.eig(A)
>>> L
tensor([ 1.1226+0.5738j, -0.7537-0.1286j], dtype=torch.complex128)
>>> V
tensor([[ 0.9218+0.0000j, 0.1882-0.2220j],
[-0.0270-0.3867j, 0.9567+0.0000j]], dtype=torch.complex128)
>>> torch.dist(V @ torch.diag(L) @ torch.linalg.inv(V), A)
tensor(7.7119e-16, dtype=torch.float64)
>>> A = torch.randn(3, 2, 2, dtype=torch.float64)
>>> L, V = torch.linalg.eig(A)
>>> torch.dist(V @ torch.diag_embed(L) @ torch.linalg.inv(V), A)
tensor(3.2841e-16, dtype=torch.float64)
.. _diagonalizable:
https://en.wikipedia.org/wiki/Diagonalizable_matrix#Definition
""",
)
eigvals = _add_docstr(
_linalg.linalg_eigvals,
r"""
linalg.eigvals(A, *, out=None) -> Tensor
Computes the eigenvalues of a square matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **eigenvalues** of a square matrix :math:`A \in \mathbb{K}^{n \times n}` are defined
as the roots (counted with multiplicity) of the polynomial `p` of degree `n` given by
.. math::
p(\lambda) = \operatorname{det}(A - \lambda \mathrm{I}_n)\mathrlap{\qquad \lambda \in \mathbb{C}}
where :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The returned eigenvalues are not guaranteed to be in any specific order.
.. note:: The eigenvalues of a real matrix may be complex, as the roots of a real polynomial may be complex.
The eigenvalues of a matrix are always well-defined, even when the matrix is not diagonalizable.
"""
+ rf"""
.. note:: {common_notes["sync_note"]}
"""
+ r"""
.. seealso::
:func:`torch.linalg.eig` computes the full eigenvalue decomposition.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Returns:
A complex-valued tensor containing the eigenvalues even when :attr:`A` is real.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> L = torch.linalg.eigvals(A)
>>> L
tensor([ 1.1226+0.5738j, -0.7537-0.1286j], dtype=torch.complex128)
>>> torch.dist(L, torch.linalg.eig(A).eigenvalues)
tensor(2.4576e-07)
""",
)
eigh = _add_docstr(
_linalg.linalg_eigh,
r"""
linalg.eigh(A, UPLO='L', *, out=None) -> (Tensor, Tensor)
Computes the eigenvalue decomposition of a complex Hermitian or real symmetric matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **eigenvalue decomposition** of a complex Hermitian or real symmetric matrix
:math:`A \in \mathbb{K}^{n \times n}` is defined as
.. math::
A = Q \operatorname{diag}(\Lambda) Q^{\text{H}}\mathrlap{\qquad Q \in \mathbb{K}^{n \times n}, \Lambda \in \mathbb{R}^n}
where :math:`Q^{\text{H}}` is the conjugate transpose when :math:`Q` is complex, and the transpose when :math:`Q` is real-valued.
:math:`Q` is orthogonal in the real case and unitary in the complex case.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
:attr:`A` is assumed to be Hermitian (resp. symmetric), but this is not checked internally, instead:
- If :attr:`UPLO`\ `= 'L'` (default), only the lower triangular part of the matrix is used in the computation.
- If :attr:`UPLO`\ `= 'U'`, only the upper triangular part of the matrix is used.
The eigenvalues are returned in ascending order.
"""
+ rf"""
.. note:: {common_notes["sync_note"]}
"""
+ r"""
.. note:: The eigenvalues of real symmetric or complex Hermitian matrices are always real.
.. warning:: The eigenvectors of a symmetric matrix are not unique, nor are they continuous with
respect to :attr:`A`. Due to this lack of uniqueness, different hardware and
software may compute different eigenvectors.
This non-uniqueness is caused by the fact that multiplying an eigenvector by
`-1` in the real case or by :math:`e^{i \phi}, \phi \in \mathbb{R}` in the complex
case produces another set of valid eigenvectors of the matrix.
For this reason, the loss function shall not depend on the phase of the eigenvectors, as
this quantity is not well-defined.
This is checked for complex inputs when computing the gradients of this function. As such,
when inputs are complex and are on a CUDA device, the computation of the gradients
of this function synchronizes that device with the CPU.
.. warning:: Gradients computed using the `eigenvectors` tensor will only be finite when
:attr:`A` has distinct eigenvalues.
Furthermore, if the distance between any two eigenvalues is close to zero,
the gradient will be numerically unstable, as it depends on the eigenvalues
:math:`\lambda_i` through the computation of
:math:`\frac{1}{\min_{i \neq j} \lambda_i - \lambda_j}`.
.. warning:: Users may see PyTorch crash when running `eigh` on CUDA devices with CUDA versions before 12.1 update 1
with large ill-conditioned matrices as inputs.
Refer to :ref:`Linear Algebra Numerical Stability<Linear Algebra Stability>` for more details.
             If this is the case, users may (1) tune their matrix inputs to be less ill-conditioned,
or (2) use :func:`torch.backends.cuda.preferred_linalg_library` to
try other supported backends.
.. seealso::
:func:`torch.linalg.eigvalsh` computes only the eigenvalues of a Hermitian matrix.
Unlike :func:`torch.linalg.eigh`, the gradients of :func:`~eigvalsh` are always
numerically stable.
:func:`torch.linalg.cholesky` for a different decomposition of a Hermitian matrix.
The Cholesky decomposition gives less information about the matrix but is much faster
to compute than the eigenvalue decomposition.
:func:`torch.linalg.eig` for a (slower) function that computes the eigenvalue decomposition
of a not necessarily Hermitian square matrix.
:func:`torch.linalg.svd` for a (slower) function that computes the more general SVD
decomposition of matrices of any shape.
:func:`torch.linalg.qr` for another (much faster) decomposition that works on general
matrices.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of symmetric or Hermitian matrices.
UPLO ('L', 'U', optional): controls whether to use the upper or lower triangular part
of :attr:`A` in the computations. Default: `'L'`.
Keyword args:
out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(eigenvalues, eigenvectors)` which corresponds to :math:`\Lambda` and :math:`Q` above.
`eigenvalues` will always be real-valued, even when :attr:`A` is complex.
It will also be ordered in ascending order.
`eigenvectors` will have the same dtype as :attr:`A` and will contain the eigenvectors as its columns.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> A = A + A.T.conj() # creates a Hermitian matrix
>>> A
tensor([[2.9228+0.0000j, 0.2029-0.0862j],
[0.2029+0.0862j, 0.3464+0.0000j]], dtype=torch.complex128)
>>> L, Q = torch.linalg.eigh(A)
>>> L
tensor([0.3277, 2.9415], dtype=torch.float64)
>>> Q
tensor([[-0.0846+-0.0000j, -0.9964+0.0000j],
[ 0.9170+0.3898j, -0.0779-0.0331j]], dtype=torch.complex128)
>>> torch.dist(Q @ torch.diag(L.cdouble()) @ Q.T.conj(), A)
tensor(6.1062e-16, dtype=torch.float64)
>>> A = torch.randn(3, 2, 2, dtype=torch.float64)
>>> A = A + A.mT # creates a batch of symmetric matrices
>>> L, Q = torch.linalg.eigh(A)
>>> torch.dist(Q @ torch.diag_embed(L) @ Q.mH, A)
tensor(1.5423e-15, dtype=torch.float64)
""",
)
eigvalsh = _add_docstr(
_linalg.linalg_eigvalsh,
r"""
linalg.eigvalsh(A, UPLO='L', *, out=None) -> Tensor
Computes the eigenvalues of a complex Hermitian or real symmetric matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **eigenvalues** of a complex Hermitian or real symmetric matrix :math:`A \in \mathbb{K}^{n \times n}`
are defined as the roots (counted with multiplicity) of the polynomial `p` of degree `n` given by
.. math::
p(\lambda) = \operatorname{det}(A - \lambda \mathrm{I}_n)\mathrlap{\qquad \lambda \in \mathbb{R}}
where :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
The eigenvalues of a real symmetric or complex Hermitian matrix are always real.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The eigenvalues are returned in ascending order.
:attr:`A` is assumed to be Hermitian (resp. symmetric), but this is not checked internally, instead:
- If :attr:`UPLO`\ `= 'L'` (default), only the lower triangular part of the matrix is used in the computation.
- If :attr:`UPLO`\ `= 'U'`, only the upper triangular part of the matrix is used.
"""
+ rf"""
.. note:: {common_notes["sync_note"]}
"""
+ r"""
.. seealso::
:func:`torch.linalg.eigh` computes the full eigenvalue decomposition.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of symmetric or Hermitian matrices.
UPLO ('L', 'U', optional): controls whether to use the upper or lower triangular part
of :attr:`A` in the computations. Default: `'L'`.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Returns:
A real-valued tensor containing the eigenvalues even when :attr:`A` is complex.
The eigenvalues are returned in ascending order.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> A = A + A.T.conj() # creates a Hermitian matrix
>>> A
tensor([[2.9228+0.0000j, 0.2029-0.0862j],
[0.2029+0.0862j, 0.3464+0.0000j]], dtype=torch.complex128)
>>> torch.linalg.eigvalsh(A)
tensor([0.3277, 2.9415], dtype=torch.float64)
>>> A = torch.randn(3, 2, 2, dtype=torch.float64)
>>> A = A + A.mT # creates a batch of symmetric matrices
>>> torch.linalg.eigvalsh(A)
tensor([[ 2.5797, 3.4629],
[-4.1605, 1.3780],
[-3.1113, 2.7381]], dtype=torch.float64)
""",
)
householder_product = _add_docstr(
_linalg.linalg_householder_product,
r"""
householder_product(A, tau, *, out=None) -> Tensor
Computes the first `n` columns of a product of Householder matrices.
Let :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, and
let :math:`A \in \mathbb{K}^{m \times n}` be a matrix with columns :math:`a_i \in \mathbb{K}^m`
for :math:`i=1,\ldots,m` with :math:`m \geq n`. Denote by :math:`b_i` the vector resulting from
zeroing out the first :math:`i-1` components of :math:`a_i` and setting to `1` the :math:`i`-th.
For a vector :math:`\tau \in \mathbb{K}^k` with :math:`k \leq n`, this function computes the
first :math:`n` columns of the matrix
.. math::
H_1H_2 ... H_k \qquad\text{with}\qquad H_i = \mathrm{I}_m - \tau_i b_i b_i^{\text{H}}
where :math:`\mathrm{I}_m` is the `m`-dimensional identity matrix and :math:`b^{\text{H}}` is the
conjugate transpose when :math:`b` is complex, and the transpose when :math:`b` is real-valued.
The output matrix is the same size as the input matrix :attr:`A`.
See `Representation of Orthogonal or Unitary Matrices`_ for further details.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
.. seealso::
:func:`torch.geqrf` can be used together with this function to form the `Q` from the
:func:`~qr` decomposition.
:func:`torch.ormqr` is a related function that computes the matrix multiplication
of a product of Householder matrices with another matrix.
However, that function is not supported by autograd.
.. warning::
Gradient computations are only well-defined if :math:`\tau_i \neq \frac{1}{||a_i||^2}`.
If this condition is not met, no error will be thrown, but the gradient produced may contain `NaN`.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
tau (Tensor): tensor of shape `(*, k)` where `*` is zero or more batch dimensions.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if :attr:`A` doesn't satisfy the requirement `m >= n`,
or :attr:`tau` doesn't satisfy the requirement `n >= k`.
Examples::
>>> A = torch.randn(2, 2)
>>> h, tau = torch.geqrf(A)
>>> Q = torch.linalg.householder_product(h, tau)
>>> torch.dist(Q, torch.linalg.qr(A).Q)
tensor(0.)
>>> h = torch.randn(3, 2, 2, dtype=torch.complex128)
>>> tau = torch.randn(3, 1, dtype=torch.complex128)
>>> Q = torch.linalg.householder_product(h, tau)
>>> Q
tensor([[[ 1.8034+0.4184j, 0.2588-1.0174j],
[-0.6853+0.7953j, 2.0790+0.5620j]],
[[ 1.4581+1.6989j, -1.5360+0.1193j],
[ 1.3877-0.6691j, 1.3512+1.3024j]],
[[ 1.4766+0.5783j, 0.0361+0.6587j],
[ 0.6396+0.1612j, 1.3693+0.4481j]]], dtype=torch.complex128)
.. _Representation of Orthogonal or Unitary Matrices:
https://www.netlib.org/lapack/lug/node128.html
""",
)
ldl_factor = _add_docstr(
_linalg.linalg_ldl_factor,
r"""
linalg.ldl_factor(A, *, hermitian=False, out=None) -> (Tensor, Tensor)
Computes a compact representation of the LDL factorization of a Hermitian or symmetric (possibly indefinite) matrix.
When :attr:`A` is complex valued it can be Hermitian (:attr:`hermitian`\ `= True`)
or symmetric (:attr:`hermitian`\ `= False`).
The factorization is of the form :math:`A = L D L^T`.
If :attr:`hermitian` is `True`, then the transpose operation is the conjugate transpose.
:math:`L` (or :math:`U`) and :math:`D` are stored in compact form in ``LD``.
They follow the format specified by `LAPACK's sytrf`_ function.
These tensors may be used in :func:`torch.linalg.ldl_solve` to solve linear systems.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
"""
+ rf"""
.. note:: {common_notes["sync_note_has_ex"].format("torch.linalg.ldl_factor_ex")}
"""
+ r"""
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of symmetric or Hermitian matrices.
Keyword args:
hermitian (bool, optional): whether to consider the input to be Hermitian or symmetric.
For real-valued matrices, this switch has no effect. Default: `False`.
out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(LD, pivots)`.
Examples::
>>> A = torch.randn(3, 3)
>>> A = A @ A.mT # make symmetric
>>> A
tensor([[7.2079, 4.2414, 1.9428],
[4.2414, 3.4554, 0.3264],
[1.9428, 0.3264, 1.3823]])
>>> LD, pivots = torch.linalg.ldl_factor(A)
>>> LD
tensor([[ 7.2079, 0.0000, 0.0000],
[ 0.5884, 0.9595, 0.0000],
[ 0.2695, -0.8513, 0.1633]])
>>> pivots
tensor([1, 2, 3], dtype=torch.int32)
.. _LAPACK's sytrf:
https://www.netlib.org/lapack/explore-html/d3/db6/group__double_s_ycomputational_gad91bde1212277b3e909eb6af7f64858a.html
""",
)
ldl_factor_ex = _add_docstr(
_linalg.linalg_ldl_factor_ex,
r"""
linalg.ldl_factor_ex(A, *, hermitian=False, check_errors=False, out=None) -> (Tensor, Tensor, Tensor)
This is a version of :func:`~ldl_factor` that does not perform error checks unless :attr:`check_errors`\ `= True`.
It also returns the :attr:`info` tensor returned by `LAPACK's sytrf`_.
``info`` stores integer error codes from the backend library.
A positive integer indicates the diagonal element of :math:`D` that is zero.
Division by 0 will occur if the result is used for solving a system of linear equations.
``info`` filled with zeros indicates that the factorization was successful.
If ``check_errors=True`` and ``info`` contains positive integers, then a `RuntimeError` is thrown.
"""
+ rf"""
.. note:: {common_notes["sync_note_ex"]}
.. warning:: {common_notes["experimental_warning"]}
"""
+ r"""
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of symmetric or Hermitian matrices.
Keyword args:
hermitian (bool, optional): whether to consider the input to be Hermitian or symmetric.
For real-valued matrices, this switch has no effect. Default: `False`.
check_errors (bool, optional): controls whether to check the content of ``info`` and raise
an error if it is non-zero. Default: `False`.
out (tuple, optional): tuple of three tensors to write the output to. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(LD, pivots, info)`.
Examples::
>>> A = torch.randn(3, 3)
>>> A = A @ A.mT # make symmetric
>>> A
tensor([[7.2079, 4.2414, 1.9428],
[4.2414, 3.4554, 0.3264],
[1.9428, 0.3264, 1.3823]])
>>> LD, pivots, info = torch.linalg.ldl_factor_ex(A)
>>> LD
tensor([[ 7.2079, 0.0000, 0.0000],
[ 0.5884, 0.9595, 0.0000],
[ 0.2695, -0.8513, 0.1633]])
>>> pivots
tensor([1, 2, 3], dtype=torch.int32)
>>> info
tensor(0, dtype=torch.int32)
.. _LAPACK's sytrf:
https://www.netlib.org/lapack/explore-html/d3/db6/group__double_s_ycomputational_gad91bde1212277b3e909eb6af7f64858a.html
""",
)
ldl_solve = _add_docstr(
_linalg.linalg_ldl_solve,
r"""
linalg.ldl_solve(LD, pivots, B, *, hermitian=False, out=None) -> Tensor
Computes the solution of a system of linear equations using the LDL factorization.
:attr:`LD` and :attr:`pivots` are the compact representation of the LDL factorization and
are expected to be computed by :func:`torch.linalg.ldl_factor_ex`.
The :attr:`hermitian` argument to this function should be the same
as the corresponding argument in :func:`torch.linalg.ldl_factor_ex`.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
"""
+ rf"""
.. warning:: {common_notes["experimental_warning"]}
"""
+ r"""
Args:
LD (Tensor): the `n \times n` matrix or the batch of such matrices of size
`(*, n, n)` where `*` is one or more batch dimensions.
pivots (Tensor): the pivots corresponding to the LDL factorization of :attr:`LD`.
B (Tensor): right-hand side tensor of shape `(*, n, k)`.
Keyword args:
hermitian (bool, optional): whether to consider the decomposed matrix to be Hermitian or symmetric.
For real-valued matrices, this switch has no effect. Default: `False`.
out (tuple, optional): output tensor. `B` may be passed as `out` and the result is computed in-place on `B`.
Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(2, 3, 3)
>>> A = A @ A.mT # make symmetric
>>> LD, pivots, info = torch.linalg.ldl_factor_ex(A)
>>> B = torch.randn(2, 3, 4)
>>> X = torch.linalg.ldl_solve(LD, pivots, B)
>>> torch.linalg.norm(A @ X - B)
    tensor(0.0001)
""",
)
lstsq = _add_docstr(
_linalg.linalg_lstsq,
r"""
torch.linalg.lstsq(A, B, rcond=None, *, driver=None) -> (Tensor, Tensor, Tensor, Tensor)
Computes a solution to the least squares problem of a system of linear equations.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **least squares problem** for a linear system :math:`AX = B` with
:math:`A \in \mathbb{K}^{m \times n}, B \in \mathbb{K}^{m \times k}` is defined as
.. math::
\min_{X \in \mathbb{K}^{n \times k}} \|AX - B\|_F
where :math:`\|-\|_F` denotes the Frobenius norm.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
:attr:`driver` chooses the backend function that will be used.
For CPU inputs, the valid values are `'gels'`, `'gelsy'`, `'gelsd'`, `'gelss'`.
To choose the best driver on CPU consider:
- If :attr:`A` is well-conditioned (its `condition number`_ is not too large), or you do not mind some precision loss.
- For a general matrix: `'gelsy'` (QR with pivoting) (default)
- If :attr:`A` is full-rank: `'gels'` (QR)
- If :attr:`A` is not well-conditioned.
- `'gelsd'` (tridiagonal reduction and SVD)
- But if you run into memory issues: `'gelss'` (full SVD).
For CUDA input, the only valid driver is `'gels'`, which assumes that :attr:`A` is full-rank.
See also the `full description of these drivers`_.
:attr:`rcond` is used to determine the effective rank of the matrices in :attr:`A`
when :attr:`driver` is one of (`'gelsy'`, `'gelsd'`, `'gelss'`).
In this case, if :math:`\sigma_i` are the singular values of `A` in decreasing order,
:math:`\sigma_i` will be rounded down to zero if :math:`\sigma_i \leq \text{rcond} \cdot \sigma_1`.
If :attr:`rcond`\ `= None` (default), :attr:`rcond` is set to the machine precision of the dtype of :attr:`A` times `max(m, n)`.
This function returns the solution to the problem and some extra information in a named tuple of
four tensors `(solution, residuals, rank, singular_values)`. For inputs :attr:`A`, :attr:`B`
of shape `(*, m, n)`, `(*, m, k)` respectively, it contains
- `solution`: the least squares solution. It has shape `(*, n, k)`.
- `residuals`: the squared residuals of the solutions, that is, :math:`\|AX - B\|_F^2`.
It has shape `(*, k)`.
It is computed when `m > n` and every matrix in :attr:`A` is full-rank,
otherwise, it is an empty tensor.
If :attr:`A` is a batch of matrices and any matrix in the batch is not full rank,
then an empty tensor is returned. This behavior may change in a future PyTorch release.
- `rank`: tensor of ranks of the matrices in :attr:`A`.
It has shape equal to the batch dimensions of :attr:`A`.
It is computed when :attr:`driver` is one of (`'gelsy'`, `'gelsd'`, `'gelss'`),
otherwise it is an empty tensor.
- `singular_values`: tensor of singular values of the matrices in :attr:`A`.
It has shape `(*, min(m, n))`.
It is computed when :attr:`driver` is one of (`'gelsd'`, `'gelss'`),
otherwise it is an empty tensor.
.. note::
This function computes `X = \ `:attr:`A`\ `.pinverse() @ \ `:attr:`B` in a faster and
more numerically stable way than performing the computations separately.
.. warning::
The default value of :attr:`rcond` may change in a future PyTorch release.
It is therefore recommended to use a fixed value to avoid potential
breaking changes.
Args:
A (Tensor): lhs tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
B (Tensor): rhs tensor of shape `(*, m, k)` where `*` is zero or more batch dimensions.
rcond (float, optional): used to determine the effective rank of :attr:`A`.
If :attr:`rcond`\ `= None`, :attr:`rcond` is set to the machine
precision of the dtype of :attr:`A` times `max(m, n)`. Default: `None`.
Keyword args:
driver (str, optional): name of the LAPACK/MAGMA method to be used.
If `None`, `'gelsy'` is used for CPU inputs and `'gels'` for CUDA inputs.
Default: `None`.
Returns:
A named tuple `(solution, residuals, rank, singular_values)`.
Examples::
>>> A = torch.randn(1,3,3)
>>> A
tensor([[[-1.0838, 0.0225, 0.2275],
[ 0.2438, 0.3844, 0.5499],
[ 0.1175, -0.9102, 2.0870]]])
>>> B = torch.randn(2,3,3)
>>> B
tensor([[[-0.6772, 0.7758, 0.5109],
[-1.4382, 1.3769, 1.1818],
[-0.3450, 0.0806, 0.3967]],
[[-1.3994, -0.1521, -0.1473],
[ 1.9194, 1.0458, 0.6705],
[-1.1802, -0.9796, 1.4086]]])
>>> X = torch.linalg.lstsq(A, B).solution # A is broadcasted to shape (2, 3, 3)
>>> torch.dist(X, torch.linalg.pinv(A) @ B)
tensor(1.5152e-06)
>>> S = torch.linalg.lstsq(A, B, driver='gelsd').singular_values
>>> torch.dist(S, torch.linalg.svdvals(A))
tensor(2.3842e-07)
>>> A[:, 0].zero_() # Decrease the rank of A
>>> rank = torch.linalg.lstsq(A, B).rank
>>> rank
tensor([2])
.. _condition number:
https://pytorch.org/docs/main/linalg.html#torch.linalg.cond
.. _full description of these drivers:
https://www.netlib.org/lapack/lug/node27.html
""",
)
matrix_power = _add_docstr(
_linalg.linalg_matrix_power,
r"""
matrix_power(A, n, *, out=None) -> Tensor
Computes the `n`-th power of a square matrix for an integer `n`.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
If :attr:`n`\ `= 0`, it returns the identity matrix (or batch) of the same shape
as :attr:`A`. If :attr:`n` is negative, it returns the inverse of each matrix
(if invertible) raised to the power of `abs(n)`.
.. note::
Consider using :func:`torch.linalg.solve` if possible for multiplying a matrix on the left by
a negative power as, if :attr:`n`\ `> 0`::
torch.linalg.solve(matrix_power(A, n), B) == matrix_power(A, -n) @ B
It is always preferred to use :func:`~solve` when possible, as it is faster and more
numerically stable than computing :math:`A^{-n}` explicitly.
.. seealso::
:func:`torch.linalg.solve` computes :attr:`A`\ `.inverse() @ \ `:attr:`B` with a
numerically stable algorithm.
Args:
A (Tensor): tensor of shape `(*, m, m)` where `*` is zero or more batch dimensions.
n (int): the exponent.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if :attr:`n`\ `< 0` and the matrix :attr:`A` or any matrix in the
batch of matrices :attr:`A` is not invertible.
Examples::
>>> A = torch.randn(3, 3)
>>> torch.linalg.matrix_power(A, 0)
tensor([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> torch.linalg.matrix_power(A, 3)
tensor([[ 1.0756, 0.4980, 0.0100],
[-1.6617, 1.4994, -1.9980],
[-0.4509, 0.2731, 0.8001]])
>>> torch.linalg.matrix_power(A.expand(2, -1, -1), -2)
tensor([[[ 0.2640, 0.4571, -0.5511],
[-1.0163, 0.3491, -1.5292],
[-0.4899, 0.0822, 0.2773]],
[[ 0.2640, 0.4571, -0.5511],
[-1.0163, 0.3491, -1.5292],
[-0.4899, 0.0822, 0.2773]]])
""",
)
matrix_rank = _add_docstr(
_linalg.linalg_matrix_rank,
r"""
linalg.matrix_rank(A, *, atol=None, rtol=None, hermitian=False, out=None) -> Tensor
Computes the numerical rank of a matrix.
The matrix rank is computed as the number of singular values
(or eigenvalues in absolute value when :attr:`hermitian`\ `= True`)
that are greater than :math:`\max(\text{atol}, \sigma_1 * \text{rtol})` threshold,
where :math:`\sigma_1` is the largest singular value (or eigenvalue).
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
If :attr:`hermitian`\ `= True`, :attr:`A` is assumed to be Hermitian if complex or
symmetric if real, but this is not checked internally. Instead, just the lower
triangular part of the matrix is used in the computations.
If :attr:`rtol` is not specified and :attr:`A` is a matrix of dimensions `(m, n)`,
the relative tolerance is set to be :math:`\text{rtol} = \max(m, n) \varepsilon`
where :math:`\varepsilon` is the epsilon value for the dtype of :attr:`A` (see :class:`.finfo`).
If :attr:`rtol` is not specified and :attr:`atol` is specified to be larger than zero then
:attr:`rtol` is set to zero.
If :attr:`atol` or :attr:`rtol` is a :class:`torch.Tensor`, its shape must be broadcastable to that
of the singular values of :attr:`A` as returned by :func:`torch.linalg.svdvals`.
.. note::
    This function has a NumPy compatible variant `linalg.matrix_rank(A, tol, hermitian=False)`.
However, use of the positional argument :attr:`tol` is deprecated in favor of :attr:`atol` and :attr:`rtol`.
"""
+ rf"""
.. note:: The matrix rank is computed using a singular value decomposition
:func:`torch.linalg.svdvals` if :attr:`hermitian`\ `= False` (default) and the eigenvalue
decomposition :func:`torch.linalg.eigvalsh` when :attr:`hermitian`\ `= True`.
{common_notes["sync_note"]}
"""
+ r"""
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
tol (float, Tensor, optional): [NumPy Compat] Alias for :attr:`atol`. Default: `None`.
Keyword args:
atol (float, Tensor, optional): the absolute tolerance value. When `None` it's considered to be zero.
Default: `None`.
rtol (float, Tensor, optional): the relative tolerance value. See above for the value it takes when `None`.
Default: `None`.
hermitian(bool): indicates whether :attr:`A` is Hermitian if complex
or symmetric if real. Default: `False`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.eye(10)
>>> torch.linalg.matrix_rank(A)
tensor(10)
>>> B = torch.eye(10)
>>> B[0, 0] = 0
>>> torch.linalg.matrix_rank(B)
tensor(9)
>>> A = torch.randn(4, 3, 2)
>>> torch.linalg.matrix_rank(A)
tensor([2, 2, 2, 2])
>>> A = torch.randn(2, 4, 2, 3)
>>> torch.linalg.matrix_rank(A)
tensor([[2, 2, 2, 2],
[2, 2, 2, 2]])
>>> A = torch.randn(2, 4, 3, 3, dtype=torch.complex64)
>>> torch.linalg.matrix_rank(A)
tensor([[3, 3, 3, 3],
[3, 3, 3, 3]])
>>> torch.linalg.matrix_rank(A, hermitian=True)
tensor([[3, 3, 3, 3],
[3, 3, 3, 3]])
>>> torch.linalg.matrix_rank(A, atol=1.0, rtol=0.0)
tensor([[3, 2, 2, 2],
[1, 2, 1, 2]])
>>> torch.linalg.matrix_rank(A, atol=1.0, rtol=0.0, hermitian=True)
tensor([[2, 2, 2, 1],
[1, 2, 2, 2]])
""",
)
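# Illustrative sketch (not part of the upstream docstring): recomputes the default tolerance
# described above by hand. With atol = 0 and rtol = max(m, n) * eps, the rank is the number
# of singular values above sigma_1 * rtol. The helper name is hypothetical.
def _matrix_rank_tolerance_sketch():
    import torch
    A = torch.randn(5, 3)
    S = torch.linalg.svdvals(A)                           # descending singular values
    rtol = max(A.shape[-2:]) * torch.finfo(A.dtype).eps   # default when rtol is None
    threshold = S[0] * rtol                               # atol defaults to zero
    rank_by_hand = int((S > threshold).sum())
    assert rank_by_hand == int(torch.linalg.matrix_rank(A))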
norm = _add_docstr(
_linalg.linalg_norm,
r"""
linalg.norm(A, ord=None, dim=None, keepdim=False, *, out=None, dtype=None) -> Tensor
Computes a vector or matrix norm.
Supports input of float, double, cfloat and cdouble dtypes.
Whether this function computes a vector or matrix norm is determined as follows:
- If :attr:`dim` is an `int`, the vector norm will be computed.
- If :attr:`dim` is a `2`-`tuple`, the matrix norm will be computed.
- If :attr:`dim`\ `= None` and :attr:`ord`\ `= None`,
:attr:`A` will be flattened to 1D and the `2`-norm of the resulting vector will be computed.
- If :attr:`dim`\ `= None` and :attr:`ord` `!= None`, :attr:`A` must be 1D or 2D.
:attr:`ord` defines the norm that is computed. The following norms are supported:
====================== ========================= ========================================================
:attr:`ord` norm for matrices norm for vectors
====================== ========================= ========================================================
`None` (default) Frobenius norm `2`-norm (see below)
`'fro'` Frobenius norm -- not supported --
`'nuc'` nuclear norm -- not supported --
`inf` `max(sum(abs(x), dim=1))` `max(abs(x))`
`-inf` `min(sum(abs(x), dim=1))` `min(abs(x))`
`0` -- not supported -- `sum(x != 0)`
`1` `max(sum(abs(x), dim=0))` as below
`-1` `min(sum(abs(x), dim=0))` as below
`2` largest singular value as below
`-2` smallest singular value as below
other `int` or `float` -- not supported -- `sum(abs(x)^{ord})^{(1 / ord)}`
====================== ========================= ========================================================
where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
.. seealso::
:func:`torch.linalg.vector_norm` computes a vector norm.
:func:`torch.linalg.matrix_norm` computes a matrix norm.
The above functions are often clearer and more flexible than using :func:`torch.linalg.norm`.
For example, `torch.linalg.norm(A, ord=1, dim=(0, 1))` always
computes a matrix norm, but with `torch.linalg.vector_norm(A, ord=1, dim=(0, 1))` it is possible
to compute a vector norm over the two dimensions.
Args:
A (Tensor): tensor of shape `(*, n)` or `(*, m, n)` where `*` is zero or more batch dimensions
ord (int, float, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `None`
dim (int, Tuple[int], optional): dimensions over which to compute
the vector or matrix norm. See above for the behavior when :attr:`dim`\ `= None`.
Default: `None`
keepdim (bool, optional): If set to `True`, the reduced dimensions are retained
in the result as dimensions with size one. Default: `False`
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
dtype (:class:`torch.dtype`, optional): If specified, the input tensor is cast to
:attr:`dtype` before performing the operation, and the returned tensor's type
will be :attr:`dtype`. Default: `None`
Returns:
A real-valued tensor, even when :attr:`A` is complex.
Examples::
>>> from torch import linalg as LA
>>> a = torch.arange(9, dtype=torch.float) - 4
>>> a
tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.])
>>> B = a.reshape((3, 3))
>>> B
tensor([[-4., -3., -2.],
[-1., 0., 1.],
[ 2., 3., 4.]])
>>> LA.norm(a)
tensor(7.7460)
>>> LA.norm(B)
tensor(7.7460)
>>> LA.norm(B, 'fro')
tensor(7.7460)
>>> LA.norm(a, float('inf'))
tensor(4.)
>>> LA.norm(B, float('inf'))
tensor(9.)
>>> LA.norm(a, -float('inf'))
tensor(0.)
>>> LA.norm(B, -float('inf'))
tensor(2.)
>>> LA.norm(a, 1)
tensor(20.)
>>> LA.norm(B, 1)
tensor(7.)
>>> LA.norm(a, -1)
tensor(0.)
>>> LA.norm(B, -1)
tensor(6.)
>>> LA.norm(a, 2)
tensor(7.7460)
>>> LA.norm(B, 2)
tensor(7.3485)
>>> LA.norm(a, -2)
tensor(0.)
>>> LA.norm(B.double(), -2)
tensor(1.8570e-16, dtype=torch.float64)
>>> LA.norm(a, 3)
tensor(5.8480)
>>> LA.norm(a, -3)
tensor(0.)
Using the :attr:`dim` argument to compute vector norms::
>>> c = torch.tensor([[1., 2., 3.],
... [-1, 1, 4]])
>>> LA.norm(c, dim=0)
tensor([1.4142, 2.2361, 5.0000])
>>> LA.norm(c, dim=1)
tensor([3.7417, 4.2426])
>>> LA.norm(c, ord=1, dim=1)
tensor([6., 6.])
Using the :attr:`dim` argument to compute matrix norms::
>>> A = torch.arange(8, dtype=torch.float).reshape(2, 2, 2)
>>> LA.norm(A, dim=(1,2))
tensor([ 3.7417, 11.2250])
>>> LA.norm(A[0, :, :]), LA.norm(A[1, :, :])
(tensor(3.7417), tensor(11.2250))
""",
)
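# Illustrative sketch (not part of the upstream docstring): the seealso above contrasts norm
# and vector_norm over two dimensions. With a 2-tuple dim, norm computes a matrix norm while
# vector_norm flattens both dimensions into one vector. The helper name is hypothetical.
def _norm_vs_vector_norm_sketch():
    import torch
    A = torch.randn(4, 5)
    m = torch.linalg.norm(A, ord=1, dim=(0, 1))          # matrix 1-norm: max column sum
    v = torch.linalg.vector_norm(A, ord=1, dim=(0, 1))   # vector 1-norm: sum of |entries|
    assert torch.isclose(m, A.abs().sum(dim=0).max())
    assert torch.isclose(v, A.abs().sum())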
vector_norm = _add_docstr(
_linalg.linalg_vector_norm,
r"""
linalg.vector_norm(x, ord=2, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
Computes a vector norm.
If :attr:`x` is complex valued, it computes the norm of :attr:`x`\ `.abs()`
Supports input of float, double, cfloat and cdouble dtypes.
This function does not necessarily treat multidimensional :attr:`x` as a batch of
vectors, instead:
- If :attr:`dim`\ `= None`, :attr:`x` will be flattened before the norm is computed.
- If :attr:`dim` is an `int` or a `tuple`, the norm will be computed over these dimensions
and the other dimensions will be treated as batch dimensions.
This behavior is for consistency with :func:`torch.linalg.norm`.
:attr:`ord` defines the vector norm that is computed. The following norms are supported:
====================== ===============================
:attr:`ord` vector norm
====================== ===============================
`2` (default) `2`-norm (see below)
`inf` `max(abs(x))`
`-inf` `min(abs(x))`
`0` `sum(x != 0)`
other `int` or `float` `sum(abs(x)^{ord})^{(1 / ord)}`
====================== ===============================
where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
:attr:`dtype` may be used to perform the computation in a more precise dtype.
It is semantically equivalent to calling ``linalg.vector_norm(x.to(dtype))``
but it is faster in some cases.
.. seealso::
:func:`torch.linalg.matrix_norm` computes a matrix norm.
Args:
x (Tensor): tensor, flattened by default, but this behavior can be
controlled using :attr:`dim`.
ord (int, float, inf, -inf, optional): order of norm. Default: `2`
dim (int, Tuple[int], optional): dimensions over which to compute
the norm. See above for the behavior when :attr:`dim`\ `= None`.
Default: `None`
keepdim (bool, optional): If set to `True`, the reduced dimensions are retained
in the result as dimensions with size one. Default: `False`
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
dtype (:class:`torch.dtype`, optional): type used to perform the accumulation and the return.
If specified, :attr:`x` is cast to :attr:`dtype` before performing the operation,
and the returned tensor's type will be :attr:`dtype` if real and of its real counterpart if complex.
:attr:`dtype` may be complex if :attr:`x` is complex, otherwise it must be real.
:attr:`x` should be convertible without narrowing to :attr:`dtype`. Default: None
Returns:
A real-valued tensor, even when :attr:`x` is complex.
Examples::
>>> from torch import linalg as LA
>>> a = torch.arange(9, dtype=torch.float) - 4
>>> a
tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.])
>>> B = a.reshape((3, 3))
>>> B
tensor([[-4., -3., -2.],
[-1., 0., 1.],
[ 2., 3., 4.]])
>>> LA.vector_norm(a, ord=3.5)
tensor(5.4345)
>>> LA.vector_norm(B, ord=3.5)
tensor(5.4345)
""",
)
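# Illustrative sketch (not part of the upstream docstring): the dtype keyword described above
# is semantically equivalent to casting the input first; the two calls below agree.
# The helper name is hypothetical.
def _vector_norm_dtype_sketch():
    import torch
    x = torch.randn(1000, dtype=torch.float32)
    a = torch.linalg.vector_norm(x, dtype=torch.float64)
    b = torch.linalg.vector_norm(x.to(torch.float64))
    assert torch.allclose(a, b)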
matrix_norm = _add_docstr(
_linalg.linalg_matrix_norm,
r"""
linalg.matrix_norm(A, ord='fro', dim=(-2, -1), keepdim=False, *, dtype=None, out=None) -> Tensor
Computes a matrix norm.
If :attr:`A` is complex valued, it computes the norm of :attr:`A`\ `.abs()`
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices: the norm will be computed over the
dimensions specified by the 2-tuple :attr:`dim` and the other dimensions will
be treated as batch dimensions. The output will have the same batch dimensions.
:attr:`ord` defines the matrix norm that is computed. The following norms are supported:
====================== ========================================================
:attr:`ord` matrix norm
====================== ========================================================
`'fro'` (default) Frobenius norm
`'nuc'` nuclear norm
`inf` `max(sum(abs(x), dim=1))`
`-inf` `min(sum(abs(x), dim=1))`
`1` `max(sum(abs(x), dim=0))`
`-1` `min(sum(abs(x), dim=0))`
`2` largest singular value
`-2` smallest singular value
====================== ========================================================
where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
Args:
A (Tensor): tensor with two or more dimensions. By default its
shape is interpreted as `(*, m, n)` where `*` is zero or more
batch dimensions, but this behavior can be controlled using :attr:`dim`.
ord (int, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `'fro'`
dim (Tuple[int, int], optional): dimensions over which to compute the norm. Default: `(-2, -1)`
keepdim (bool, optional): If set to `True`, the reduced dimensions are retained
in the result as dimensions with size one. Default: `False`
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
dtype (:class:`torch.dtype`, optional): If specified, the input tensor is cast to
:attr:`dtype` before performing the operation, and the returned tensor's type
will be :attr:`dtype`. Default: `None`
Returns:
A real-valued tensor, even when :attr:`A` is complex.
Examples::
>>> from torch import linalg as LA
>>> A = torch.arange(9, dtype=torch.float).reshape(3, 3)
>>> A
tensor([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> LA.matrix_norm(A)
tensor(14.2829)
>>> LA.matrix_norm(A, ord=-1)
tensor(9.)
>>> B = A.expand(2, -1, -1)
>>> B
tensor([[[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]],
[[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]])
>>> LA.matrix_norm(B)
tensor([14.2829, 14.2829])
>>> LA.matrix_norm(B, dim=(0, 2))
tensor([ 3.1623, 10.0000, 17.2627])
""",
)
matmul = _add_docstr(
_linalg.linalg_matmul,
r"""
linalg.matmul(input, other, *, out=None) -> Tensor
Alias for :func:`torch.matmul`
""",
)
diagonal = _add_docstr(
_linalg.linalg_diagonal,
r"""
linalg.diagonal(A, *, offset=0, dim1=-2, dim2=-1) -> Tensor
Alias for :func:`torch.diagonal` with defaults :attr:`dim1`\ `= -2`, :attr:`dim2`\ `= -1`.
""",
)
multi_dot = _add_docstr(
_linalg.linalg_multi_dot,
r"""
linalg.multi_dot(tensors, *, out=None)
Efficiently multiplies two or more matrices by reordering the multiplications so that
the fewest arithmetic operations are performed.
Supports inputs of float, double, cfloat and cdouble dtypes.
This function does not support batched inputs.
Every tensor in :attr:`tensors` must be 2D, except for the first and last which
may be 1D. If the first tensor is a 1D vector of shape `(n,)` it is treated as a row vector
of shape `(1, n)`, similarly if the last tensor is a 1D vector of shape `(n,)` it is treated
as a column vector of shape `(n, 1)`.
If the first and last tensors are matrices, the output will be a matrix.
However, if either of them is a 1D vector, the corresponding dimension is dropped from the output;
if both are 1D vectors, the output is a 0-dimensional scalar tensor.
Differences with `numpy.linalg.multi_dot`:
- Unlike `numpy.linalg.multi_dot`, the first and last tensors must either be 1D or 2D
whereas NumPy allows them to be nD
.. warning:: This function does not broadcast.
.. note:: This function is implemented by chaining :func:`torch.mm` calls after
computing the optimal matrix multiplication order.
.. note:: The cost of multiplying two matrices with shapes `(a, b)` and `(b, c)` is
`a * b * c`. Given matrices `A`, `B`, `C` with shapes `(10, 100)`,
`(100, 5)`, `(5, 50)` respectively, we can calculate the cost of different
multiplication orders as follows:
.. math::
\begin{align*}
\operatorname{cost}((AB)C) &= 10 \times 100 \times 5 + 10 \times 5 \times 50 = 7500 \\
\operatorname{cost}(A(BC)) &= 10 \times 100 \times 50 + 100 \times 5 \times 50 = 75000
\end{align*}
In this case, multiplying `A` and `B` first followed by `C` is 10 times faster.
Args:
tensors (Sequence[Tensor]): two or more tensors to multiply. The first and last
tensors may be 1D or 2D. Every other tensor must be 2D.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> from torch.linalg import multi_dot
>>> multi_dot([torch.tensor([1, 2]), torch.tensor([2, 3])])
tensor(8)
>>> multi_dot([torch.tensor([[1, 2]]), torch.tensor([2, 3])])
tensor([8])
>>> multi_dot([torch.tensor([[1, 2]]), torch.tensor([[2], [3]])])
tensor([[8]])
>>> A = torch.arange(2 * 3).view(2, 3)
>>> B = torch.arange(3 * 2).view(3, 2)
>>> C = torch.arange(2 * 2).view(2, 2)
>>> multi_dot((A, B, C))
tensor([[ 26, 49],
[ 80, 148]])
""",
)
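# Illustrative sketch (not part of the upstream docstring): the cost note above uses matrices
# of shapes (10, 100), (100, 5) and (5, 50); multi_dot picks the cheap (AB)C ordering and
# matches a plain chain of matmuls. The helper name is hypothetical.
def _multi_dot_sketch():
    import torch
    A = torch.randn(10, 100)
    B = torch.randn(100, 5)
    C = torch.randn(5, 50)
    out = torch.linalg.multi_dot([A, B, C])
    assert out.shape == (10, 50)
    assert torch.allclose(out, A @ B @ C, atol=1e-4)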
svd = _add_docstr(
_linalg.linalg_svd,
r"""
linalg.svd(A, full_matrices=True, *, driver=None, out=None) -> (Tensor, Tensor, Tensor)
Computes the singular value decomposition (SVD) of a matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **full SVD** of a matrix
:math:`A \in \mathbb{K}^{m \times n}`, if `k = min(m,n)`, is defined as
.. math::
A = U \operatorname{diag}(S) V^{\text{H}}
\mathrlap{\qquad U \in \mathbb{K}^{m \times m}, S \in \mathbb{R}^k, V \in \mathbb{K}^{n \times n}}
where :math:`\operatorname{diag}(S) \in \mathbb{K}^{m \times n}`,
:math:`V^{\text{H}}` is the conjugate transpose when :math:`V` is complex, and the transpose when :math:`V` is real-valued.
The matrices :math:`U`, :math:`V` (and thus :math:`V^{\text{H}}`) are orthogonal in the real case, and unitary in the complex case.
When `m > n` (resp. `m < n`) we can drop the last `m - n` (resp. `n - m`) columns of `U` (resp. `V`) to form the **reduced SVD**:
.. math::
A = U \operatorname{diag}(S) V^{\text{H}}
\mathrlap{\qquad U \in \mathbb{K}^{m \times k}, S \in \mathbb{R}^k, V \in \mathbb{K}^{n \times k}}
where :math:`\operatorname{diag}(S) \in \mathbb{K}^{k \times k}`.
In this case, :math:`U` and :math:`V` also have orthonormal columns.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The returned decomposition is a named tuple `(U, S, Vh)`
which corresponds to :math:`U`, :math:`S`, :math:`V^{\text{H}}` above.
The singular values are returned in descending order.
The parameter :attr:`full_matrices` chooses between the full (default) and reduced SVD.
The :attr:`driver` kwarg may be used in CUDA with a cuSOLVER backend to choose the algorithm used to compute the SVD.
The choice of a driver is a trade-off between accuracy and speed.
- If :attr:`A` is well-conditioned (its `condition number`_ is not too large), or you do not mind some precision loss.
- For a general matrix: `'gesvdj'` (Jacobi method)
- If :attr:`A` is tall or wide (`m >> n` or `m << n`): `'gesvda'` (Approximate method)
- If :attr:`A` is not well-conditioned or precision is relevant: `'gesvd'` (QR based)
By default (:attr:`driver`\ `= None`), we call `'gesvdj'` and, if it fails, we fallback to `'gesvd'`.
Differences with `numpy.linalg.svd`:
- Unlike `numpy.linalg.svd`, this function always returns a tuple of three tensors
and it doesn't support the `compute_uv` argument.
Please use :func:`torch.linalg.svdvals`, which computes only the singular values,
instead of `compute_uv=False`.
.. note:: When :attr:`full_matrices`\ `= True`, the gradients with respect to `U[..., :, min(m, n):]`
and `Vh[..., min(m, n):, :]` will be ignored, as those vectors can be arbitrary bases
of the corresponding subspaces.
.. warning:: The returned tensors `U` and `V` are not unique, nor are they continuous with
respect to :attr:`A`.
Due to this lack of uniqueness, different hardware and software may compute
different singular vectors.
This non-uniqueness is caused by the fact that multiplying any pair of singular
vectors :math:`u_k, v_k` by `-1` in the real case or by
:math:`e^{i \phi}, \phi \in \mathbb{R}` in the complex case produces another two
valid singular vectors of the matrix.
For this reason, the loss function shall not depend on this :math:`e^{i \phi}` quantity,
as it is not well-defined.
This is checked for complex inputs when computing the gradients of this function. As such,
when inputs are complex and are on a CUDA device, the computation of the gradients
of this function synchronizes that device with the CPU.
.. warning:: Gradients computed using `U` or `Vh` will only be finite when
:attr:`A` does not have repeated singular values. If :attr:`A` is rectangular,
additionally, zero must also not be one of its singular values.
Furthermore, if the distance between any two singular values is close to zero,
the gradient will be numerically unstable, as it depends on the singular values
:math:`\sigma_i` through the computation of
:math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`.
In the rectangular case, the gradient will also be numerically unstable when
:attr:`A` has small singular values, as it also depends on the computation of
:math:`\frac{1}{\sigma_i}`.
.. seealso::
:func:`torch.linalg.svdvals` computes only the singular values.
Unlike :func:`torch.linalg.svd`, the gradients of :func:`~svdvals` are always
numerically stable.
:func:`torch.linalg.eig` for a function that computes another type of spectral
decomposition of a matrix. The eigendecomposition works just on square matrices.
:func:`torch.linalg.eigh` for a (faster) function that computes the eigenvalue decomposition
for Hermitian and symmetric matrices.
:func:`torch.linalg.qr` for another (much faster) decomposition that works on general
matrices.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
full_matrices (bool, optional): controls whether to compute the full or reduced
SVD, and consequently,
the shape of the returned tensors
`U` and `Vh`. Default: `True`.
Keyword args:
driver (str, optional): name of the cuSOLVER method to be used. This keyword argument only works on CUDA inputs.
Available options are: `None`, `gesvd`, `gesvdj`, and `gesvda`.
Default: `None`.
out (tuple, optional): output tuple of three tensors. Ignored if `None`.
Returns:
A named tuple `(U, S, Vh)` which corresponds to :math:`U`, :math:`S`, :math:`V^{\text{H}}` above.
`S` will always be real-valued, even when :attr:`A` is complex.
It will also be ordered in descending order.
`U` and `Vh` will have the same dtype as :attr:`A`. The left / right singular vectors will be given by
the columns of `U` and the rows of `Vh` respectively.
Examples::
>>> A = torch.randn(5, 3)
>>> U, S, Vh = torch.linalg.svd(A, full_matrices=False)
>>> U.shape, S.shape, Vh.shape
(torch.Size([5, 3]), torch.Size([3]), torch.Size([3, 3]))
>>> torch.dist(A, U @ torch.diag(S) @ Vh)
tensor(1.0486e-06)
>>> U, S, Vh = torch.linalg.svd(A)
>>> U.shape, S.shape, Vh.shape
(torch.Size([5, 5]), torch.Size([3]), torch.Size([3, 3]))
>>> torch.dist(A, U[:, :3] @ torch.diag(S) @ Vh)
tensor(1.0486e-06)
>>> A = torch.randn(7, 5, 3)
>>> U, S, Vh = torch.linalg.svd(A, full_matrices=False)
>>> torch.dist(A, U @ torch.diag_embed(S) @ Vh)
tensor(3.0957e-06)
.. _condition number:
https://pytorch.org/docs/main/linalg.html#torch.linalg.cond
.. _the resulting vectors will span the same subspace:
https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
""",
)
svdvals = _add_docstr(
_linalg.linalg_svdvals,
r"""
linalg.svdvals(A, *, driver=None, out=None) -> Tensor
Computes the singular values of a matrix.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The singular values are returned in descending order.
.. note:: This function is equivalent to NumPy's `linalg.svd(A, compute_uv=False)`.
"""
+ rf"""
.. note:: {common_notes["sync_note"]}
"""
+ r"""
.. seealso::
:func:`torch.linalg.svd` computes the full singular value decomposition.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
Keyword args:
driver (str, optional): name of the cuSOLVER method to be used. This keyword argument only works on CUDA inputs.
Available options are: `None`, `gesvd`, `gesvdj`, and `gesvda`.
Check :func:`torch.linalg.svd` for details.
Default: `None`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Returns:
A real-valued tensor, even when :attr:`A` is complex.
Examples::
>>> A = torch.randn(5, 3)
>>> S = torch.linalg.svdvals(A)
>>> S
tensor([2.5139, 2.1087, 1.1066])
>>> torch.dist(S, torch.linalg.svd(A, full_matrices=False).S)
tensor(2.4576e-07)
""",
)
cond = _add_docstr(
_linalg.linalg_cond,
r"""
linalg.cond(A, p=None, *, out=None) -> Tensor
Computes the condition number of a matrix with respect to a matrix norm.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **condition number** :math:`\kappa` of a matrix
:math:`A \in \mathbb{K}^{n \times n}` is defined as
.. math::
\kappa(A) = \|A\|_p\|A^{-1}\|_p
The condition number of :attr:`A` measures the numerical stability of the linear system `AX = B`
with respect to a matrix norm.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
:attr:`p` defines the matrix norm that is computed. The following norms are supported:
========= =================================
:attr:`p` matrix norm
========= =================================
`None` `2`-norm (largest singular value)
`'fro'` Frobenius norm
`'nuc'` nuclear norm
`inf` `max(sum(abs(x), dim=1))`
`-inf` `min(sum(abs(x), dim=1))`
`1` `max(sum(abs(x), dim=0))`
`-1` `min(sum(abs(x), dim=0))`
`2` largest singular value
`-2` smallest singular value
========= =================================
where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
When :attr:`p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`, this function uses
:func:`torch.linalg.norm` and :func:`torch.linalg.inv`.
As such, in this case, the matrix (or every matrix in the batch) :attr:`A` has to be square
and invertible.
For :attr:`p` in `(2, -2)`, the condition number can be computed in terms of the singular values
:math:`\sigma_1 \geq \ldots \geq \sigma_n`
.. math::
\kappa_2(A) = \frac{\sigma_1}{\sigma_n}\qquad \kappa_{-2}(A) = \frac{\sigma_n}{\sigma_1}
In these cases, it is computed using :func:`torch.linalg.svdvals`. For these norms, the matrix
(or every matrix in the batch) :attr:`A` may have any shape.
.. note :: When inputs are on a CUDA device, this function synchronizes that device with the CPU
if :attr:`p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`.
.. seealso::
:func:`torch.linalg.solve` for a function that solves linear systems of square matrices.
:func:`torch.linalg.lstsq` for a function that solves linear systems of general matrices.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions
for :attr:`p` in `(2, -2)`, and of shape `(*, n, n)` where every matrix
is invertible for :attr:`p` in `('fro', 'nuc', inf, -inf, 1, -1)`.
p (int, inf, -inf, 'fro', 'nuc', optional):
the type of the matrix norm to use in the computations (see above). Default: `None`
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Returns:
A real-valued tensor, even when :attr:`A` is complex.
Raises:
RuntimeError:
if :attr:`p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`
and the :attr:`A` matrix or any matrix in the batch :attr:`A` is not square
or invertible.
Examples::
>>> A = torch.randn(3, 4, 4, dtype=torch.complex64)
>>> torch.linalg.cond(A)
>>> A = torch.tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]])
>>> torch.linalg.cond(A)
tensor([1.4142])
>>> torch.linalg.cond(A, 'fro')
tensor(3.1623)
>>> torch.linalg.cond(A, 'nuc')
tensor(9.2426)
>>> torch.linalg.cond(A, float('inf'))
tensor(2.)
>>> torch.linalg.cond(A, float('-inf'))
tensor(1.)
>>> torch.linalg.cond(A, 1)
tensor(2.)
>>> torch.linalg.cond(A, -1)
tensor(1.)
>>> torch.linalg.cond(A, 2)
tensor([1.4142])
>>> torch.linalg.cond(A, -2)
tensor([0.7071])
>>> A = torch.randn(2, 3, 3)
>>> torch.linalg.cond(A)
tensor([[9.5917],
[3.2538]])
>>> A = torch.randn(2, 3, 3, dtype=torch.complex64)
>>> torch.linalg.cond(A)
tensor([[4.6245],
[4.5671]])
""",
)
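# Illustrative sketch (not part of the upstream docstring): for p = 2 the condition number
# defined above is the ratio of the extreme singular values, so it can be recomputed from
# torch.linalg.svdvals. The helper name is hypothetical.
def _cond_from_svdvals_sketch():
    import torch
    A = torch.randn(4, 4)
    S = torch.linalg.svdvals(A)          # descending singular values
    kappa = torch.linalg.cond(A, 2)
    assert torch.allclose(kappa, S[0] / S[-1], rtol=1e-4)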
pinv = _add_docstr(
_linalg.linalg_pinv,
r"""
linalg.pinv(A, *, atol=None, rtol=None, hermitian=False, out=None) -> Tensor
Computes the pseudoinverse (Moore-Penrose inverse) of a matrix.
The pseudoinverse may be `defined algebraically`_
but it is more computationally convenient to understand it `through the SVD`_
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
If :attr:`hermitian`\ `= True`, :attr:`A` is assumed to be Hermitian if complex or
symmetric if real, but this is not checked internally. Instead, just the lower
triangular part of the matrix is used in the computations.
The singular values (or the norm of the eigenvalues when :attr:`hermitian`\ `= True`)
that are below :math:`\max(\text{atol}, \sigma_1 \cdot \text{rtol})` threshold are
treated as zero and discarded in the computation,
where :math:`\sigma_1` is the largest singular value (or eigenvalue).
If :attr:`rtol` is not specified and :attr:`A` is a matrix of dimensions `(m, n)`,
the relative tolerance is set to be :math:`\text{rtol} = \max(m, n) \varepsilon`
and :math:`\varepsilon` is the epsilon value for the dtype of :attr:`A` (see :class:`.finfo`).
If :attr:`rtol` is not specified and :attr:`atol` is specified to be larger than zero then
:attr:`rtol` is set to zero.
If :attr:`atol` or :attr:`rtol` is a :class:`torch.Tensor`, its shape must be broadcastable to that
of the singular values of :attr:`A` as returned by :func:`torch.linalg.svd`.
.. note:: This function uses :func:`torch.linalg.svd` if :attr:`hermitian`\ `= False` and
:func:`torch.linalg.eigh` if :attr:`hermitian`\ `= True`.
For CUDA inputs, this function synchronizes that device with the CPU.
.. note::
Consider using :func:`torch.linalg.lstsq` if possible for multiplying a matrix on the left by
the pseudoinverse, as::
torch.linalg.lstsq(A, B).solution == A.pinv() @ B
It is always preferred to use :func:`~lstsq` when possible, as it is faster and more
numerically stable than computing the pseudoinverse explicitly.
.. note::
This function has NumPy compatible variant `linalg.pinv(A, rcond, hermitian=False)`.
However, use of the positional argument :attr:`rcond` is deprecated in favor of :attr:`rtol`.
.. warning::
This function uses internally :func:`torch.linalg.svd` (or :func:`torch.linalg.eigh`
when :attr:`hermitian`\ `= True`), so its derivative has the same problems as those of these
functions. See the warnings in :func:`torch.linalg.svd` and :func:`torch.linalg.eigh` for
more details.
.. seealso::
:func:`torch.linalg.inv` computes the inverse of a square matrix.
:func:`torch.linalg.lstsq` computes :attr:`A`\ `.pinv() @ \ `:attr:`B` with a
numerically stable algorithm.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
rcond (float, Tensor, optional): [NumPy Compat]. Alias for :attr:`rtol`. Default: `None`.
Keyword args:
atol (float, Tensor, optional): the absolute tolerance value. When `None` it's considered to be zero.
Default: `None`.
rtol (float, Tensor, optional): the relative tolerance value. See above for the value it takes when `None`.
Default: `None`.
hermitian(bool, optional): indicates whether :attr:`A` is Hermitian if complex
or symmetric if real. Default: `False`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(3, 5)
>>> A
tensor([[ 0.5495, 0.0979, -1.4092, -0.1128, 0.4132],
[-1.1143, -0.3662, 0.3042, 1.6374, -0.9294],
[-0.3269, -0.5745, -0.0382, -0.5922, -0.6759]])
>>> torch.linalg.pinv(A)
tensor([[ 0.0600, -0.1933, -0.2090],
[-0.0903, -0.0817, -0.4752],
[-0.7124, -0.1631, -0.2272],
[ 0.1356, 0.3933, -0.5023],
[-0.0308, -0.1725, -0.5216]])
>>> A = torch.randn(2, 6, 3)
>>> Apinv = torch.linalg.pinv(A)
>>> torch.dist(Apinv @ A, torch.eye(3))
tensor(8.5633e-07)
>>> A = torch.randn(3, 3, dtype=torch.complex64)
>>> A = A + A.T.conj() # creates a Hermitian matrix
>>> Apinv = torch.linalg.pinv(A, hermitian=True)
>>> torch.dist(Apinv @ A, torch.eye(3))
tensor(1.0830e-06)
.. _defined algebraically:
https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse#Existence_and_uniqueness
.. _through the SVD:
https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse#Singular_value_decomposition_(SVD)
""",
)
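# Illustrative sketch (not part of the upstream docstring): the note above recommends
# torch.linalg.lstsq over forming the pseudoinverse when only pinv(A) @ B is needed.
# The helper name is hypothetical.
def _pinv_vs_lstsq_sketch():
    import torch
    A = torch.randn(5, 3)   # tall and almost surely of full column rank
    B = torch.randn(5, 2)
    X_lstsq = torch.linalg.lstsq(A, B).solution
    X_pinv = torch.linalg.pinv(A) @ B
    assert torch.allclose(X_lstsq, X_pinv, atol=1e-5)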
matrix_exp = _add_docstr(
_linalg.linalg_matrix_exp,
r"""
linalg.matrix_exp(A) -> Tensor
Computes the matrix exponential of a square matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
this function computes the **matrix exponential** of :math:`A \in \mathbb{K}^{n \times n}`, which is defined as
.. math::
\mathrm{matrix\_exp}(A) = \sum_{k=0}^\infty \frac{1}{k!}A^k \in \mathbb{K}^{n \times n}.
If the matrix :math:`A` has eigenvalues :math:`\lambda_i \in \mathbb{C}`,
the matrix :math:`\mathrm{matrix\_exp}(A)` has eigenvalues :math:`e^{\lambda_i} \in \mathbb{C}`.
Supports input of bfloat16, float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
Example::
>>> A = torch.empty(2, 2, 2)
>>> A[0, :, :] = torch.eye(2, 2)
>>> A[1, :, :] = 2 * torch.eye(2, 2)
>>> A
tensor([[[1., 0.],
[0., 1.]],
[[2., 0.],
[0., 2.]]])
>>> torch.linalg.matrix_exp(A)
tensor([[[2.7183, 0.0000],
[0.0000, 2.7183]],
[[7.3891, 0.0000],
[0.0000, 7.3891]]])
>>> import math
>>> A = torch.tensor([[0, math.pi/3], [-math.pi/3, 0]]) # A is skew-symmetric
>>> torch.linalg.matrix_exp(A) # matrix_exp(A) = [[cos(pi/3), sin(pi/3)], [-sin(pi/3), cos(pi/3)]]
tensor([[ 0.5000, 0.8660],
[-0.8660, 0.5000]])
""",
)
solve = _add_docstr(
_linalg.linalg_solve,
r"""
linalg.solve(A, B, *, left=True, out=None) -> Tensor
Computes the solution of a square system of linear equations with a unique solution.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
this function computes the solution :math:`X \in \mathbb{K}^{n \times k}` of the **linear system** associated to
:math:`A \in \mathbb{K}^{n \times n}, B \in \mathbb{K}^{n \times k}`, which is defined as
.. math:: AX = B
If :attr:`left`\ `= False`, this function returns the matrix :math:`X \in \mathbb{K}^{n \times k}` that solves the system
.. math::
XA = B\mathrlap{\qquad A \in \mathbb{K}^{k \times k}, B \in \mathbb{K}^{n \times k}.}
This system of linear equations has one solution if and only if :math:`A` is `invertible`_.
This function assumes that :math:`A` is invertible.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
Letting `*` be zero or more batch dimensions,
- If :attr:`A` has shape `(*, n, n)` and :attr:`B` has shape `(*, n)` (a batch of vectors) or shape
`(*, n, k)` (a batch of matrices or "multiple right-hand sides"), this function returns `X` of shape
`(*, n)` or `(*, n, k)` respectively.
- Otherwise, if :attr:`A` has shape `(*, n, n)` and :attr:`B` has shape `(n,)` or `(n, k)`, :attr:`B`
is broadcasted to have shape `(*, n)` or `(*, n, k)` respectively.
This function then returns the solution of the resulting batch of systems of linear equations.
.. note::
This function computes `X = \ `:attr:`A`\ `.inverse() @ \ `:attr:`B` in a faster and
more numerically stable way than performing the computations separately.
.. note::
It is possible to compute the solution of the system :math:`XA = B` by passing the inputs
:attr:`A` and :attr:`B` transposed and transposing the output returned by this function.
.. note::
:attr:`A` is allowed to be a non-batched `torch.sparse_csr_tensor`, but only with `left=True`.
"""
+ rf"""
.. note:: {common_notes["sync_note_has_ex"].format("torch.linalg.solve_ex")}
"""
+ r"""
.. seealso::
:func:`torch.linalg.solve_triangular` computes the solution of a triangular system of linear
equations with a unique solution.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
B (Tensor): right-hand side tensor of shape `(*, n)` or `(*, n, k)` or `(n,)` or `(n, k)`
according to the rules described above
Keyword args:
left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if the :attr:`A` matrix is not invertible or any matrix in a batched :attr:`A`
is not invertible.
Examples::
>>> A = torch.randn(3, 3)
>>> b = torch.randn(3)
>>> x = torch.linalg.solve(A, b)
>>> torch.allclose(A @ x, b)
True
>>> A = torch.randn(2, 3, 3)
>>> B = torch.randn(2, 3, 4)
>>> X = torch.linalg.solve(A, B)
>>> X.shape
torch.Size([2, 3, 4])
>>> torch.allclose(A @ X, B)
True
>>> A = torch.randn(2, 3, 3)
>>> b = torch.randn(3, 1)
>>> x = torch.linalg.solve(A, b) # b is broadcasted to size (2, 3, 1)
>>> x.shape
torch.Size([2, 3, 1])
>>> torch.allclose(A @ x, b)
True
>>> b = torch.randn(3)
>>> x = torch.linalg.solve(A, b) # b is broadcasted to size (2, 3)
>>> x.shape
torch.Size([2, 3])
>>> Ax = A @ x.unsqueeze(-1)
>>> torch.allclose(Ax, b.unsqueeze(-1).expand_as(Ax))
True
.. _invertible:
https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
""",
)
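# Illustrative sketch (not part of the upstream docstring): the note above says XA = B can be
# solved by transposing the inputs and the output; left=False does this in one call.
# The helper name is hypothetical.
def _solve_left_false_sketch():
    import torch
    A = torch.randn(3, 3) + 3 * torch.eye(3)  # almost surely invertible
    B = torch.randn(2, 3)
    X = torch.linalg.solve(A, B, left=False)             # solves X A = B
    X_via_transpose = torch.linalg.solve(A.mT, B.mT).mT
    assert torch.allclose(X, X_via_transpose, atol=1e-5)
    assert torch.allclose(X @ A, B, atol=1e-5)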
solve_triangular = _add_docstr(
_linalg.linalg_solve_triangular,
r"""
linalg.solve_triangular(A, B, *, upper, left=True, unitriangular=False, out=None) -> Tensor
Computes the solution of a triangular system of linear equations with a unique solution.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
this function computes the solution :math:`X \in \mathbb{K}^{n \times k}` of the **linear system**
associated to the triangular matrix :math:`A \in \mathbb{K}^{n \times n}` without zeros on the diagonal
(that is, it is `invertible`_) and the rectangular matrix :math:`B \in \mathbb{K}^{n \times k}`,
which is defined as
.. math:: AX = B
The argument :attr:`upper` signals whether :math:`A` is upper or lower triangular.
If :attr:`left`\ `= False`, this function returns the matrix :math:`X \in \mathbb{K}^{n \times k}` that
solves the system
.. math::
XA = B\mathrlap{\qquad A \in \mathbb{K}^{k \times k}, B \in \mathbb{K}^{n \times k}.}
If :attr:`upper`\ `= True` (resp. `False`) just the upper (resp. lower) triangular half of :attr:`A`
will be accessed. The elements of the other triangular half are assumed to be zero and are never accessed.
If :attr:`unitriangular`\ `= True`, the diagonal of :attr:`A` is assumed to be ones and will not be accessed.
The result may contain `NaN` values if the diagonal of :attr:`A` contains zeros or elements that
are very close to zero and :attr:`unitriangular`\ `= False` (default) or if the input matrix
has very small eigenvalues.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
.. seealso::
:func:`torch.linalg.solve` computes the solution of a general square system of linear
equations with a unique solution.
Args:
A (Tensor): tensor of shape `(*, n, n)` (or `(*, k, k)` if :attr:`left`\ `= False`)
where `*` is zero or more batch dimensions.
B (Tensor): right-hand side tensor of shape `(*, n, k)`.
Keyword args:
upper (bool): whether :attr:`A` is an upper or lower triangular matrix.
left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`.
unitriangular (bool, optional): if `True`, the diagonal elements of :attr:`A` are assumed to be
all equal to `1`. Default: `False`.
out (Tensor, optional): output tensor. `B` may be passed as `out` and the result is computed in-place on `B`.
Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(3, 3).triu_()
>>> B = torch.randn(3, 4)
>>> X = torch.linalg.solve_triangular(A, B, upper=True)
>>> torch.allclose(A @ X, B)
True
>>> A = torch.randn(2, 3, 3).tril_()
>>> B = torch.randn(2, 3, 4)
>>> X = torch.linalg.solve_triangular(A, B, upper=False)
>>> torch.allclose(A @ X, B)
True
>>> A = torch.randn(2, 4, 4).tril_()
>>> B = torch.randn(2, 3, 4)
>>> X = torch.linalg.solve_triangular(A, B, upper=False, left=False)
>>> torch.allclose(X @ A, B)
True
.. _invertible:
https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
""",
)
lu_factor = _add_docstr(
_linalg.linalg_lu_factor,
r"""
linalg.lu_factor(A, *, pivot=True, out=None) -> (Tensor, Tensor)
Computes a compact representation of the LU factorization with partial pivoting of a matrix.
This function computes a compact representation of the decomposition given by :func:`torch.linalg.lu`.
If the matrix is square, this representation may be used in :func:`torch.linalg.lu_solve`
to solve system of linear equations that share the matrix :attr:`A`.
The returned decomposition is represented as a named tuple `(LU, pivots)`.
The ``LU`` matrix has the same shape as the input matrix ``A``. Its upper and lower triangular
parts encode the non-constant elements of ``L`` and ``U`` of the LU decomposition of ``A``.
The returned permutation matrix is represented by a 1-indexed vector. `pivots[i] == j` represents
that in the `i`-th step of the algorithm, the `i`-th row was permuted with the `j-1`-th row.
On CUDA, one may use :attr:`pivot`\ `= False`. In this case, this function returns the LU
decomposition without pivoting if it exists.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
"""
+ rf"""
.. note:: {common_notes["sync_note_has_ex"].format("torch.linalg.lu_factor_ex")}
"""
+ r"""
.. warning:: The LU decomposition is almost never unique, as often there are different permutation
matrices that can yield different LU decompositions.
As such, different platforms, like SciPy, or inputs on different devices,
may produce different valid decompositions.
Gradient computations are only supported if the input matrix is full-rank.
If this condition is not met, no error will be thrown, but the gradient may not be finite.
This is because the LU decomposition with pivoting is not differentiable at these points.
.. seealso::
:func:`torch.linalg.lu_solve` solves a system of linear equations given the output of this
function provided the input matrix was square and invertible.
:func:`torch.lu_unpack` unpacks the tensors returned by :func:`~lu_factor` into the three
matrices `P, L, U` that form the decomposition.
:func:`torch.linalg.lu` computes the LU decomposition with partial pivoting of a possibly
non-square matrix. It is a composition of :func:`~lu_factor` and :func:`torch.lu_unpack`.
:func:`torch.linalg.solve` solves a system of linear equations. It is a composition
of :func:`~lu_factor` and :func:`~lu_solve`.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
Keyword args:
pivot (bool, optional): Whether to compute the LU decomposition with partial pivoting, or the regular LU
decomposition. :attr:`pivot`\ `= False` not supported on CPU. Default: `True`.
out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(LU, pivots)`.
Raises:
RuntimeError: if the :attr:`A` matrix is not invertible or any matrix in a batched :attr:`A`
is not invertible.
Examples::
>>> A = torch.randn(2, 3, 3)
>>> B1 = torch.randn(2, 3, 4)
>>> B2 = torch.randn(2, 3, 7)
>>> LU, pivots = torch.linalg.lu_factor(A)
>>> X1 = torch.linalg.lu_solve(LU, pivots, B1)
>>> X2 = torch.linalg.lu_solve(LU, pivots, B2)
>>> torch.allclose(A @ X1, B1)
True
>>> torch.allclose(A @ X2, B2)
True
.. _invertible:
https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
""",
)
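# Illustrative sketch (not part of the upstream docstring): the compact (LU, pivots) pair
# described above can be expanded with torch.lu_unpack into explicit P, L and U factors.
# The helper name is hypothetical.
def _lu_factor_unpack_sketch():
    import torch
    A = torch.randn(4, 4)
    LU, pivots = torch.linalg.lu_factor(A)
    P, L, U = torch.lu_unpack(LU, pivots)
    assert torch.allclose(P @ L @ U, A, atol=1e-5)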
lu_factor_ex = _add_docstr(
_linalg.linalg_lu_factor_ex,
r"""
linalg.lu_factor_ex(A, *, pivot=True, check_errors=False, out=None) -> (Tensor, Tensor, Tensor)
This is a version of :func:`~lu_factor` that does not perform error checks unless :attr:`check_errors`\ `= True`.
It also returns the :attr:`info` tensor returned by `LAPACK's getrf`_.
"""
+ rf"""
.. note:: {common_notes["sync_note_ex"]}
.. warning:: {common_notes["experimental_warning"]}
"""
+ r"""
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
Keyword args:
pivot (bool, optional): Whether to compute the LU decomposition with partial pivoting, or the regular LU
decomposition. :attr:`pivot`\ `= False` not supported on CPU. Default: `True`.
check_errors (bool, optional): controls whether to check the content of ``infos`` and raise
an error if it is non-zero. Default: `False`.
out (tuple, optional): tuple of three tensors to write the output to. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(LU, pivots, info)`.
.. _LAPACK's getrf:
https://www.netlib.org/lapack/explore-html/dd/d9a/group__double_g_ecomputational_ga0019443faea08275ca60a734d0593e60.html
""",
)
lu_solve = _add_docstr(
_linalg.linalg_lu_solve,
r"""
linalg.lu_solve(LU, pivots, B, *, left=True, adjoint=False, out=None) -> Tensor
Computes the solution of a square system of linear equations with a unique solution given an LU decomposition.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
this function computes the solution :math:`X \in \mathbb{K}^{n \times k}` of the **linear system** associated to
:math:`A \in \mathbb{K}^{n \times n}, B \in \mathbb{K}^{n \times k}`, which is defined as
.. math:: AX = B
where :math:`A` is given factorized as returned by :func:`~lu_factor`.
If :attr:`left`\ `= False`, this function returns the matrix :math:`X \in \mathbb{K}^{n \times k}` that solves the system
.. math::
XA = B\mathrlap{\qquad A \in \mathbb{K}^{k \times k}, B \in \mathbb{K}^{n \times k}.}
If :attr:`adjoint`\ `= True` (and :attr:`left`\ `= True`), given an LU factorization of :math:`A`
this function returns the :math:`X \in \mathbb{K}^{n \times k}` that solves the system
.. math::
A^{\text{H}}X = B\mathrlap{\qquad A \in \mathbb{K}^{k \times k}, B \in \mathbb{K}^{n \times k}.}
where :math:`A^{\text{H}}` is the conjugate transpose when :math:`A` is complex, and the
transpose when :math:`A` is real-valued. The :attr:`left`\ `= False` case is analogous.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
Args:
LU (Tensor): tensor of shape `(*, n, n)` (or `(*, k, k)` if :attr:`left`\ `= False`)
where `*` is zero or more batch dimensions as returned by :func:`~lu_factor`.
pivots (Tensor): tensor of shape `(*, n)` (or `(*, k)` if :attr:`left`\ `= False`)
where `*` is zero or more batch dimensions as returned by :func:`~lu_factor`.
B (Tensor): right-hand side tensor of shape `(*, n, k)`.
Keyword args:
left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`.
adjoint (bool, optional): whether to solve the system :math:`AX=B` or :math:`A^{\text{H}}X = B`. Default: `False`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(3, 3)
>>> LU, pivots = torch.linalg.lu_factor(A)
>>> B = torch.randn(3, 2)
>>> X = torch.linalg.lu_solve(LU, pivots, B)
>>> torch.allclose(A @ X, B)
True
>>> B = torch.randn(3, 3, 2) # Broadcasting rules apply: A is broadcasted
>>> X = torch.linalg.lu_solve(LU, pivots, B)
>>> torch.allclose(A @ X, B)
True
>>> B = torch.randn(3, 5, 3)
>>> X = torch.linalg.lu_solve(LU, pivots, B, left=False)
>>> torch.allclose(X @ A, B)
True
>>> B = torch.randn(3, 3, 4) # Now solve for A^T
>>> X = torch.linalg.lu_solve(LU, pivots, B, adjoint=True)
>>> torch.allclose(A.mT @ X, B)
True
.. _invertible:
https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
""",
)
lu = _add_docstr(
_linalg.linalg_lu,
r"""
lu(A, *, pivot=True, out=None) -> (Tensor, Tensor, Tensor)
Computes the LU decomposition with partial pivoting of a matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **LU decomposition with partial pivoting** of a matrix
:math:`A \in \mathbb{K}^{m \times n}` is defined as
.. math::
A = PLU\mathrlap{\qquad P \in \mathbb{K}^{m \times m}, L \in \mathbb{K}^{m \times k}, U \in \mathbb{K}^{k \times n}}
where `k = min(m,n)`, :math:`P` is a `permutation matrix`_, :math:`L` is lower triangular with ones on the diagonal
and :math:`U` is upper triangular.
If :attr:`pivot`\ `= False` and :attr:`A` is on GPU, then the **LU decomposition without pivoting** is computed
.. math::
A = LU\mathrlap{\qquad L \in \mathbb{K}^{m \times k}, U \in \mathbb{K}^{k \times n}}
When :attr:`pivot`\ `= False`, the returned matrix :attr:`P` will be empty.
The LU decomposition without pivoting `may not exist`_ if any of the principal minors of :attr:`A` is singular.
In this case, the output matrix may contain `inf` or `NaN`.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
.. seealso::
:func:`torch.linalg.solve` solves a system of linear equations using the LU decomposition
with partial pivoting.
.. warning:: The LU decomposition is almost never unique, as often there are different permutation
matrices that can yield different LU decompositions.
As such, different platforms, like SciPy, or inputs on different devices,
may produce different valid decompositions.
.. warning:: Gradient computations are only supported if the input matrix is full-rank.
If this condition is not met, no error will be thrown, but the gradient
may not be finite.
This is because the LU decomposition with pivoting is not differentiable at these points.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
pivot (bool, optional): Controls whether to compute the LU decomposition with partial pivoting or
no pivoting. Default: `True`.
Keyword args:
out (tuple, optional): output tuple of three tensors. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(P, L, U)`.
Examples::
>>> A = torch.randn(3, 2)
>>> P, L, U = torch.linalg.lu(A)
>>> P
tensor([[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]])
>>> L
tensor([[1.0000, 0.0000],
[0.5007, 1.0000],
[0.0633, 0.9755]])
>>> U
tensor([[0.3771, 0.0489],
[0.0000, 0.9644]])
>>> torch.dist(A, P @ L @ U)
tensor(5.9605e-08)
>>> A = torch.randn(2, 5, 7, device="cuda")
>>> P, L, U = torch.linalg.lu(A, pivot=False)
>>> P
tensor([], device='cuda:0')
>>> torch.dist(A, L @ U)
tensor(1.0376e-06, device='cuda:0')
.. _permutation matrix:
https://en.wikipedia.org/wiki/Permutation_matrix
.. _may not exist:
https://en.wikipedia.org/wiki/LU_decomposition#Definitions
""",
)
tensorinv = _add_docstr(
_linalg.linalg_tensorinv,
r"""
linalg.tensorinv(A, ind=2, *, out=None) -> Tensor
Computes the multiplicative inverse of :func:`torch.tensordot`.
If `m` is the product of the first :attr:`ind` dimensions of :attr:`A` and `n` is the product of
the rest of the dimensions, this function expects `m` and `n` to be equal.
If this is the case, it computes a tensor `X` such that
`tensordot(\ `:attr:`A`\ `, X, \ `:attr:`ind`\ `)` is the identity matrix in dimension `m`.
`X` will have the shape of :attr:`A` but with the first :attr:`ind` dimensions pushed back to the end
.. code:: text
X.shape == A.shape[ind:] + A.shape[:ind]
Supports input of float, double, cfloat and cdouble dtypes.
.. note:: When :attr:`A` is a `2`-dimensional tensor and :attr:`ind`\ `= 1`,
this function computes the (multiplicative) inverse of :attr:`A`
(see :func:`torch.linalg.inv`).
.. note::
Consider using :func:`torch.linalg.tensorsolve` if possible for multiplying a tensor on the left
by the tensor inverse, as::
linalg.tensorsolve(A, B) == torch.tensordot(linalg.tensorinv(A), B) # When B is a tensor with shape A.shape[:B.ndim]
It is always preferred to use :func:`~tensorsolve` when possible, as it is faster and more
numerically stable than computing the tensor inverse explicitly.
.. seealso::
:func:`torch.linalg.tensorsolve` computes
`torch.tensordot(tensorinv(\ `:attr:`A`\ `), \ `:attr:`B`\ `)`.
Args:
A (Tensor): tensor to invert. Its shape must satisfy
`prod(\ `:attr:`A`\ `.shape[:\ `:attr:`ind`\ `]) ==
prod(\ `:attr:`A`\ `.shape[\ `:attr:`ind`\ `:])`.
ind (int): index at which to compute the inverse of :func:`torch.tensordot`. Default: `2`.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if the reshaped :attr:`A` is not invertible or the product of the first
:attr:`ind` dimensions is not equal to the product of the rest.
Examples::
>>> A = torch.eye(4 * 6).reshape((4, 6, 8, 3))
>>> Ainv = torch.linalg.tensorinv(A, ind=2)
>>> Ainv.shape
torch.Size([8, 3, 4, 6])
>>> B = torch.randn(4, 6)
>>> torch.allclose(torch.tensordot(Ainv, B), torch.linalg.tensorsolve(A, B))
True
>>> A = torch.randn(4, 4)
>>> Atensorinv = torch.linalg.tensorinv(A, ind=1)
>>> Ainv = torch.linalg.inv(A)
>>> torch.allclose(Atensorinv, Ainv)
True
""",
)
tensorsolve = _add_docstr(
_linalg.linalg_tensorsolve,
r"""
linalg.tensorsolve(A, B, dims=None, *, out=None) -> Tensor
Computes the solution `X` to the system `torch.tensordot(A, X) = B`.
If `m` is the product of the first :attr:`B`\ `.ndim` dimensions of :attr:`A` and
`n` is the product of the rest of the dimensions, this function expects `m` and `n` to be equal.
The returned tensor `x` satisfies
`tensordot(\ `:attr:`A`\ `, x, dims=x.ndim) == \ `:attr:`B`.
`x` has shape :attr:`A`\ `.shape[B.ndim:]`.
If :attr:`dims` is specified, :attr:`A` will be reshaped as
.. code:: text
A = movedim(A, dims, range(len(dims) - A.ndim + 1, 0))
Supports inputs of float, double, cfloat and cdouble dtypes.
.. seealso::
:func:`torch.linalg.tensorinv` computes the multiplicative inverse of
:func:`torch.tensordot`.
Args:
A (Tensor): tensor to solve for. Its shape must satisfy
`prod(\ `:attr:`A`\ `.shape[:\ `:attr:`B`\ `.ndim]) ==
prod(\ `:attr:`A`\ `.shape[\ `:attr:`B`\ `.ndim:])`.
B (Tensor): tensor of shape :attr:`A`\ `.shape[:\ `:attr:`B`\ `.ndim]`.
dims (Tuple[int], optional): dimensions of :attr:`A` to be moved.
If `None`, no dimensions are moved. Default: `None`.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if the reshaped :attr:`A`\ `.view(m, m)` with `m` as above is not
invertible or the product of the first :attr:`B`\ `.ndim` dimensions is not equal
to the product of the rest of the dimensions.
Examples::
>>> A = torch.eye(2 * 3 * 4).reshape((2 * 3, 4, 2, 3, 4))
>>> B = torch.randn(2 * 3, 4)
>>> X = torch.linalg.tensorsolve(A, B)
>>> X.shape
torch.Size([2, 3, 4])
>>> torch.allclose(torch.tensordot(A, X, dims=X.ndim), B)
True
>>> A = torch.randn(6, 4, 4, 3, 2)
>>> B = torch.randn(4, 3, 2)
>>> X = torch.linalg.tensorsolve(A, B, dims=(0, 2))
>>> X.shape
torch.Size([6, 4])
>>> A = A.permute(1, 3, 4, 0, 2)
>>> A.shape[B.ndim:]
torch.Size([6, 4])
>>> torch.allclose(torch.tensordot(A, X, dims=X.ndim), B, atol=1e-6)
True
""",
)
qr = _add_docstr(
_linalg.linalg_qr,
r"""
qr(A, mode='reduced', *, out=None) -> (Tensor, Tensor)
Computes the QR decomposition of a matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **full QR decomposition** of a matrix
:math:`A \in \mathbb{K}^{m \times n}` is defined as
.. math::
A = QR\mathrlap{\qquad Q \in \mathbb{K}^{m \times m}, R \in \mathbb{K}^{m \times n}}
where :math:`Q` is orthogonal in the real case and unitary in the complex case,
and :math:`R` is upper triangular with real diagonal (even in the complex case).
When `m > n` (tall matrix), as `R` is upper triangular, its last `m - n` rows are zero.
In this case, we can drop the last `m - n` columns of `Q` to form the
**reduced QR decomposition**:
.. math::
A = QR\mathrlap{\qquad Q \in \mathbb{K}^{m \times n}, R \in \mathbb{K}^{n \times n}}
The reduced QR decomposition agrees with the full QR decomposition when `n >= m` (wide matrix).
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The parameter :attr:`mode` chooses between the full and reduced QR decomposition.
If :attr:`A` has shape `(*, m, n)`, denoting `k = min(m, n)`
- :attr:`mode`\ `= 'reduced'` (default): Returns `(Q, R)` of shapes `(*, m, k)`, `(*, k, n)` respectively.
It is always differentiable.
- :attr:`mode`\ `= 'complete'`: Returns `(Q, R)` of shapes `(*, m, m)`, `(*, m, n)` respectively.
It is differentiable for `m <= n`.
- :attr:`mode`\ `= 'r'`: Computes only the reduced `R`. Returns `(Q, R)` with `Q` empty and `R` of shape `(*, k, n)`.
It is never differentiable.
Differences with `numpy.linalg.qr`:
- :attr:`mode`\ `= 'raw'` is not implemented.
- Unlike `numpy.linalg.qr`, this function always returns a tuple of two tensors.
When :attr:`mode`\ `= 'r'`, the `Q` tensor is an empty tensor.
.. warning:: The elements in the diagonal of `R` are not necessarily positive.
As such, the returned QR decomposition is only unique up to the sign of the diagonal of `R`.
Therefore, different platforms, like NumPy, or inputs on different devices,
may produce different valid decompositions.
.. warning:: The QR decomposition is only well-defined if the first `k = min(m, n)` columns
of every matrix in :attr:`A` are linearly independent.
If this condition is not met, no error will be thrown, but the QR produced
may be incorrect and its autodiff may fail or produce incorrect results.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
mode (str, optional): one of `'reduced'`, `'complete'`, `'r'`.
Controls the shape of the returned tensors. Default: `'reduced'`.
Keyword args:
out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(Q, R)`.
Examples::
>>> A = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> Q, R = torch.linalg.qr(A)
>>> Q
tensor([[-0.8571, 0.3943, 0.3314],
[-0.4286, -0.9029, -0.0343],
[ 0.2857, -0.1714, 0.9429]])
>>> R
tensor([[ -14.0000, -21.0000, 14.0000],
[ 0.0000, -175.0000, 70.0000],
[ 0.0000, 0.0000, -35.0000]])
>>> (Q @ R).round()
tensor([[ 12., -51., 4.],
[ 6., 167., -68.],
[ -4., 24., -41.]])
>>> (Q.T @ Q).round()
tensor([[ 1., 0., 0.],
[ 0., 1., -0.],
[ 0., -0., 1.]])
>>> Q2, R2 = torch.linalg.qr(A, mode='r')
>>> Q2
tensor([])
>>> torch.equal(R, R2)
True
>>> A = torch.randn(3, 4, 5)
>>> Q, R = torch.linalg.qr(A, mode='complete')
>>> torch.dist(Q @ R, A)
tensor(1.6099e-06)
>>> torch.dist(Q.mT @ Q, torch.eye(4))
tensor(6.2158e-07)
""",
)
vander = _add_docstr(
_linalg.linalg_vander,
r"""
vander(x, N=None) -> Tensor
Generates a Vandermonde matrix.
Returns the Vandermonde matrix :math:`V`
.. math::
V = \begin{pmatrix}
1 & x_1 & x_1^2 & \dots & x_1^{N-1}\\
1 & x_2 & x_2^2 & \dots & x_2^{N-1}\\
1 & x_3 & x_3^2 & \dots & x_3^{N-1}\\
\vdots & \vdots & \vdots & \ddots &\vdots \\
1 & x_n & x_n^2 & \dots & x_n^{N-1}
\end{pmatrix}.
for `N > 1`.
If :attr:`N`\ `= None`, then `N = x.size(-1)` so that the output is a square matrix.
Supports inputs of float, double, cfloat, cdouble, and integral dtypes.
Also supports batches of vectors, and if :attr:`x` is a batch of vectors then
the output has the same batch dimensions.
Differences with `numpy.vander`:
- Unlike `numpy.vander`, this function returns the powers of :attr:`x` in ascending order.
To get them in the reverse order call ``linalg.vander(x, N).flip(-1)``.
Args:
x (Tensor): tensor of shape `(*, n)` where `*` is zero or more batch dimensions
consisting of vectors.
Keyword args:
N (int, optional): Number of columns in the output. Default: `x.size(-1)`
Example::
>>> x = torch.tensor([1, 2, 3, 5])
>>> linalg.vander(x)
tensor([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
>>> linalg.vander(x, N=3)
tensor([[ 1, 1, 1],
[ 1, 2, 4],
[ 1, 3, 9],
[ 1, 5, 25]])
""",
)
vecdot = _add_docstr(
_linalg.linalg_vecdot,
r"""
linalg.vecdot(x, y, *, dim=-1, out=None) -> Tensor
Computes the dot product of two batches of vectors along a dimension.
In symbols, this function computes
.. math::
\sum_{i=1}^n \overline{x_i}y_i.
over the dimension :attr:`dim` where :math:`\overline{x_i}` denotes the conjugate for complex
vectors, and it is the identity for real vectors.
Supports input of half, bfloat16, float, double, cfloat, cdouble and integral dtypes.
It also supports broadcasting.
Args:
x (Tensor): first batch of vectors of shape `(*, n)`.
y (Tensor): second batch of vectors of shape `(*, n)`.
Keyword args:
dim (int): Dimension along which to compute the dot product. Default: `-1`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> v1 = torch.randn(3, 2)
>>> v2 = torch.randn(3, 2)
>>> linalg.vecdot(v1, v2)
tensor([ 0.3223, 0.2815, -0.1944])
>>> torch.vdot(v1[0], v2[0])
tensor(0.3223)
""",
)
```
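The `vecdot` docstring above defines the reduction as the sum of ``conj(x_i) * y_i``, i.e. the first argument is conjugated for complex inputs, but its example only exercises real tensors. Below is a minimal sketch of the complex case, assuming a PyTorch build where `torch.linalg.vecdot` is available; it should agree with `torch.vdot`, which also conjugates its first argument.

```py
import torch

# vecdot conjugates its first argument, matching sum(conj(x_i) * y_i).
x = torch.randn(3, 4, dtype=torch.cfloat)
y = torch.randn(3, 4, dtype=torch.cfloat)

out = torch.linalg.vecdot(x, y)  # shape (3,): one dot product per batch entry
ref = torch.stack([torch.vdot(a, b) for a, b in zip(x, y)])

print(torch.allclose(out, ref))  # expected: True
```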
|
================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\masked\__init__.py
ENCODING: utf-8
```py
from torch.masked._ops import (
_canonical_dim,
_combine_input_and_mask,
_generate_docstring,
_input_mask,
_output_mask,
_reduction_identity,
_where,
amax,
amin,
argmax,
argmin,
cumprod,
cumsum,
log_softmax,
logaddexp,
logsumexp,
mean,
median,
norm,
normalize,
prod,
softmax,
softmin,
std,
sum,
var,
)
from torch.masked.maskedtensor.core import is_masked_tensor, MaskedTensor
from torch.masked.maskedtensor.creation import as_masked_tensor, masked_tensor
__all__ = [
"amax",
"amin",
"argmax",
"argmin",
"as_masked_tensor",
"cumprod",
"cumsum",
"is_masked_tensor",
"log_softmax",
"logaddexp",
"logsumexp",
"masked_tensor",
"MaskedTensor",
"mean",
"median",
"norm",
"normalize",
"prod",
"softmax",
"softmin",
"std",
"sum",
"var",
]
```
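The module above re-exports both the functional masked reductions and the `MaskedTensor` API. A minimal usage sketch follows; note that `MaskedTensor` is a prototype feature, so exact behaviour may change across releases.

```py
import torch
from torch.masked import masked_tensor, is_masked_tensor

# A boolean mask marks which elements are valid (True = valid).
data = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
mask = torch.tensor([[True, False, True], [False, False, False]])

mt = masked_tensor(data, mask)
print(is_masked_tensor(mt))  # True

# The functional ops accept a plain tensor plus an explicit mask keyword,
# as documented in torch/masked/_docs.py below.
print(torch.masked.sum(data, 1, mask=mask))
# expected: tensor([-4., 0.]) -- row 1 is fully masked, so its value is
# undefined and typically falls back to the identity (0).
```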
|
=============================================================================================================
SOURCE CODE FILE: _docs.py
LINES: 1
SIZE: 49.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\masked\_docs.py
ENCODING: utf-8
```py
# This file is generated, do not modify it!
#
# To update this file, run the update masked docs script as follows:
#
# python tools/update_masked_docs.py
#
# The script must be called from an environment where the development
# version of torch package can be imported and is functional.
#
amax_docstring = """amax(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns maximum of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of maximum operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``-inf``, ``0``, and ``-2147483648``, respectively.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in maximum computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of maximum operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.amax(input, 1, mask=mask)
tensor([ -1, -9223372036854775808])
"""
amin_docstring = """amin(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns minimum of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of minimum operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``inf``, ``255``, and ``2147483647``, respectively.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in minimum computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of minimum operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.amin(input, 1, mask=mask)
tensor([ -3, 9223372036854775807])
"""
argmax_docstring = """argmax(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns argmax of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of argmax operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``-inf``, ``0``, and ``-2147483648``, respectively.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in argmax computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of argmax operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which argmax is computed.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.argmax(input, 1, mask=mask)
tensor([2, 0])
"""
argmin_docstring = """argmin(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns argmin of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of argmin operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``inf``, ``255``, and ``2147483647``, respectively.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in argmin computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of argmin operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which argmin is computed.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.argmin(input, 1, mask=mask)
tensor([0, 0])
"""
cumprod_docstring = """cumprod(input, dim, *, dtype=None, mask=None) -> Tensor
Returns cumulative_prod of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Cumprod of i-th element in ``x`` is
defined as ``prod(x[:i])``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
cumulative_prod computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the cumulative_prod output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which cumulative_prod is computed.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.cumprod(input, 1, mask=mask)
tensor([[-3., -3., 3.],
[ 1., 1., 1.]])
"""
cumsum_docstring = """cumsum(input, dim, *, dtype=None, mask=None) -> Tensor
Returns cumulative_sum of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is
defined as ``sum(x[:i])``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
cumulative_sum computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the cumulative_sum output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which cumulative_sum is computed.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.cumsum(input, 1, mask=mask)
tensor([[-3., -3., -4.],
[ 0., 0., 0.]])
"""
log_softmax_docstring = """log_softmax(input, dim, *, dtype=None, mask=None) -> Tensor
Returns log_softmax of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. LogSoftmax of i-th element in ``x`` is
defined as ``log(exp(x[i])/sum(exp(x)))``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
log_softmax computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the log_softmax output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which log_softmax is computed.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.log_softmax(input, 1, mask=mask)
tensor([[-2.1269, -inf, -0.1269],
[ nan, nan, nan]])
"""
logsumexp_docstring = """logsumexp(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns logsumexp of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of logsumexp operation, which is used to start the reduction, is ``-2147483648``.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in logsumexp computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of logsumexp operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.logsumexp(input, 1, mask=mask)
tensor([ 0, -9223372036854775808])
"""
mean_docstring = """mean(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns mean of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
By definition, the identity value of a mean operation is the mean
value of the tensor. If all elements of the input tensor along given
dimension(s) :attr:`dim` are masked-out, the identity value of the
mean is undefined. Due to this ambiguity, the elements of output
tensor with strided layout, that correspond to fully masked-out
elements, have ``nan`` values.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in mean computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of mean operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.mean(input, 1, mask=mask)
tensor([-2., nan])
"""
median_docstring = """median(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns median of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
By definition, the identity value of a median operation is the median
value of the tensor. If all elements of the input tensor along given
dimension(s) :attr:`dim` are masked-out, the identity value of the
median is undefined. Due to this ambiguity, the elements of output
tensor with strided layout, that correspond to fully masked-out
elements, have ``nan`` values.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in median computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of median operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which median is computed.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.median(input, 1, mask=mask)
tensor([-3., nan])
"""
norm_docstring = """norm(input, ord, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns norm of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of norm operation, which is used to start the
reduction, is ``0.0``, except for ``ord=-inf`` it is
``inf``.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in norm computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of norm operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
ord (int, float, optional): the order of vector norm. Default: 2.
See :func:`torch.linalg.vector_norm` for a list of supported norms.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.norm(input, 2.0, 1, mask=mask)
tensor([3.1623, 0.0000])
"""
normalize_docstring = """normalize(input, ord, dim, *, eps=1e-12, dtype=None, mask=None) -> Tensor
Returns normalize of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Normalize of i-th element in ``x`` is
defined as ``x[i]/max(norm(x, p), eps)``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
normalize computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the normalize output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
ord (int, float): the order of vector norm. Default: 2.
See :func:`torch.linalg.vector_norm` for a list of supported norms.
dim (int): the dimension along which normalize is computed.
Keyword args:
eps (float, optional): small value to avoid division by zero. Default: 1e-12.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.normalize(input, 2.0, 1, mask=mask)
tensor([[-0.9487, 0.0000, -0.3162],
[ 0.0000, 0.0000, 0.0000]])
"""
prod_docstring = """prod(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns product of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of product operation, which is used to start the reduction, is ``1``.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in product computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of product operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.prod(input, 1, mask=mask)
tensor([3, 1])
"""
softmax_docstring = """softmax(input, dim, *, dtype=None, mask=None) -> Tensor
Returns softmax of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Softmax of i-th element in ``x`` is
defined as ``exp(x[i])/sum(exp(x))``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
softmax computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the softmax output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which softmax is computed.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.softmax(input, 1, mask=mask)
tensor([[0.1192, 0.0000, 0.8808],
[ nan, nan, nan]])
"""
softmin_docstring = """softmin(input, dim, *, dtype=None, mask=None) -> Tensor
Returns softmin of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Softmin of i-th element in ``x`` is
defined as ``exp(-x[i])/sum(exp(-x))``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
softmin computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the softmin output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which softmin is computed.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.softmin(input, 1, mask=mask)
tensor([[0.8808, 0.0000, 0.1192],
[ nan, nan, nan]])
"""
std_docstring = """std(input, dim, unbiased, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns standard_deviation of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of sample standard deviation operation is undefined. The
elements of output tensor with strided layout, that correspond to
fully masked-out elements, have ``nan`` values.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in standard_deviation computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of standard_deviation operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
unbiased (bool): when True, use Bessel's correction, otherwise, compute
the uncorrected sample variance.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.std(input, 1, False, mask=mask)
tensor([1., nan])
"""
sum_docstring = """sum(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns sum of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of sum operation, which is used to start the reduction, is ``0``.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in sum computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of sum operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.sum(input, 1, mask=mask)
tensor([-4, 0])
"""
var_docstring = """var(input, dim, unbiased, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns variance of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of sample variance operation is undefined. The
elements of output tensor with strided layout, that correspond to
fully masked-out elements, have ``nan`` values.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in variance computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of variance operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
unbiased (bool): when True, use Bessel's correction, otherwise, compute
the uncorrected sample variance.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch.masked._ops.var(input, 1, False, mask=mask)
tensor([1., nan])
"""
```
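Each docstring above notes that the mask only has to be broadcastable against the input and may have fewer dimensions. A minimal sketch of that behaviour using the functional ops re-exported from `torch.masked`; the expected values follow from the definitions above, though outputs for masked-out slices are formally undefined.

```py
import torch

# A per-column mask of shape (3,) broadcasts over the batch dimension
# of a (2, 3) input, selecting columns 0 and 2 in every row.
input = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
mask = torch.tensor([True, False, True])

print(torch.masked.amax(input, 1, mask=mask))  # expected: tensor([-1., 2.])
print(torch.masked.mean(input, 1, mask=mask))  # expected: tensor([-2., 1.])
```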
|
============================================================================================================
SOURCE CODE FILE: _ops.py
LINES: 10
SIZE: 66.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\masked\_ops.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import warnings
from typing import Any, Callable, Optional, TYPE_CHECKING, TypeVar, Union
from typing_extensions import ParamSpec
import torch
from torch import sym_float, Tensor
from torch._prims_common import corresponding_real_dtype
from torch.masked import _docs
from torch.masked.maskedtensor.core import is_masked_tensor, MaskedTensor
from torch.masked.maskedtensor.creation import as_masked_tensor
if TYPE_CHECKING:
from torch.types import _dtype as DType
DimOrDims = Optional[Union[int, tuple[int], list[int]]]
else:
# The JIT doesn't understand Union, nor torch.dtype here
DType = int
DimOrDims = Optional[tuple[int]]
__all__: list[str] = []
_T = TypeVar("_T")
_P = ParamSpec("_P")
# All masked reduction/normalization operations have the same
# signatures. Here we introduce docstring templates that are applied
# to docstrings of reduction/normalization functions via
# _apply_docstring_templates decorator.
def _apply_docstring_templates(func: Callable[_P, _T]) -> Callable[_P, _T]:
"""Decorator that applies docstring templates to function docstring
and returns the function instance.
"""
doc_string = getattr(_docs, f"{func.__name__}_docstring", None)
if doc_string is None:
warnings.warn(
f"No documentation string available for {func.__name__}."
" PyTorch team should run `python tools/update_masked_docs.py`"
" to generate the missing docstrings."
)
else:
func.__doc__ = doc_string
# Expose function as public symbol
__all__.append(func.__name__)
return func
def _generate_docstring(func):
"""A utility function called from tools/update_masked_docs.py
script to update the module torch.masked._docs.py
"""
docstring_templates = dict(
reduction_signature="""\
{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""",
reduction_descr="""\
Returns {operation name} of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.""",
reduction_args="""\
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in {operation name} computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of {operation name} operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
{args_declarations}
Keyword args:
{kwargs_declarations}""",
reduction_example="""\
Example::
>>> input = {example_input}
>>> input
{indent_example_input}
>>> mask = {example_mask}
>>> mask
{indent_example_mask}
>>> {full_function_name}(input, {example_args}, mask=mask)
{indent_example_output}
""",
reduction_identity="""\
The identity value of {operation name} operation, which is used to start the reduction, is ``{identity_int32}``.""",
reduction_identity_dtype="""\
The identity value of {operation name} operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``{identity_float32}``, ``{identity_uint8}``, and ``{identity_int32}``, respectively.""",
normalization_signature="""\
{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""",
normalization_descr="""\
Returns {operation name} of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
{definition}""",
normalization_args="""\
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
{operation name} computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the {operation name} output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
{args_declarations}
Keyword args:
{kwargs_declarations}""",
normalization_example="""\
Example::
>>> input = {example_input}
>>> input
{indent_example_input}
>>> mask = {example_mask}
>>> mask
{indent_example_mask}
>>> {full_function_name}(input, {example_args}, mask=mask)
{indent_example_output}
""",
)
args_and_kwargs = dict(
# argument name suffixes separated by double underscore will
# be removed in the final documentation string.
sum=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
prod=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
cumsum=(("dim__as_int",), ("dtype=None", "mask=None")),
cumprod=(("dim__as_int",), ("dtype=None", "mask=None")),
amin=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
amax=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
argmin=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
argmax=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
mean=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
median=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
norm=(
(
"ord",
"dim",
),
("keepdim=False", "dtype=None", "mask=None"),
),
var=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")),
std=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")),
logsumexp=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
softmax=(("dim__as_int",), ("dtype=None", "mask=None")),
log_softmax=(("dim__as_int",), ("dtype=None", "mask=None")),
softmin=(("dim__as_int",), ("dtype=None", "mask=None")),
normalize=(
(
"ord__required",
"dim__as_int",
),
("eps=1e-12", "dtype=None", "mask=None"),
),
)
argument_declarations = dict(
dim="""\
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None, which is equivalent to ``tuple(range(input.ndim))``.""",
dim__as_int="""\
dim (int): the dimension along which {operation name} is computed.""",
ord="""\
ord (int, float, optional): the order of vector norm. Default: 2.
See :func:`torch.linalg.vector_norm` for a list of supported norms.""",
ord__required="""\
ord (int, float): the order of vector norm.
See :func:`torch.linalg.vector_norm` for a list of supported norms.""",
unbiased="""\
unbiased (bool): when True, use Bessel's correction, otherwise, compute
the uncorrected sample variance.""",
eps="""\
eps (float, optional): small value to avoid division by zero. Default: {default}.""",
keepdim="""\
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: {default}.""",
dtype="""\
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: {default}.""",
mask="""\
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None, which is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.""",
)
definitions = dict(
softmax="""\
Let ``x`` be a sequence of unmasked elements of a one-dimensional slice
of the :attr:`input` tensor. The softmax of the i-th element in ``x`` is
defined as ``exp(x[i])/sum(exp(x))``.""",
log_softmax="""\
Let ``x`` be a sequence of unmasked elements of a one-dimensional slice
of the :attr:`input` tensor. The log_softmax of the i-th element in ``x`` is
defined as ``log(exp(x[i])/sum(exp(x)))``.""",
softmin="""\
Let ``x`` be a sequence of unmasked elements of a one-dimensional slice
of the :attr:`input` tensor. The softmin of the i-th element in ``x`` is
defined as ``exp(-x[i])/sum(exp(-x))``.""",
normalize="""\
Let ``x`` be a sequence of unmasked elements of a one-dimensional slice
of the :attr:`input` tensor. The normalized value of the i-th element in ``x`` is
defined as ``x[i]/max(norm(x, p), eps)``.""",
cumsum="""\
Let ``x`` be a sequence of unmasked elements of a one-dimensional slice
of the :attr:`input` tensor. The cumsum of the i-th element in ``x`` is
defined as ``sum(x[:i])``.""",
cumprod="""\
Let ``x`` be a sequence of unmasked elements of a one-dimensional slice
of the :attr:`input` tensor. The cumprod of the i-th element in ``x`` is
defined as ``prod(x[:i])``.""",
)
reduction_names = dict(
sum="sum",
prod="product",
amax="maximum",
amin="minimum",
argmax="argmax",
argmin="argmin",
mean="mean",
median="median",
norm="norm",
var="variance",
std="standard_deviation",
logsumexp="logsumexp",
)
normalization_names = dict(
softmax="softmax",
log_softmax="log_softmax",
softmin="softmin",
normalize="normalize",
cumsum="cumulative_sum",
cumprod="cumulative_prod",
)
operation_names = {}
operation_names.update(reduction_names)
operation_names.update(normalization_names)
# Default example data:
example_dim = 1
example_input = torch.tensor([[-3, -2, -1], [0, 1, 2]])
example_mask = torch.tensor([[True, False, True], [False, False, False]])
example_args: tuple[Any, ...]
if func.__name__ in {"norm", "normalize"}:
example_args = (2.0, example_dim)
example_input = example_input.to(dtype=torch.float32)
elif func.__name__ in {"var", "std"}:
example_args = (example_dim, False)
elif func.__name__ == "median":
example_args = (example_dim,)
example_input = example_input.to(dtype=torch.float32)
else:
example_args = (example_dim,)
operation_args: tuple[str, ...]
operation_kwargs: tuple[str, ...]
operation_args, operation_kwargs = args_and_kwargs[func.__name__]
arg_declarations = [
"\n ".join(
argument_declarations.get(a, f'{a.split("__", 1)[0]}: TBD.').splitlines()
)
for a in operation_args
]
kwarg_declarations = [
"\n ".join(
argument_declarations.get(
a.split("=", 1)[0], f'{a.split("__", 1)[0]}: TBD.'
)
.format(default=a.split("=", 1)[1])
.splitlines()
)
for a in operation_kwargs
]
if func.__name__ in reduction_names:
op_kind = "reduction"
doc_sections = ["signature", "descr", "identity", "args", "example"]
elif func.__name__ in normalization_names:
op_kind = "normalization"
doc_sections = ["signature", "descr", "args", "example"]
example_input = example_input.to(dtype=torch.float32)
else:
assert 0 # add function name to operation names dictionaries
example_output = func(example_input, *example_args, mask=example_mask)
template_data = {
"function_name": func.__name__,
"full_function_name": func.__module__ + "." + func.__name__,
"operation name": operation_names[func.__name__],
"operation_args": ", ".join(a.split("__", 1)[0] for a in operation_args),
"operation_kwargs": ", ".join(a.split("__", 1)[0] for a in operation_kwargs),
# one-line representation of a tensor:
"example_input": " ".join(str(example_input).split()),
"example_args": ", ".join(map(str, example_args)),
"example_mask": " ".join(str(example_mask).split()),
# multi-line representation of a tensor with indent
"indent_example_input": ("\n ").join(str(example_input).splitlines()),
"indent_example_mask": ("\n ").join(str(example_mask).splitlines()),
"indent_example_output": ("\n ").join(str(example_output).splitlines()),
}
if func.__name__ in reduction_names:
template_data.update(
identity_uint8=_reduction_identity(
func.__name__, torch.tensor(0, dtype=torch.uint8)
),
identity_int32=_reduction_identity(
func.__name__, torch.tensor(0, dtype=torch.int32)
),
identity_float32=_reduction_identity(
func.__name__, torch.tensor(0, dtype=torch.float32)
),
)
if func.__name__ == "norm":
template_data.update(
identity_ord_ninf=_reduction_identity(
func.__name__, torch.tensor(0, dtype=torch.float32), float("-inf")
)
)
elif func.__name__ in normalization_names:
template_data.update(definition=definitions[func.__name__])
else:
assert 0 # add function name to operation names dictionaries
template_data.update(
args_declarations=("\n ".join(arg_declarations)).format_map(template_data)
)
template_data.update(
kwargs_declarations=("\n ".join(kwarg_declarations)).format_map(
template_data
)
)
# Apply function name info to docstring templates:
templates = {
k: v.format_map(template_data)
for k, v in docstring_templates.items()
if k.startswith(op_kind)
}
templates.update(
(k, v.format_map(template_data) if isinstance(v, str) else v)
for k, v in template_data.items()
)
# Apply docstring templates to function docstring:
if func.__doc__ is None:
doc_template = "\n\n".join([f"{{{op_kind}_{sec}}}" for sec in doc_sections])
else:
doc_template = func.__doc__
return doc_template.format_map(templates)
def _reduction_identity(op_name: str, input: Tensor, *args):
"""Return identity value as scalar tensor of a reduction operation on
given input, or None, if the identity value cannot be uniquely
defined for the given input.
The identity value of the operation is defined as the initial
value of the reduction that has the property ``op(op_identity,
value) == value`` for any value in the domain of the operation.
Put another way, including or excluding the identity value in
a list of operands will not change the reduction result.
See https://github.com/pytorch/rfcs/pull/27 for more information.
"""
dtype: DType = input.dtype
device = input.device
op_name = op_name.rsplit(".", 1)[-1] # lstrip module name when present
if op_name in {"sum", "cumsum"}:
return torch.tensor(0, dtype=dtype, device=device)
elif op_name in {"prod", "cumprod"}:
return torch.tensor(1, dtype=dtype, device=device)
elif op_name in {"amax", "argmax", "logaddexp"}:
if torch.is_floating_point(input):
return torch.tensor(-torch.inf, dtype=dtype, device=device)
elif torch.is_signed(input) or dtype == torch.uint8:
return torch.tensor(torch.iinfo(dtype).min, dtype=dtype, device=device)
elif op_name in {"logsumexp"}:
if torch.is_floating_point(input):
return torch.tensor(-torch.inf, dtype=dtype, device=device)
elif torch.is_complex(input):
return torch.tensor(-torch.inf + 0j, dtype=dtype, device=device)
elif torch.is_signed(input) or dtype == torch.uint8:
return torch.tensor(torch.iinfo(dtype).min, dtype=dtype, device=device)
elif op_name in {"amin", "argmin"}:
if torch.is_floating_point(input):
return torch.tensor(torch.inf, dtype=dtype, device=device)
elif torch.is_signed(input) or dtype == torch.uint8:
return torch.tensor(torch.iinfo(dtype).max, dtype=dtype, device=device)
elif op_name == "mean":
# Strictly speaking, the identity value of the mean operation
# is the mean of the input. Since the mean value depends on
# the dim argument and it may be a non-scalar tensor, we
# consider the identity value of the mean operation ambiguous.
# Moreover, the mean value of empty input is undefined.
return None
elif op_name == "norm":
ord = args[0] if args else 2
if ord == float("-inf"):
assert torch.is_floating_point(input), input.dtype
return torch.tensor(torch.inf, dtype=dtype, device=device)
return torch.tensor(0, dtype=dtype, device=device)
elif op_name == "median":
# We use NaN for now because the implementation is currently using torch.nanmedian
# and NaN is the identity for that function since it gets ignored
dtype = input.dtype if torch.is_floating_point(input) else torch.float
return torch.tensor(torch.nan, dtype=dtype, device=device)
elif op_name in {"var", "std"}:
return None
raise NotImplementedError(f"identity of {op_name} on {dtype} input")
def _canonical_dim(dim: DimOrDims, ndim: int) -> tuple[int, ...]:
"""Return dim argument as a tuple of sorted dim values."""
dims: list[int] = []
if dim == ():
# Currently, `dim=()` in reduction operations means "reduce
# over all dimensions", while in the future it will mean "no
# reduce". See https://github.com/pytorch/pytorch/issues/29137
# When gh-29137 is resolved, this if-block must be deleted.
dim = None
if dim is None:
return tuple(range(ndim))
ndim = max(ndim, 1)
dim_ = (dim,) if isinstance(dim, (int, torch.SymInt)) else dim
for d in dim_:
if d in dims:
raise RuntimeError(f"dim={d} appears multiple times in the list of dims")
if d >= ndim or d < -ndim:
raise IndexError(
f"Dimension out of range (expected to be in range of [{-ndim}, {ndim - 1}], but got {d})"
)
dims.append(d % ndim)
return tuple(sorted(dims))
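# Illustrative examples (editor's sketch) of dim canonicalization, assuming ndim=3:
#   _canonical_dim(None, 3)   -> (0, 1, 2)   # None reduces over all dimensions
#   _canonical_dim(-1, 3)     -> (2,)        # negative dims are wrapped
#   _canonical_dim((1, 0), 3) -> (0, 1)      # results are sorted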
def _sparse_coo_flatten_indices(indices: Tensor, shape: tuple):
# Flatten N-D indices to 1-D indices
flat_indices = indices.new_zeros(indices.size(1))
for d, sz in enumerate(shape):
flat_indices.mul_(sz)
flat_indices.add_(indices[d])
return flat_indices
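# Illustrative example (editor's sketch): flattening uses row-major (C) order, so for
# a sparse shape of (2, 3) the multi-index (1, 2) maps to 1 * 3 + 2 == 5.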
def _any(input: Tensor, dim: tuple, keepdim: bool):
# Support torch.any with tuple dim argument.
# Workaround of https://github.com/pytorch/pytorch/issues/56586
r = input
for d in reversed(dim):
r = r.any(dim=d, keepdim=keepdim)
return r
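# Illustrative example (editor's sketch) of reducing over a tuple of dims one dim at a time:
#   _any(torch.tensor([[True, False], [False, False]]), (0, 1), False) -> tensor(True)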
def _sparse_coo_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
"""Sparse variant of torch.where. Supports sparse COO and hybrid sparse COO tensors.
_sparse_coo_where implements the following invariant:
_sparse_coo_where(mask, input, fill_value).to_dense(fill_value) ==
torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value))
where `a == b` means `assertEqual(a, b)`, mask is boolean sparse
tensor, and `to_dense(fill_value)` is like `to_dense()` except
that the unspecified elements are mapped to `fill_value` rather
than to `0`.
Returns a sparse COO tensor with the following features:
- all specified elements correspond to masked-in elements that
have the values of the input tensor. If there exists a masked-in
element (as specified by mask) that is not specified in the
input, in the result tensor, the corresponding element has value
0. In the dense part of the sparse tensor, the masked-out
elements are replaced with fill_value.
- all unspecified elements correspond to masked-out elements.
"""
assert input.layout == torch.sparse_coo
assert mask.layout == input.layout
assert mask.shape == input.shape
assert mask.dense_dim() == input.dense_dim() # TODO: eliminate this restriction
input = input.coalesce()
# For set operations on sparse tensor indices, we'll convert
# multi-dimensional indices to 1-D indices for efficiency.
input_flat_indices = _sparse_coo_flatten_indices(
input.indices(), input.shape[: input.sparse_dim()]
)
mask_flat_indices = _sparse_coo_flatten_indices(
mask.indices(), mask.shape[: mask.sparse_dim()]
)
# the set of mask flat indices that define masked-in elements:
if mask.dense_dim() > 0:
mask_values = _any(
mask.values(), tuple(range(1, input.sparse_dim() + 1)), False
)
else:
mask_values = mask.values()
maskin_flat_indices = mask_flat_indices[mask_values.nonzero()[:, 0]]
def intersection(i1, i2):
union, counts = torch.cat([i1, i2]).unique(return_counts=True)
return union, torch.where(counts.gt(1))
def minus(i1, i2):
union, counts = torch.cat([i1, i2]).unique(return_counts=True)
return intersection(union[torch.where(counts.eq(1))], i1)
def _apply(a):
obj, w = a
return obj[w]
# the set of input flat indices of specified and masked-in elements:
maskin_input_flat_indices = _apply(
intersection(maskin_flat_indices, input_flat_indices)
)
_, w = intersection(input_flat_indices, maskin_input_flat_indices)
# the indices and values of masked-in elements
where_input_indices = input.indices()[(slice(None),) + w]
where_input_values = input.values()[w]
if mask.dense_dim() > 0:
# apply mask to the dense part of the input values:
_, w1 = intersection(mask_flat_indices, maskin_input_flat_indices)
where_mask_values = mask.values()[w1]
where_input_values = torch.where(
where_mask_values, where_input_values, fill_value
)
# the set of flat indices of unspecified input and masked-in elements:
maskin_zero_flat_indices = _apply(
minus(maskin_flat_indices, maskin_input_flat_indices)
)
# the indices of masked-in zero elements
_, w = intersection(mask_flat_indices, maskin_zero_flat_indices)
where_zero_indices = mask.indices()[(slice(None),) + w]
# construct result
n = where_zero_indices.size(1)
if n == 0:
# the input is coalesced, hence input_flat_indices are ordered
# and the result is guaranteed to be coalesced:
result = torch.sparse_coo_tensor(
where_input_indices, where_input_values, input.shape
)
return result._coalesced_(True)
where_indices = torch.cat([where_input_indices, where_zero_indices], dim=1)
where_values = torch.cat(
[
where_input_values,
where_input_values.new_zeros((n,) + where_input_values.shape[1:]),
]
)
result = torch.sparse_coo_tensor(where_indices, where_values, input.shape)
# appending zero elements leads to uncoalesced sparse tensor
return result.coalesce()
def _sparse_coo_scatter_reduction_helper(
op,
mask_input: Tensor,
dims: tuple[int, ...],
keepdim: bool,
dtype: Optional[DType] = None,
) -> Tensor:
reduce = op.__name__
valid_reductions = ["sum", "prod", "amax", "amin"]
if reduce not in valid_reductions:
raise ValueError(
f"op must be one of {' '.join(valid_reductions)}, but got {reduce} instead"
)
output_dtype = dtype
values, indices = mask_input._values(), mask_input._indices()
input_dims = mask_input.dim()
num_sparse_dims = mask_input.sparse_dim()
reduced_sparse_dims = []
retained_sparse_dims = []
reduced_dense_dims = []
# promote dtype if specified
if values.dtype != output_dtype:
values = values.to(output_dtype)
if keepdim:
output_shape = tuple(
1 if i in dims else si for (i, si) in enumerate(mask_input.shape)
)
else:
output_shape = tuple(
si for (i, si) in enumerate(mask_input.shape) if i not in dims
)
for d in dims:
if d >= input_dims:
continue
if d < num_sparse_dims:
reduced_sparse_dims.append(d)
else:
reduced_dense_dims.append(d + 1 - num_sparse_dims)
# Reduce dense dimensions
if len(reduced_dense_dims) > 0:
if reduce == "sum":
new_values = values
new_values = op(new_values, dim=reduced_dense_dims, keepdim=bool(keepdim))
else:
# FIXME: Implement reductions for dense dimensions for ops with non-zero reduction identities
return NotImplemented
else:
new_values = values.clone()
# Reduce sparse dimensions
if len(reduced_sparse_dims) == num_sparse_dims:
if reduce in {"amax", "amin"} and new_values.size(0) == 0:
# IndexError: amax(): Expected reduction dim 0 to have non-zero size.
# sum()/prod() return the reduction identity when dim has size 0 but amax()/amin() do not
# See https://github.com/pytorch/pytorch/issues/61901
new_values = _reduction_identity(reduce, new_values)
else:
new_values = op(new_values, dim=0)
if keepdim:
for _ in range(num_sparse_dims):
new_values = new_values.unsqueeze(0)
return new_values.to(dtype=output_dtype).to_sparse()
else:
new_indices = indices.clone()
if keepdim:
# zero out reduced sparse dimensions if keepdim = True
# ensures that the call to torch.unique folds duplicated indices together while preserving the dimension
new_indices[reduced_sparse_dims, :] = 0
else:
# remove reduced sparse dimensions if keepdim = False
if len(reduced_sparse_dims) > 0:
retained_sparse_dims = [
i
for i in range(num_sparse_dims)
if i not in set(reduced_sparse_dims)
]
new_indices = new_indices.index_select(
0, torch.tensor(retained_sparse_dims).to(mask_input.device)
)
# Use scatter_reduce to reduce items in the new_values tensor that correspond to the same indices in new_indices
if new_indices.numel() > 0:
# lexsort indices and get index tensor for scatter reduction
new_indices, inverse_indices = torch.unique(
new_indices, return_inverse=True, dim=1
)
out_shape = list(new_values.shape)
out_shape[0] = new_indices.shape[1]
for _ in range(new_values.ndim - 1):
inverse_indices = inverse_indices.unsqueeze(-1)
scatter_indices = inverse_indices.expand(new_values.shape)
# FIXME: temporary workaround for issue with bfloat16/float16 remove when acctype is implemented for scatter_reduce
if output_dtype in {torch.bfloat16, torch.float16}:
new_values = new_values.to(torch.float)
out = new_values.new_empty(out_shape)
new_values = out.scatter_reduce_(
0, scatter_indices, new_values, reduce=reduce, include_self=False
)
new_values = new_values.to(dtype=output_dtype)
else:
out = new_values.new_empty(out_shape)
new_values = out.scatter_reduce_(
0, scatter_indices, new_values, reduce=reduce, include_self=False
)
return torch.sparse_coo_tensor(
new_indices,
new_values,
output_shape,
dtype=output_dtype,
device=mask_input.device,
)
def _sparse_csr_segment_reduction_helper(
op,
mask_input: Tensor,
dims: tuple[int, ...],
keepdim: bool,
dtype: Optional[DType] = None,
) -> Tensor:
# Currently, while sparse CSR is always 2D with no dense dimensions keepdim must be True
# FIXME: when dense dimensions are implemented for CSR tensors
assert (
keepdim
), "reduction operations on CSR tensors with keepdim=False is unsupported"
reduce = op.__name__
valid_reductions = ["sum", "prod", "mean", "amax", "amin"]
if reduce not in valid_reductions:
raise ValueError(
f"op must be one of {' '.join(valid_reductions)}, but got {reduce} instead"
)
device = mask_input.device
output_dtype = dtype
values, crow_indices, col_indices = (
mask_input.values(),
mask_input.crow_indices(),
mask_input.col_indices(),
)
# promote dtype if specified
if values.dtype != output_dtype:
values = values.to(output_dtype)
if len(dims) == 0:
return mask_input
if len(dims) == 1:
if dims[0] == 0:
new_col_indices, scatter_indices = torch.unique(
col_indices, return_inverse=True
)
new_nnz = new_col_indices.shape[0]
new_crow_indices = torch.tensor([0, new_nnz])
new_values = values.new_empty(new_col_indices.shape)
new_values.scatter_reduce_(
0, scatter_indices, values, reduce, include_self=False
)
new_shape = [1, mask_input.size(1)]
else:
assert (
dims[0] == 1
), "Sparse CSR tensors are 2D and only support reduction along dim 0 or 1."
# all intervals new_crow_indices[i] - new_crow_indices[i-1] are 1,
# except where crow_indices[i] == crow_indices[i-1], in which case the interval remains 0
new_crow_indices = torch.cat(
(
crow_indices.new_zeros(1),
torch.cumsum(torch.diff(crow_indices) != 0, 0),
),
0,
)
new_nnz = new_crow_indices[-1]
new_col_indices = col_indices.new_zeros(new_nnz)
new_values = torch._segment_reduce(values, reduce, offsets=crow_indices) # type: ignore[attr-defined]
new_shape = [mask_input.size(0), 1]
else:
assert len(dims) == 2
nnz = min(1, values.numel())
if nnz == 1:
op_kwargs = {"keepdim": True, "dtype": output_dtype}
# amax and amin do not support dtype kwarg
if reduce in ["amax", "amin"]:
del op_kwargs["dtype"]
new_values = op(values, 0, **op_kwargs)
else:
new_values = torch.empty(0, dtype=output_dtype)
new_col_indices = col_indices.new_zeros(nnz)
new_crow_indices = torch.tensor([0, nnz])
new_shape = [1, nnz]
return torch.sparse_csr_tensor(
new_crow_indices,
new_col_indices,
new_values,
new_shape,
dtype=output_dtype,
device=device,
)
def _sparse_csr_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
"""Sparse variant of torch.where. Supports sparse CSR tensors."""
# TODO: implement sparse CSR specific where operator for efficiency
return _sparse_coo_where(
mask.to_sparse_coo(), input.to_sparse_coo(), fill_value
).to_sparse_csr()
def _where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
"""torch.where with sparse inputs support.
_where implements the following invariant:
_where(mask, input, fill_value).to_dense(fill_value) ==
torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value))
where `a == b` means `assertEqual(a, b)`, mask is boolean sparse
tensor, and `to_dense(fill_value)` is like `to_dense()` except
that the unspecified elements are mapped to `fill_value` rather
than to `0`.
Returns a sparse tensor with the following features:
- all specified elements correspond to masked-in elements that
have the values of the input tensor. If there exists a masked-in
element (as specified by mask) that is not specified in the
input, in the result tensor, the corresponding element has value
0. In the dense part of the sparse tensor, the masked-out
elements are replaced with fill_value.
- all unspecified elements correspond to masked-out elements.
"""
if mask.layout == torch.strided:
return torch.where(mask, input, fill_value)
elif mask.layout == torch.sparse_coo:
return _sparse_coo_where(mask, input, fill_value)
elif mask.layout == torch.sparse_csr:
return _sparse_csr_where(mask, input, fill_value)
else:
raise ValueError(
f"_where expects strided or sparse COO or sparse CSR tensor but got {mask.layout}"
)
def _input_mask(input: Union[Tensor, MaskedTensor], *args, **kwargs) -> Tensor:
"""Return canonical input mask.
A canonical input mask is defined as a boolean mask tensor whose
shape and layout match the shape and layout of the input.
The canonical input mask is computed from the :attr:`mask` tensor
content to meet the following criteria:
1. The shape of the canonical input mask is the same as the shape
of :attr:`input` tensor. If the mask tensor has a smaller shape
than the shape of the :attr:`input`, broadcasting rules will be
applied. Downcasting of mask is not supported.
2. The layout of the canonical input mask is the same as the
layout of the :attr:`input` tensor. If the mask has different
layout, it will be converted to the expected layout. In the
case of sparse COO layout, the canonical input mask will be
coalesced.
3. The dtype of the canonical input mask is torch.bool. If the
mask dtype is not bool then it will be converted to bool dtype
using `.to(dtype=bool)` method call.
4. The elements of the canonical input mask have boolean values
copied from the content of the :attr:`mask` tensor (after
possible broadcasting and dtype conversion transforms). In
general, the sparsity pattern of the sparse canonical input
mask need not be the same as the sparsity pattern of the
sparse :attr:`input` tensor.
"""
if input.layout not in {torch.strided, torch.sparse_coo, torch.sparse_csr}:
raise ValueError(
f"_input_mask expects strided or sparse COO or sparse CSR tensor but got {input.layout}"
)
mask = kwargs.get("mask")
# default mask
if mask is None:
raise ValueError("_input_mask requires explicit mask")
# mask shape must match with input shape
if mask.shape != input.shape:
if mask.ndim > input.ndim:
raise IndexError(
"_input_mask expected broadcastable mask (got mask dimensionality higher than of the input)"
)
if mask.layout == torch.strided:
mask = torch.broadcast_to(mask.clone(), input.shape).to(dtype=torch.bool)
elif mask.layout == torch.sparse_coo:
mask = torch._sparse_broadcast_to(mask, input.shape)
else:
assert mask.layout == torch.sparse_csr
# Broadcasting of CSR tensors is not implemented. Working
# around by using COO layout.
mask = torch._sparse_broadcast_to(
mask.to_sparse(), input.shape
).to_sparse_csr()
# mask layout must match with input layout
if mask.layout != input.layout:
if input.layout == torch.strided:
mask = mask.to_dense()
elif input.layout == torch.sparse_coo:
if mask.layout == torch.strided:
mask = mask.to_sparse(input.sparse_dim())
else:
mask = mask.to_sparse()
else:
assert input.layout == torch.sparse_csr
mask = mask.to_sparse_csr()
# sparse mask must be coalesced
if mask.layout == torch.sparse_coo:
mask = mask.coalesce()
# mask is a boolean tensor
mask = mask.to(dtype=torch.bool)
return mask
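# Illustrative example (editor's sketch): a smaller boolean mask is broadcast to the
# input shape, e.g. a (2, 3) strided input with mask=torch.tensor([True, False, True])
# yields a (2, 3) boolean mask with that row repeated along dim 0.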
def _output_mask(op, input: Tensor, *args, **kwargs) -> Tensor:
"""Return output mask of masked operation applied to given arguments."""
if callable(op):
is_reduction = op.__name__ in {
"sum",
"prod",
"amax",
"amin",
"argmax",
"argmin",
"mean",
"median",
"norm",
"var",
"std",
"logsumexp",
}
is_normalization = op.__name__ in {
"softmax",
"log_softmax",
"softmin",
"normalize",
"cumsum",
"cumprod",
}
if is_reduction:
if op.__name__ == "norm":
if args:
args = args[1:] # lstrip ord argument
dim = args[0] if args else kwargs.get("dim")
outmask = _input_mask(input, *args, **kwargs)
keepdim = kwargs.get("keepdim", False)
dim_ = _canonical_dim(dim, input.ndim)
return _any(outmask, dim_, bool(keepdim))
elif is_normalization:
return _input_mask(input, *args, **kwargs)
else:
raise ValueError(
f"_output_mask expected masked operation (got callable {op.__module__}.{op.__name__})"
)
else:
raise ValueError(
f"_output_mask expected masked operation (got {type(op).__name__} object)"
)
def _combine_input_and_mask(
op, input: Union[MaskedTensor, Tensor], mask, *args
) -> Tensor:
def helper(input, mask):
if mask is None:
return input
canonical_mask = _input_mask(input, mask=mask)
if callable(op):
fill_value = _reduction_identity(op.__name__, input, *args)
return _where(canonical_mask, input, fill_value)
else:
raise ValueError(
f"_combine_input_and_mask expected masked operation (got {type(op).__name__} object)"
)
class Combine(torch.autograd.Function):
@staticmethod
def forward(ctx, input, mask):
"""Return input with masked-out elements eliminated for the given operations."""
ctx.save_for_backward(mask)
if mask is not None:
ctx.mark_non_differentiable(mask)
return helper(input, mask)
@staticmethod
def backward(ctx, grad_output):
(mask,) = ctx.saved_tensors
grad_data = (
grad_output.get_data() if is_masked_tensor(grad_output) else grad_output
)
result = as_masked_tensor(grad_data, mask)
return result, None
return (
Combine.apply(input.get_data(), input.get_mask()) # type: ignore[union-attr]
if is_masked_tensor(input)
else helper(input, mask)
)
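# Illustrative examples (editor's sketch), assuming a strided float input and using the
# masked sum/amax functions defined below in this module:
#   input = torch.tensor([1.0, 2.0, 3.0]); mask = torch.tensor([True, False, True])
#   _combine_input_and_mask(sum, input, mask)  -> tensor([1., 0., 3.])    # fill with 0
#   _combine_input_and_mask(amax, input, mask) -> tensor([1., -inf, 3.])  # fill with -inf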
@_apply_docstring_templates
def sum(
input: Union[Tensor, MaskedTensor],
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
# __doc__ is generated by _apply_docstring_templates decorator
if dtype is None:
# promote integer types to int64 when output dtype is not specified
if input.layout == torch.sparse_csr:
if input.dtype in {
torch.uint8,
torch.bool,
torch.int8,
torch.int16,
torch.int32,
}:
# csr.to(dtype=torch.int64) is not implemented, so
# using coo.to on input to ensure the promoted dtype
input = input.to_sparse_coo().to(dtype=torch.int64).to_sparse_csr()
else:
dtype = input.dtype
else:
dtype = input.dtype
if input.dtype in {
torch.uint8,
torch.bool,
torch.int8,
torch.int16,
torch.int32,
}:
dtype = torch.int64
dim_ = _canonical_dim(dim, input.ndim)
mask_input = _combine_input_and_mask(sum, input, mask)
if mask_input.layout == torch.strided:
return torch.sum(mask_input, dim_, bool(keepdim), dtype=dtype)
elif mask_input.layout == torch.sparse_coo:
return _sparse_coo_scatter_reduction_helper(
torch.sum, mask_input, dim_, bool(keepdim), dtype
)
elif mask_input.layout == torch.sparse_csr:
return torch._sparse_csr_sum(
mask_input, dim=list(dim_), keepdim=bool(keepdim), dtype=dtype
)
else:
raise ValueError(
f"masked sum expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)"
)
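# Illustrative usage of the masked sum above (editor's sketch):
#   >>> x = torch.tensor([[1, 2, 3], [4, 5, 6]])
#   >>> m = torch.tensor([[True, False, True], [False, True, False]])
#   >>> sum(x, 1, mask=m)
#   tensor([4, 5])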
@_apply_docstring_templates
def prod(
input: Union[Tensor, MaskedTensor],
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
# __doc__ is generated by _apply_docstring_templates decorator
if dtype is None:
# promote integer types to int64 when output dtype is not specified
if input.layout == torch.sparse_csr:
if input.dtype in {
torch.uint8,
torch.bool,
torch.int8,
torch.int16,
torch.int32,
}:
# csr.to(dtype=torch.int64) is not implemented, so
# using coo.to on input to ensure the promoted dtype
input = input.to_sparse_coo().to(dtype=torch.int64).to_sparse_csr()
else:
dtype = input.dtype
else:
dtype = input.dtype
if input.dtype in {
torch.uint8,
torch.bool,
torch.int8,
torch.int16,
torch.int32,
}:
dtype = torch.int64
dim_ = _canonical_dim(dim, input.ndim)
mask_input = _combine_input_and_mask(prod, input, mask)
if mask_input.layout == torch.strided:
# Workaround https://github.com/pytorch/pytorch/issues/56586
result = mask_input
result = result.to(dtype=dtype)
for d in reversed(dim_):
result = result.prod(dim=d, keepdim=bool(keepdim))
return result
elif mask_input.layout == torch.sparse_coo:
if mask is None:
# See comment in the sparse_csr branch, the same issue arises for sparse_coo tensors
raise ValueError(
"masked prod expects explicit mask for sparse_coo tensor input"
)
return _sparse_coo_scatter_reduction_helper(
torch.prod, mask_input, dim_, bool(keepdim), dtype
)
elif mask_input.layout == torch.sparse_csr:
if mask is None:
# mask is None corresponds to all-True mask. The
# unspecified elements in the CSR tensor correspond to
# zero values. Hence, the prod reduction result is
# automatically zero unless all elements are specified.
# A semi-optimal way to take this into account is to use:
#
# masked_prod(csr, ..., mask=None) == torch._sparse_csr_prod(csr, ...) * all(csr.nonzero(), ...)
#
# but that requires implementing `all` and `nonzero`
# support for sparse csr tensors.
raise ValueError(
"masked prod expects explicit mask for sparse_csr tensor input"
)
return torch._sparse_csr_prod(
mask_input, dim=list(dim_), keepdim=bool(keepdim), dtype=dtype
)
else:
raise ValueError(
f"masked prod expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)"
)
@_apply_docstring_templates
def cumsum(
input: Tensor,
dim: int,
*,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
mask_input = _combine_input_and_mask(sum, input, mask)
if mask_input.layout == torch.strided:
return torch.cumsum(mask_input, dim_, dtype=dtype).to(dtype=dtype)
else:
raise ValueError(
f"masked cumsum expects strided tensor (got {mask_input.layout} tensor)"
)
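# Illustrative usage of the masked cumsum above (editor's sketch): masked-out elements
# contribute the identity value 0 to the running sum:
#   >>> cumsum(torch.tensor([1, 2, 3]), 0, mask=torch.tensor([True, False, True]))
#   tensor([1, 1, 4])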
@_apply_docstring_templates
def cumprod(
input: Tensor,
dim: int,
*,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
mask_input = _combine_input_and_mask(prod, input, mask)
if mask_input.layout == torch.strided:
return torch.cumprod(mask_input, dim_, dtype=dtype).to(dtype=dtype)
else:
raise ValueError(
f"masked cumprod expects strided tensor (got {mask_input.layout} tensor)"
)
@_apply_docstring_templates
def amax(
input: Union[Tensor, MaskedTensor],
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
{reduction_identity_dtype}
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
mask_input = _combine_input_and_mask(amax, input, mask)
dim_ = _canonical_dim(dim, mask_input.ndim)
if mask_input.layout == torch.strided:
return torch.amax(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
elif mask_input.layout == torch.sparse_coo:
if mask is None:
# See comment in the sparse_csr branch of prod, a similar issue arises here
# where unspecified elements along a dimension may need to be reduced with the result
raise ValueError(
"masked amax expects explicit mask for sparse_coo tensor input"
)
return _sparse_coo_scatter_reduction_helper(
torch.amax, mask_input, dim_, bool(keepdim), dtype
)
elif mask_input.layout == torch.sparse_csr:
if mask is None:
raise ValueError(
"masked amax expects explicit mask for sparse_csr tensor input"
)
return _sparse_csr_segment_reduction_helper(
torch.amax, mask_input, dim_, bool(keepdim), dtype
)
else:
raise ValueError(
f"masked amax expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)"
)
@_apply_docstring_templates
def amin(
input: Union[Tensor, MaskedTensor],
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
{reduction_identity_dtype}
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
mask_input = _combine_input_and_mask(amin, input, mask)
dim_ = _canonical_dim(dim, mask_input.ndim)
if mask_input.layout == torch.strided:
return torch.amin(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
elif mask_input.layout == torch.sparse_coo:
if mask is None:
# See comment in the sparse_csr branch of prod, a similar issue arises here
# where unspecified elements along a dimension may need to be reduced with the result
raise ValueError(
"masked amax expects explicit mask for sparse_coo tensor input"
)
return _sparse_coo_scatter_reduction_helper(
torch.amin, mask_input, dim_, bool(keepdim), dtype
)
elif mask_input.layout == torch.sparse_csr:
if mask is None:
raise ValueError(
"masked amin expects explicit mask for sparse_csr tensor input"
)
return _sparse_csr_segment_reduction_helper(
torch.amin, mask_input, dim_, bool(keepdim), dtype
)
else:
raise ValueError(
f"masked amin expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)"
)
@_apply_docstring_templates
def argmax(
input: Union[Tensor, MaskedTensor],
dim: Optional[int] = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
{reduction_identity_dtype}
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
mask_input = _combine_input_and_mask(argmax, input, mask)
if mask_input.layout == torch.strided:
return torch.argmax(mask_input, dim, bool(keepdim)).to(dtype=dtype)
else:
raise ValueError(
f"masked argmax expects strided tensor (got {mask_input.layout} tensor)"
)
@_apply_docstring_templates
def argmin(
input: Union[Tensor, MaskedTensor],
dim: Optional[int] = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
{reduction_identity_dtype}
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
mask_input = _combine_input_and_mask(argmin, input, mask)
if mask_input.layout == torch.strided:
return torch.argmin(mask_input, dim, bool(keepdim)).to(dtype=dtype)
else:
raise ValueError(
f"masked argmin expects strided tensor (got {mask_input.layout} tensor)"
)
@_apply_docstring_templates
def mean(
input: Union[Tensor, MaskedTensor],
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
By definition, the identity value of a mean operation is the mean
value of the tensor. If all elements of the input tensor along the given
dimension(s) :attr:`dim` are masked out, the identity value of the
mean is undefined. Due to this ambiguity, the elements of the output
tensor with strided layout that correspond to fully masked-out
elements have ``nan`` values.
{reduction_args}
{reduction_example}"""
dtype_source = "Optional"
if dtype is None:
dtype = input.dtype
dtype_source = "Input"
if not (dtype.is_floating_point or dtype.is_complex):
raise ValueError(
f"mean(): Could not infer output dtype. {dtype_source} dtype must be either "
f"a floating point or complex dtype. Got: {dtype}"
)
if input.layout == torch.strided:
if mask is None:
# TODO: compute count analytically
count = sum(
torch.ones(input.shape, dtype=torch.int64, device=input.device),
dim,
keepdim=keepdim,
)
total = sum(input, dim, keepdim=keepdim, dtype=dtype)
else:
inmask = _input_mask(input, mask=mask)
count = inmask.sum(dim=dim, keepdim=bool(keepdim))
total = sum(input, dim, keepdim=keepdim, dtype=dtype, mask=inmask)
return total / count
elif input.layout == torch.sparse_csr:
mask_input = _combine_input_and_mask(mean, input, mask)
dim_ = _canonical_dim(dim, mask_input.ndim)
if mask is None:
raise ValueError(
"masked mean expects explicit mask for sparse_csr tensor input"
)
return _sparse_csr_segment_reduction_helper(
torch.mean, mask_input, dim_, bool(keepdim), dtype
)
else:
raise ValueError(
f"masked mean expects strided or sparse_csr tensor (got {input.layout} tensor)"
)
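# Illustrative usage of the masked mean above (editor's sketch): the masked sum is divided
# by the count of masked-in elements, so fully masked-out slices produce nan:
#   >>> x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
#   >>> m = torch.tensor([[True, False], [False, False]])
#   >>> mean(x, 1, mask=m)
#   tensor([1., nan])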
@_apply_docstring_templates
def median(
input: Union[Tensor, MaskedTensor],
dim: int = -1,
*,
keepdim: bool = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
By definition, the identity value of a median operation is the median
value of the tensor. If all elements of the input tensor along the given
dimension(s) :attr:`dim` are masked out, the identity value of the
median is undefined. Due to this ambiguity, the elements of the output
tensor with strided layout that correspond to fully masked-out
elements have ``nan`` values.
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
is_float = torch.is_floating_point(input)
if not is_float:
input = input.to(dtype=torch.float)
mask_input = _combine_input_and_mask(median, input, mask)
if mask_input.layout == torch.strided:
output = torch.nanmedian(mask_input, dim_, keepdim).values
if is_float:
return output
elif not is_float and not torch.isnan(output).any():
return output.to(dtype=dtype)
else:
raise ValueError(
"masked median expects no fully masked out rows if dtype is not floating point"
)
else:
raise ValueError(
f"masked median expects strided tensor (got {mask_input.layout} tensor)"
)
@_apply_docstring_templates
def logsumexp(
input: Tensor,
dim: DimOrDims = None,
*,
keepdim: bool = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)
mask_input = _combine_input_and_mask(logsumexp, input, mask)
if mask_input.layout == torch.strided:
return torch.logsumexp(mask_input, dim_, keepdim=keepdim).to(dtype=dtype)
else:
raise ValueError(
f"masked logsumexp expects strided tensor (got {mask_input.layout} tensor)"
)
# Cannot use _apply_docstring_templates as it is only set up for reductions and normalizations
def logaddexp(
input: Union[Tensor, MaskedTensor],
other: Union[Tensor, MaskedTensor],
*,
dtype: Optional[DType] = None,
input_mask: Optional[Tensor] = None,
other_mask: Optional[Tensor] = None,
) -> Tensor:
"""logaddexp(input, other, *, dtype=None, input_mask=None, other_mask=None) -> Tensor
Returns the logaddexp of all the elements in the :attr:`input` and :attr:`other`
tensors. The :attr:`input` elements are masked out according to the boolean tensor
:attr:`input_mask` and the :attr:`other` elements are masked out according to the boolean tensor
:attr:`other_mask`.
The shapes of a mask tensor and the tensor to be masked
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the mask
tensor must not be greater than that of the tensor to be masked.
Args:
input (Tensor): the input tensor
other (Tensor): the second input tensor
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the output tensor is
casted to :attr:`dtype` after the operation is
performed. Default: None.
input_mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of :attr:`input` tensor elements.
Default: None, which is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
other_mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of :attr:`other` tensor elements.
Default: None, which is equivalent to ``torch.ones(other.shape, dtype=torch.bool)``.
Example::
>>> input = torch.tensor([-100.0, -200, -300])
>>> input
tensor([-100., -200., -300.])
>>> other = torch.tensor([-1.0, -2, -3])
>>> other
tensor([-1., -2., -3.])
>>> mask = torch.tensor([True, False, True])
>>> mask
tensor([ True, False, True])
>>> torch.masked._ops.logaddexp(input, other, input_mask=mask, other_mask=mask)
tensor([-1., -inf, -3.])"""
if dtype is None:
dtype = input.dtype
if input.layout == torch.strided and other.layout == torch.strided:
mask_input = _combine_input_and_mask(logaddexp, input, input_mask)
mask_other = _combine_input_and_mask(logaddexp, other, other_mask)
return torch.logaddexp(mask_input, mask_other).to(dtype=dtype)
else:
raise ValueError(
f"masked logaddexp expects strided tensors (got {input.layout} tensor for input, {other.layout} for other)"
)
@_apply_docstring_templates
def norm(
input: Union[Tensor, MaskedTensor],
ord: Optional[float] = 2.0,
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
The identity value of the norm operation, which is used to start the
reduction, is ``{identity_float32}``, except for ``ord=-inf``, for which it is
``{identity_ord_ninf}``.
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
mask_input = _combine_input_and_mask(norm, input, mask, ord)
if mask_input.layout == torch.strided:
dim_ = _canonical_dim(dim, input.ndim)
return torch.linalg.vector_norm(
mask_input, ord, dim_, bool(keepdim), dtype=dtype
)
else:
raise ValueError(
f"masked norm expects strided tensor (got {mask_input.layout} tensor)"
)
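# Illustrative usage of the masked norm above (editor's sketch): masked-out elements are
# replaced with 0 (the identity for ord=2) before computing the vector norm:
#   >>> norm(torch.tensor([3.0, 4.0]), 2, 0, mask=torch.tensor([True, True]))
#   tensor(5.)
#   >>> norm(torch.tensor([3.0, 4.0]), 2, 0, mask=torch.tensor([True, False]))
#   tensor(3.)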
def _std_var(
input: Union[Tensor, MaskedTensor],
dim: DimOrDims,
unbiased: Optional[bool],
*,
correction_opt: Optional[Union[int, float]],
keepdim: Optional[bool],
dtype: Optional[DType],
mask: Optional[Tensor],
take_sqrt: Optional[bool],
) -> Tensor:
assert (
unbiased is None or correction_opt is None
), "Only one of unbiased and correction may be given"
correction = 1.0
if unbiased is not None:
correction = 1.0 if unbiased else 0.0
if correction_opt is not None:
correction = sym_float(correction_opt)
if dtype is None:
dtype = input.dtype
if not (dtype.is_floating_point or dtype.is_complex):
dtype = torch.float32
compute_dtype = dtype
if not (compute_dtype.is_floating_point or compute_dtype.is_complex):
compute_dtype = torch.float32
if input.layout == torch.strided:
if mask is None:
# TODO: compute count analytically
count = sum(
torch.ones(input.shape, dtype=torch.int64, device=input.device),
dim,
keepdim=True,
)
sample_total = sum(input, dim, keepdim=True, dtype=dtype)
else:
inmask = _input_mask(input, mask=mask)
count = inmask.sum(dim=dim, keepdim=True)
sample_total = sum(input, dim, keepdim=True, dtype=dtype, mask=inmask)
# TODO: replace torch.subtract/divide/square/maximum with
# masked subtract/divide/square/maximum when these will be
# available.
sample_mean = torch.divide(sample_total, count)
x = torch.subtract(input, sample_mean)
if mask is None:
total = sum(x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype)
else:
total = sum(
x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype, mask=inmask # type: ignore[possibly-undefined]
)
if not keepdim:
count = count.reshape(total.shape)
if correction != 0:
real_dtype = (
corresponding_real_dtype(compute_dtype)
if compute_dtype.is_complex
else compute_dtype
)
count = count.to(real_dtype)
count = torch.subtract(count, correction)
count = torch.maximum(count, count.new_zeros([]))
output = torch.divide(total, count).to(dtype=dtype)
if take_sqrt:
output = torch.sqrt(output)
return output
else:
raise ValueError(
f"masked std/var expects strided tensor (got {input.layout} tensor)"
)
@_apply_docstring_templates
def var(
input: Union[Tensor, MaskedTensor],
dim: DimOrDims = None,
unbiased: Optional[bool] = None,
*,
correction: Optional[Union[int, float]] = None,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
The identity value of the sample variance operation is undefined. The
elements of the output tensor with strided layout that correspond to
fully masked-out elements have ``nan`` values.
{reduction_args}
{reduction_example}"""
return _std_var(
input=input,
dim=dim,
unbiased=unbiased,
correction_opt=correction,
keepdim=keepdim,
dtype=dtype,
mask=mask,
take_sqrt=False,
)
@_apply_docstring_templates
def std(
input: Union[Tensor, MaskedTensor],
dim: DimOrDims = None,
unbiased: Optional[bool] = None,
*,
correction: Optional[int] = None,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
The identity value of the sample standard deviation operation is undefined. The
elements of the output tensor with strided layout that correspond to
fully masked-out elements have ``nan`` values.
{reduction_args}
{reduction_example}"""
return _std_var(
input=input,
dim=dim,
unbiased=unbiased,
correction_opt=correction,
keepdim=keepdim,
dtype=dtype,
mask=mask,
take_sqrt=True,
)
@_apply_docstring_templates
def softmax(
input: Union[Tensor, MaskedTensor],
dim: int,
*,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
mask_input = _combine_input_and_mask(amax, input, mask)
if mask_input.layout == torch.strided:
return torch.nn.functional.softmax(mask_input, dim_, dtype=dtype)
else:
raise ValueError(
f"masked softmax expects strided tensor (got {mask_input.layout} tensor)"
)
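# Illustrative usage of the masked softmax above (editor's sketch): masked-out positions
# are filled with -inf before the softmax, so they receive zero probability:
#   >>> softmax(torch.tensor([1.0, 1.0, 1.0]), 0, mask=torch.tensor([True, True, False]))
#   tensor([0.5000, 0.5000, 0.0000])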
@_apply_docstring_templates
def log_softmax(
input: Union[Tensor, MaskedTensor],
dim: int,
*,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
mask_input = _combine_input_and_mask(amax, input, mask)
if mask_input.layout == torch.strided:
return torch.nn.functional.log_softmax(mask_input, dim_, dtype=dtype)
else:
raise ValueError(
f"masked log_softmax expects strided tensor (got {mask_input.layout} tensor)"
)
@_apply_docstring_templates
def softmin(
input: Union[Tensor, MaskedTensor],
dim: int,
*,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
mask_input = _combine_input_and_mask(amin, input, mask)
if mask_input.layout == torch.strided:
return torch.nn.functional.softmin(mask_input, dim_, dtype=dtype)
else:
raise ValueError(
f"masked softmin expects strided tensor (got {mask_input.layout} tensor)"
)
@_apply_docstring_templates
def normalize(
input: Union[Tensor, MaskedTensor],
ord: float,
dim: int,
*,
eps: float = 1e-12,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
# TODO: eliminate mask_input as unnecessary when using masked divide.
mask_input = _combine_input_and_mask(sum, input, mask)
if mask_input.layout == torch.strided:
nrm_ = norm(input, ord, dim, keepdim=True, dtype=dtype, mask=mask)
# TODO: replace torch.maximum with masked maximum when available.
denom = torch.maximum(nrm_, nrm_.new_full([], eps))
# TODO: replace torch.divide with masked divide when available.
return torch.divide(mask_input, denom)
else:
raise ValueError(
f"masked normalize expects strided tensor (got {mask_input.layout} tensor)"
)
```
|
=============================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.36 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\masked\maskedtensor\__init__.py
ENCODING: utf-8
```py
# Copyright (c) Meta Platforms, Inc. and affiliates
# flake8: noqa
from .binary import _apply_native_binary, _is_native_binary
from .core import is_masked_tensor, MaskedTensor
from .passthrough import _apply_pass_through_fn, _is_pass_through_fn
from .reductions import _apply_reduction, _is_reduction
from .unary import _apply_native_unary, _is_native_unary
```
|
==============================================================================================================================
SOURCE CODE FILE: _ops_refs.py
LINES: 1
SIZE: 17.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\masked\maskedtensor\_ops_refs.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# Copyright (c) Meta Platforms, Inc. and affiliates
from functools import partial
from typing import Any, Callable, TYPE_CHECKING
import torch
from .binary import _apply_native_binary, NATIVE_BINARY_FNS, NATIVE_INPLACE_BINARY_FNS
from .core import (
_get_data,
_masks_match,
_maybe_get_mask,
is_masked_tensor,
MaskedTensor,
)
from .passthrough import _apply_pass_through_fn, PASSTHROUGH_FNS
from .reductions import (
_apply_reduction,
NATIVE_REDUCE_FNS,
TENSOR_REDUCE_FNS,
TORCH_REDUCE_FNS,
)
from .unary import _apply_native_unary, NATIVE_INPLACE_UNARY_FNS, NATIVE_UNARY_FNS
if TYPE_CHECKING:
from torch._ops import OpOverload
__all__ = [] # type: ignore[var-annotated]
def _check_args_kwargs_length(
args, kwargs, error_prefix, len_args=None, len_kwargs=None
):
if len_args is not None and len_args != len(args):
raise ValueError(
f"{error_prefix}: len(args) must be {len_args} but got {len(args)}"
)
if len_kwargs is not None and len_kwargs != len(kwargs):
raise ValueError(
f"{error_prefix}: len(kwargs) must be {len_kwargs} but got {len(kwargs)}"
)
class _MaskedContiguous(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
if not is_masked_tensor(input):
raise ValueError("MaskedContiguous forward: input must be a MaskedTensor.")
if input.is_contiguous():
return input
data = input.get_data()
mask = input.get_mask()
return MaskedTensor(data.contiguous(), mask.contiguous())
@staticmethod
def backward(ctx, grad_output):
return grad_output
class _MaskedToDense(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
if not is_masked_tensor(input):
raise ValueError("MaskedToDense forward: input must be a MaskedTensor.")
if input.layout == torch.strided:
return input
ctx.layout = input.layout
data = input.get_data()
mask = input.get_mask()
return MaskedTensor(data.to_dense(), mask.to_dense())
@staticmethod
def backward(ctx, grad_output):
layout = ctx.layout
if layout == torch.sparse_coo:
return grad_output.to_sparse_coo()
elif layout == torch.sparse_csr:
return grad_output.to_sparse_csr()
elif layout == torch.strided:
return grad_output.to_dense()
raise ValueError("to_dense: Unsupported input layout: ", layout)
class _MaskedToSparse(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
if not is_masked_tensor(input):
raise ValueError("MaskedToSparse forward: input must be a MaskedTensor.")
# Following the sparse tensor convention, to_sparse always converts to the sparse_coo layout
if input.layout == torch.sparse_coo:
return input
data = input.get_data()
mask = input.get_mask()
sparse_mask = mask.to_sparse_coo().coalesce()
sparse_data = data.sparse_mask(sparse_mask)
return MaskedTensor(sparse_data, sparse_mask)
@staticmethod
def backward(ctx, grad_output):
return grad_output.to_dense()
class _MaskedToSparseCsr(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
if not is_masked_tensor(input):
raise ValueError("MaskedToSparseCsr forward: input must be a MaskedTensor.")
if input._masked_data.ndim != 2:
raise ValueError(
f"Only 2D tensors can be converted to the SparseCsr layout but got shape: {input._masked_data.size()}"
)
if input.layout == torch.sparse_csr:
return input
data = input.get_data()
mask = input.get_mask()
sparse_mask = mask.to_sparse_csr()
sparse_data = data.sparse_mask(sparse_mask)
return MaskedTensor(sparse_data, sparse_mask)
@staticmethod
def backward(ctx, grad_output):
return grad_output.to_dense()
class _MaskedWhere(torch.autograd.Function):
@staticmethod
def forward(ctx, cond, self, other):
ctx.mark_non_differentiable(cond)
ctx.save_for_backward(cond)
return torch.ops.aten.where(cond, self, other)
@staticmethod
def backward(ctx, grad_output):
(cond,) = ctx.saved_tensors
def masked_out_like(mt):
return MaskedTensor(mt.get_data(), torch.zeros_like(mt.get_mask()).bool())
return (
None,
torch.ops.aten.where(cond, grad_output, masked_out_like(grad_output)),
torch.ops.aten.where(cond, masked_out_like(grad_output), grad_output),
)
_MASKEDTENSOR_FUNCTION_TABLE = {}
_function_fn_apply_map = {
(
tuple(NATIVE_REDUCE_FNS),
tuple(TORCH_REDUCE_FNS),
tuple(TENSOR_REDUCE_FNS),
): _apply_reduction,
}
for fn_map_list, apply_fn in _function_fn_apply_map.items():
for fn_map in fn_map_list:
for fn in fn_map:
_MASKEDTENSOR_FUNCTION_TABLE[fn] = partial(apply_fn, fn)
def register_function_func(ops):
"""
Used for registering a new __torch_function__ function to MaskedTensor
Called via _MASKEDTENSOR_FUNCTION_TABLE[func](*args, **kwargs)
The code to register a new function looks like:
@register_function_func(list_of_ops)
def foo(func, *args, **kwargs):
<implementation>
"""
def wrapper(func):
for op in ops:
_MASKEDTENSOR_FUNCTION_TABLE[op] = partial(func, op)
return wrapper
@register_function_func(NATIVE_REDUCE_FNS + TORCH_REDUCE_FNS + TENSOR_REDUCE_FNS)
def _general_function_reductions(func, *args, **kwargs):
return _apply_reduction(func, *args, **kwargs)
@register_function_func([torch.Tensor.where, torch.where])
def _function_where(func, *args, **kwargs):
_check_args_kwargs_length(
args, kwargs, "__torch_function__, torch.where", len_args=3, len_kwargs=0
)
return _MaskedWhere.apply(*args)
@register_function_func([torch.Tensor.contiguous])
def _function_contiguous(func, *args, **kwargs):
return _MaskedContiguous.apply(args[0])
@register_function_func([torch.Tensor.to_dense])
def _function_to_dense(func, *args, **kwargs):
return _MaskedToDense.apply(args[0])
@register_function_func([torch.Tensor.to_sparse])
def _function_to_sparse(func, *args, **kwargs):
return _MaskedToSparse.apply(args[0])
@register_function_func([torch.Tensor.to_sparse_csr])
def _function_to_sparse_csr(func, *args, **kwargs):
return _MaskedToSparseCsr.apply(args[0])
_MASKEDTENSOR_DISPATCH_TABLE: dict["OpOverload", Callable[..., Any]] = {}
def register_dispatch_func(aten_ops):
"""
Used for registering a new __torch_dispatch__ function to MaskedTensor
Called via _MASKEDTENSOR_DISPATCH_TABLE[func](*args, **kwargs)
The code to register a new function looks like:
@register_dispatch_func(list_of_ops)
def foo(func, *args, **kwargs):
<implementation>
"""
def wrapper(func):
for aten_op in aten_ops:
_MASKEDTENSOR_DISPATCH_TABLE[aten_op] = partial(func, aten_op)
return wrapper
@register_dispatch_func(NATIVE_REDUCE_FNS + TORCH_REDUCE_FNS + TENSOR_REDUCE_FNS)
def _general_reduction(func, *args, **kwargs):
return _apply_reduction(func, *args, **kwargs)
@register_dispatch_func(PASSTHROUGH_FNS)
def _general_passthrough(func, *args, **kwargs):
return _apply_pass_through_fn(func, *args, **kwargs)
@register_dispatch_func(NATIVE_UNARY_FNS + NATIVE_INPLACE_UNARY_FNS)
def _general_unary(func, *args, **kwargs):
return _apply_native_unary(func, *args, **kwargs)
@register_dispatch_func(NATIVE_BINARY_FNS + NATIVE_INPLACE_BINARY_FNS)
def _general_binary(func, *args, **kwargs):
return _apply_native_binary(func, *args, **kwargs)
@register_dispatch_func([torch.ops.aten.stride])
def stride(func, *args, **kwargs):
return None
@register_dispatch_func([torch.ops.aten.sym_stride])
def sym_stride(func, *args, **kwargs):
return None
@register_dispatch_func([torch.ops.prim.layout])
def layout(func, *args, **kwargs):
return _get_data(args[0]).layout
@register_dispatch_func([torch.ops.aten.is_contiguous])
def is_contiguous(func, *args, **kwargs):
data = _get_data(args[0])
if data.is_sparse:
raise ValueError("MaskedTensors with sparse data do not have is_contiguous")
return func(data, *args[1:], **kwargs)
@register_dispatch_func([torch.ops.aten.is_strides_like_format])
def is_strides_like_format(func, *args, **kwargs):
data = _get_data(args[0])
if data.is_sparse:
raise ValueError(
"MaskedTensors with sparse data do not have is_strides_like_format"
)
return func(data, *args[1:], **kwargs)
@register_dispatch_func([torch.ops.aten.is_non_overlapping_and_dense])
def is_non_overlapping_and_dense(func, *args, **kwargs):
data = _get_data(args[0])
if data.is_sparse:
raise ValueError(
"MaskedTensors with sparse data do not have is_non_overlapping_and_dense"
)
return func(data, *args[1:], **kwargs)
@register_dispatch_func([torch.ops.aten.contiguous])
def contiguous(func, *args, **kwargs):
if _get_data(args[0]).is_sparse:
raise ValueError("MaskedTensors with sparse data do not have contiguous")
return _MaskedContiguous.apply(args[0])
@register_dispatch_func([torch.ops.aten.new_empty_strided])
def new_empty_strided(func, *args, **kwargs):
_check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=3)
data = _get_data(args[0])
mask = _maybe_get_mask(args[0])
if tuple(args[1]) != tuple(data.size()):
raise ValueError(
f"__torch_dispatch__, {func}: args[1] expected to be the same as data.size()"
)
if tuple(args[2]) != tuple(data.stride()):
raise ValueError(
f"__torch_dispatch__, {func}: args[2] expected to be the same as data.stride()"
)
return MaskedTensor(func(data, args[1], args[2], **kwargs), mask)
@register_dispatch_func([torch.ops.aten._local_scalar_dense])
def _local_scalar_dense(func, *args, **kwargs):
if not _maybe_get_mask(args[0]):
raise ValueError(f"__torch_dispatch__, {func}: expected a mask tensor")
return torch.ops.aten._local_scalar_dense(_get_data(args[0]))
@register_dispatch_func([torch.ops.aten.detach, torch.ops.aten.clone])
def _apply_fn_on_data(func, *args, **kwargs):
return MaskedTensor(func(_get_data(args[0])), _maybe_get_mask(args[0]))
@register_dispatch_func([torch.ops.aten._to_copy])
def _to_copy(func, *args, **kwargs):
new_data = func(_get_data(args[0]), *args[1:], **kwargs)
return MaskedTensor(new_data, _maybe_get_mask(args[0]))
@register_dispatch_func([torch.ops.aten._softmax])
def _softmax(func, *args, **kwargs):
_check_args_kwargs_length(
args, kwargs, f"__torch_dispatch__, {func}", len_args=3, len_kwargs=0
)
data = _get_data(args[0])
mask = _maybe_get_mask(args[0])
result_data = torch.ops.aten._masked_softmax(data, ~mask, args[1], 2)
return MaskedTensor(result_data, mask)
@register_dispatch_func([torch.ops.aten.ones_like])
def ones_like(func, *args, **kwargs):
_check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1)
result_data = func(_get_data(args[0]), **kwargs)
return MaskedTensor(result_data, _maybe_get_mask(args[0]))
@register_dispatch_func([torch.ops.aten._softmax_backward_data])
def _softmax_backward_data(func, *args, **kwargs):
_check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=4)
grad, output, dim, _input_dtype = args
if is_masked_tensor(grad) and is_masked_tensor(output):
if not _masks_match(grad, output):
raise ValueError(
"__torch_dispatch__, {func}: expected the masks of grad and output to match"
)
grad_data = _get_data(grad)
new_grad_data = torch.ops.aten._masked_softmax_backward(
grad_data,
_get_data(output),
~_maybe_get_mask(grad),
dim % grad_data.ndim,
)
res = MaskedTensor(new_grad_data, _maybe_get_mask(grad))
return res
else:
raise ValueError(
f"__torch_dispatch__, {func}: grad and output must both be MaskedTensors"
)
@register_dispatch_func([torch.ops.aten.copy_])
def copy_(func, *args, **kwargs):
_check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=2)
if not _masks_match(_maybe_get_mask(args[0]), _maybe_get_mask(args[1])):
raise ValueError("args[0] mask and args[1] mask must match but do not")
func(_get_data(args[0]), _get_data(args[1]))
return args[0]
@register_dispatch_func([torch.ops.aten.where])
def where(func, *args, **kwargs):
_check_args_kwargs_length(
args, kwargs, f"__torch_dispatch__, {func}", len_args=3, len_kwargs=0
)
if not torch.is_tensor(args[0]):
raise ValueError("__torch_dispatch__, {func}: expected args[0] to be a tensor")
mx = args[1]
my = args[2]
if not is_masked_tensor(mx):
mx = MaskedTensor(mx, torch.ones_like(mx, dtype=torch.bool))
if not is_masked_tensor(my):
my = MaskedTensor(my, torch.ones_like(my, dtype=torch.bool))
new_data = func(args[0], mx.get_data(), my.get_data())
new_mask = func(args[0], mx.get_mask(), my.get_mask())
return MaskedTensor(new_data, new_mask)
@register_dispatch_func([torch.ops.aten._to_sparse])
def _to_sparse(func, *args, **kwargs):
_check_args_kwargs_length(
args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0
)
if not torch.is_tensor(args[0]):
raise TypeError("__torch_dispatch__, {func}: expected args[0] to be a tensor")
mt = args[0]
if not is_masked_tensor(mt):
mt = MaskedTensor(mt, torch.ones_like(mt, dtype=torch.bool))
if mt.is_sparse_coo():
return mt
new_mask = func(_maybe_get_mask(args[0])).coalesce()
new_data = _get_data(args[0]).sparse_mask(new_mask)
return MaskedTensor(new_data, new_mask)
@register_dispatch_func([torch.ops.aten._to_sparse_csr])
def _to_sparse_csr(func, *args, **kwargs):
_check_args_kwargs_length(
args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0
)
if not torch.is_tensor(args[0]):
raise ValueError("__torch_dispatch__, {func}: expected args[0] to be a tensor")
mt = args[0]
if not is_masked_tensor(mt):
mt = MaskedTensor(mt, torch.ones_like(mt).bool())
if mt.is_sparse_csr():
return mt
new_mask = func(_maybe_get_mask(args[0]))
new_data = _get_data(args[0]).sparse_mask(new_mask)
return MaskedTensor(new_data, new_mask)
@register_dispatch_func([torch.ops.aten._to_dense])
def _to_dense(func, *args, **kwargs):
_check_args_kwargs_length(
args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0
)
if not torch.is_tensor(args[0]):
raise ValueError("__torch_dispatch__, {func}: expected args[0] to be a tensor")
mt = args[0]
if not is_masked_tensor(mt):
mt = MaskedTensor(mt, torch.ones_like(mt).bool())
new_data = func(_get_data(args[0]))
new_mask = func(_maybe_get_mask(args[0]))
return MaskedTensor(new_data, new_mask)
@register_dispatch_func([torch.ops.aten._indices])
def _indices(func, *args, **kwargs):
# Assumes data is sparse
_check_args_kwargs_length(
args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0
)
data = _get_data(args[0]).indices()
return MaskedTensor(data, torch.ones_like(data).bool())
@register_dispatch_func([torch.ops.aten._values])
def _values(func, *args, **kwargs):
_check_args_kwargs_length(
args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0
)
data = _get_data(args[0]).values()
return MaskedTensor(data, torch.ones_like(data).bool())
@register_dispatch_func([torch.ops.aten._sparse_coo_tensor_with_dims_and_tensors])
def _sparse_coo_tensor_with_dims_and_tensors(func, *args, **kwargs):
new_args = list(args)
if is_masked_tensor(args[-1]):
new_args[-1] = args[-1].get_data()
if is_masked_tensor(args[-2]):
new_args[-2] = args[-2].get_data()
new_data = func(*new_args, **kwargs)
new_args[-1] = torch.ones_like(new_args[-1])
new_mask = func(*new_args, **kwargs).bool()
return MaskedTensor(new_data, new_mask)
@register_dispatch_func([torch.ops.aten.is_same_size])
def is_same_size(func, *args, **kwargs):
_check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=2)
return _get_data(args[0]).is_same_size(_get_data(args[1]))
@register_dispatch_func([torch.ops.aten._is_any_true])
def _is_any_true(func, *args, **kwargs):
_check_args_kwargs_length(
args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0
)
data = _get_data(args[0])
mask = _maybe_get_mask(args[0])
if mask is None:
raise ValueError(
f"__torch_dispatch__, {func}: expected args[0] to be a MaskedTensor"
)
if data.dtype != torch.bool:
raise ValueError(f"__torch_dispatch__, {func}: expected a boolean tensor")
if data.is_sparse:
raise ValueError(f"MaskedTensors with sparse data do not have {func}")
return MaskedTensor(func(data & mask), torch.tensor(True))
```
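The two registration decorators above are the extension point for new ops. Below is a minimal sketch of how `register_dispatch_func` could be used to route one more aten op through the dispatch table; the choice of `torch.ops.aten.alias` and the simple "apply to data, keep the mask" semantics are assumptions for illustration only, not something the file above registers.

```py
import torch
from torch.masked import MaskedTensor
from torch.masked.maskedtensor._ops_refs import register_dispatch_func
from torch.masked.maskedtensor.core import _get_data, _maybe_get_mask


# Hypothetical handler: apply the op to the data and re-wrap with the original mask.
@register_dispatch_func([torch.ops.aten.alias])
def _alias(func, *args, **kwargs):
    return MaskedTensor(func(_get_data(args[0])), _maybe_get_mask(args[0]))
```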
|
===========================================================================================================================
SOURCE CODE FILE: binary.py
LINES: 1
SIZE: 5.56 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\masked\maskedtensor\binary.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# Copyright (c) Meta Platforms, Inc. and affiliates
import torch
from .core import (
_map_mt_args_kwargs,
_masks_match,
_tensors_match,
_wrap_result,
is_masked_tensor,
)
__all__ = [] # type: ignore[var-annotated]
BINARY_NAMES = [
"add",
"atan2",
"arctan2",
"bitwise_and",
"bitwise_or",
"bitwise_xor",
"bitwise_left_shift",
"bitwise_right_shift",
"div",
"divide",
"floor_divide",
"fmod",
"logaddexp",
"logaddexp2",
"mul",
"multiply",
"nextafter",
"remainder",
"sub",
"subtract",
"true_divide",
"eq",
"ne",
"le",
"ge",
"greater",
"greater_equal",
"gt",
"less_equal",
"lt",
"less",
"maximum",
"minimum",
"fmax",
"fmin",
"not_equal",
]
INPLACE_BINARY_NAMES = [
n + "_"
for n in (
list(
set(BINARY_NAMES)
- {
"logaddexp",
"logaddexp2",
"equal",
"fmin",
"minimum",
"maximum",
"fmax",
}
)
)
]
def _get_at_least_one_mask(a, b):
if not is_masked_tensor(a) and not is_masked_tensor(b):
raise TypeError("At least one of `a` and `b` must be a MaskedTensor")
if not _masks_match(a, b):
raise ValueError("a and b must have matching masks")
if is_masked_tensor(a):
return a.get_mask()
return b.get_mask()
def _binary_helper(fn, args, kwargs, inplace):
if len(kwargs) != 0:
raise ValueError("len(kwargs) must equal 0")
for a in args[2:]:
if torch.is_tensor(a):
raise TypeError(
"MaskedTensor binary ops do not support Tensor arguments aside from the lhs and rhs"
)
if not _masks_match(*args[:2]):
raise ValueError(
"Input masks must match. If you need support for this, please open an issue on Github."
)
data_args, _data_kwargs = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_data())
mask_args, _mask_kwargs = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_mask())
args0_layout = data_args[0].layout
same_layout = (
torch.is_tensor(data_args[1]) or is_masked_tensor(data_args[1])
) and (args0_layout == data_args[1].layout)
if args0_layout == torch.sparse_coo:
if same_layout:
if not _tensors_match(data_args[0].indices(), data_args[1].indices()):
raise ValueError(
"sparse_coo indices must match. If you need support for this, please open an issue on Github."
)
if data_args[0].size() != data_args[1].size():
raise ValueError(
"input1 and input2 must have the same size for binary functions."
)
data_args[1] = data_args[1].values()
i = data_args[0].indices()
size = data_args[0].size()
data_args[0] = data_args[0].values()
v = fn(*data_args)
result_data = torch.sparse_coo_tensor(i, v, size)
elif args0_layout == torch.sparse_csr:
if same_layout:
if not (
_tensors_match(data_args[0].crow_indices(), data_args[1].crow_indices())
and _tensors_match(
data_args[0].col_indices(), data_args[1].col_indices()
)
):
raise ValueError(
"sparse_csr indices must match. If you need support for this, please open an issue on Github."
)
data_args[1] = data_args[1].values()
crow = data_args[0].crow_indices()
col = data_args[0].col_indices()
size = data_args[0].size()
data_args[0] = data_args[0].values()
v = fn(*data_args)
result_data = torch.sparse_csr_tensor(crow, col, v, size)
else:
result_data = fn(*data_args)
if inplace:
args[0]._set_data_mask(result_data, mask_args[0])
return args[0]
else:
result_mask = _get_at_least_one_mask(*args[:2])
# sparse tensors don't have strides so we can only expand if the layout is strided
if args0_layout == torch.strided:
result_mask = result_mask.expand_as(result_data)
return _wrap_result(result_data, result_mask)
def _torch_binary(fn_name):
fn = getattr(torch.ops.aten, fn_name)
def binary_fn(*args, **kwargs):
return _binary_helper(fn, args, kwargs, inplace=False)
return binary_fn
def _torch_inplace_binary(fn_name):
fn = getattr(torch.ops.aten, fn_name)
def binary_fn(*args, **kwargs):
return _binary_helper(fn, args, kwargs, inplace=True)
return binary_fn
NATIVE_BINARY_MAP = {
getattr(torch.ops.aten, name): _torch_binary(name) for name in BINARY_NAMES
}
NATIVE_INPLACE_BINARY_MAP = {
getattr(torch.ops.aten, name): _torch_inplace_binary(name)
for name in INPLACE_BINARY_NAMES
}
NATIVE_BINARY_FNS = list(NATIVE_BINARY_MAP.keys())
NATIVE_INPLACE_BINARY_FNS = list(NATIVE_INPLACE_BINARY_MAP.keys())
def _is_native_binary(fn):
return fn in NATIVE_BINARY_FNS or fn in NATIVE_INPLACE_BINARY_FNS
def _apply_native_binary(fn, *args, **kwargs):
if fn in NATIVE_BINARY_FNS:
return NATIVE_BINARY_MAP[fn](*args, **kwargs)
if fn in NATIVE_INPLACE_BINARY_FNS:
return NATIVE_INPLACE_BINARY_MAP[fn](*args, **kwargs)
return NotImplemented
```
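A minimal usage sketch (not part of the file above) of the binary path implemented by `_binary_helper`: both operands must carry matching masks, and the result keeps that shared mask. The `masked_tensor` factory comes from `torch.masked`.

```py
import torch
from torch.masked import masked_tensor

mask = torch.tensor([True, False, True])
a = masked_tensor(torch.tensor([1.0, 2.0, 3.0]), mask)
b = masked_tensor(torch.tensor([10.0, 20.0, 30.0]), mask)

# Dispatches through _apply_native_binary; masked-out slots stay masked.
c = torch.add(a, b)
```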
|
=========================================================================================================================
SOURCE CODE FILE: core.py
LINES: 12
SIZE: 12.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\masked\maskedtensor\core.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# Copyright (c) Meta Platforms, Inc. and affiliates
import warnings
from typing import Any
from typing_extensions import TypeIs
import torch
from torch.overrides import get_default_nowrap_functions
__all__ = [
"MaskedTensor",
"is_masked_tensor",
]
def is_masked_tensor(obj: Any, /) -> TypeIs["MaskedTensor"]:
r"""Returns True if the input is a MaskedTensor, else False
Args:
a: any input
Examples:
>>> # xdoctest: +SKIP
>>> from torch.masked import MaskedTensor
>>> data = torch.arange(6).reshape(2,3)
>>> mask = torch.tensor([[True, False, False], [True, True, False]])
>>> mt = MaskedTensor(data, mask)
>>> is_masked_tensor(mt)
True
"""
return isinstance(obj, MaskedTensor)
def _tensors_match(a, b, exact=True, rtol=1e-05, atol=1e-08):
if is_masked_tensor(a) or is_masked_tensor(b):
raise ValueError("Neither `a` nor `b` can be a MaskedTensor.")
if a.layout != b.layout:
raise ValueError(
f"`a` and `b` must have the same layout. Got {a.layout} and {b.layout}"
)
if a.dtype != b.dtype:
b = b.type(a.dtype)
if a.layout == b.layout == torch.sparse_coo:
return _tensors_match(a.values(), b.values(), exact) and _tensors_match(
a.indices(), b.indices(), exact
)
elif a.layout == b.layout == torch.sparse_csr:
return (
_tensors_match(a.crow_indices(), b.crow_indices(), exact)
and _tensors_match(a.col_indices(), b.col_indices(), exact)
and _tensors_match(a.values(), b.values(), exact)
)
if exact:
return (a.dim() == b.dim()) and torch.eq(a, b).all().item()
return (a.dim() == b.dim()) and torch.allclose(a, b, rtol=rtol, atol=atol)
def _masks_match(a, b):
if is_masked_tensor(a) and is_masked_tensor(b):
mask_a = a.get_mask()
mask_b = b.get_mask()
return _tensors_match(mask_a, mask_b, exact=True)
return True
def _map_mt_args_kwargs(args, kwargs, map_fn):
def _helper(a, map_fn):
if is_masked_tensor(a):
return map_fn(a)
elif torch.is_tensor(a):
return a
elif isinstance(a, list):
a_impl, _ = _map_mt_args_kwargs(a, {}, map_fn)
return a_impl
elif isinstance(a, tuple):
a_impl, _ = _map_mt_args_kwargs(a, {}, map_fn)
return tuple(a_impl)
else:
return a
if kwargs is None:
kwargs = {}
impl_args = []
for a in args:
impl_args.append(_helper(a, map_fn))
    impl_kwargs = {}
    for k, v in kwargs.items():
        impl_kwargs[k] = _helper(v, map_fn)
return impl_args, impl_kwargs
def _wrap_result(result_data, result_mask):
if isinstance(result_data, list):
return [_wrap_result(r, m) for (r, m) in zip(result_data, result_mask)]
if isinstance(result_data, tuple):
return tuple(_wrap_result(r, m) for (r, m) in zip(result_data, result_mask))
if torch.is_tensor(result_data):
return MaskedTensor(result_data, result_mask)
# Expect result_data and result_mask to be Tensors only
return NotImplemented
def _masked_tensor_str(data, mask, formatter):
if data.layout in {torch.sparse_coo, torch.sparse_csr}:
data = data.to_dense()
mask = mask.to_dense()
if data.dim() == 1:
formatted_elements = [
formatter.format(d.item()) if isinstance(d.item(), float) else str(d.item())
for d in data
]
max_len = max(8 if x[1] else len(x[0]) for x in zip(formatted_elements, ~mask))
return (
"["
+ ", ".join(
[
"--".rjust(max_len) if m else e
for (e, m) in zip(formatted_elements, ~mask)
]
)
+ "]"
)
sub_strings = [_masked_tensor_str(d, m, formatter) for (d, m) in zip(data, mask)]
sub_strings = ["\n".join([" " + si for si in s.split("\n")]) for s in sub_strings]
return "[\n" + ",\n".join(sub_strings) + "\n]"
def _get_data(a):
if is_masked_tensor(a):
return a._masked_data
return a
def _maybe_get_mask(a):
if is_masked_tensor(a):
return a.get_mask()
return None
class MaskedTensor(torch.Tensor):
@staticmethod
def __new__(cls, data, mask, requires_grad=False):
if is_masked_tensor(data) or not torch.is_tensor(data):
raise TypeError("data must be a Tensor")
if is_masked_tensor(mask) or not torch.is_tensor(mask):
raise TypeError("mask must be a Tensor")
        # Use a Tensor of the given size for the wrapper.
kwargs = {
"device": data.device,
"dtype": data.dtype,
"layout": data.layout,
"requires_grad": requires_grad,
"dispatch_sizes_strides_policy": "strides",
"dispatch_layout": True,
}
warnings.warn(
(
"The PyTorch API of MaskedTensors is in prototype stage "
"and will change in the near future. Please open a Github issue "
"for features requests and see our documentation on the torch.masked "
"module for further information about the project."
),
UserWarning,
stacklevel=2,
)
if data.requires_grad:
warnings.warn(
"It is not recommended to create a MaskedTensor with a tensor that requires_grad. "
"To avoid this, you can use data.detach().clone()",
UserWarning,
stacklevel=2,
)
return torch.Tensor._make_wrapper_subclass(cls, data.size(), **kwargs) # type: ignore[attr-defined]
def _preprocess_data(self, data, mask):
from .._ops import _sparse_coo_where, _sparse_csr_where
if data.layout != mask.layout:
raise TypeError("data and mask must have the same layout.")
if data.layout == torch.sparse_coo:
data = data.coalesce()
mask = mask.coalesce()
if data._nnz() != mask._nnz():
data = _sparse_coo_where(mask, data, torch.tensor(0))
elif data.layout == torch.sparse_csr:
if data._nnz() != mask._nnz():
data = _sparse_csr_where(mask, data, torch.tensor(0))
# Have to pick awkward names to not conflict with existing fields such as data
self._masked_data = data.clone()
self._masked_mask = mask.clone()
def _validate_members(self):
data = self._masked_data
mask = self.get_mask()
if type(data) != type(mask):
raise TypeError(
f"data and mask must have the same type. Got {type(data)} and {type(mask)}"
)
if data.layout not in {torch.strided, torch.sparse_coo, torch.sparse_csr}:
raise TypeError(f"data layout of {data.layout} is not supported.")
if data.layout == torch.sparse_coo:
if not _tensors_match(data.indices(), mask.indices(), exact=True):
raise ValueError(
"data and mask are both sparse COO tensors but do not have the same indices."
)
elif data.layout == torch.sparse_csr:
if not _tensors_match(
data.crow_indices(), mask.crow_indices(), exact=True
) or not _tensors_match(data.col_indices(), mask.col_indices(), exact=True):
raise ValueError(
"data and mask are both sparse CSR tensors but do not share either crow or col indices."
)
if mask.dtype != torch.bool:
raise TypeError("mask must have dtype bool.")
if not (
data.dtype == torch.float16
or data.dtype == torch.float32
or data.dtype == torch.float64
or data.dtype == torch.bool
or data.dtype == torch.int8
or data.dtype == torch.int16
or data.dtype == torch.int32
or data.dtype == torch.int64
):
raise TypeError(f"{data.dtype} is not supported in MaskedTensor.")
if data.dim() != mask.dim():
raise ValueError("data.dim() must equal mask.dim()")
if data.size() != mask.size():
raise ValueError("data.size() must equal mask.size()")
def __init__(self, data, mask, requires_grad=False):
self._preprocess_data(data, mask)
self._validate_members()
@staticmethod
def _from_values(data, mask):
"""Differentiable constructor for MaskedTensor"""
class Constructor(torch.autograd.Function):
@staticmethod
def forward(ctx, data, mask):
return MaskedTensor(data, mask)
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
result = Constructor.apply(data, mask)
return result
def _set_data_mask(self, data, mask):
self._masked_data = data
self._masked_mask = mask
self._validate_members()
def __repr__(self): # type: ignore[override]
formatter = "{0:8.4f}"
if self.dim() == 0:
scalar_data = self.get_data().item()
data_formatted = (
formatter.format(scalar_data)
if isinstance(scalar_data, float)
else str(scalar_data)
)
if not self.get_mask().item():
data_formatted = "--"
return (
"MaskedTensor("
+ data_formatted
+ ", "
+ str(self.get_mask().item())
+ ")"
)
s = _masked_tensor_str(self.get_data(), self.get_mask(), formatter)
s = "\n".join(" " + si for si in s.split("\n"))
return "MaskedTensor(\n" + s + "\n)"
# Seems like this needs to be defined before torch_dispatch to work
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
from ._ops_refs import _MASKEDTENSOR_FUNCTION_TABLE
if func in _MASKEDTENSOR_FUNCTION_TABLE:
return _MASKEDTENSOR_FUNCTION_TABLE[func](*args, **kwargs)
if not all(issubclass(cls, t) for t in types):
return NotImplemented
with torch._C.DisableTorchFunctionSubclass():
ret = func(*args, **kwargs)
if func in get_default_nowrap_functions():
return ret
else:
return torch._tensor._convert(ret, cls)
@classmethod
def unary(cls, fn, data, mask):
return MaskedTensor(fn(data), mask)
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs):
func = func.overloadpacket
from ._ops_refs import _MASKEDTENSOR_DISPATCH_TABLE
if func in _MASKEDTENSOR_DISPATCH_TABLE:
return _MASKEDTENSOR_DISPATCH_TABLE[func](*args, **kwargs)
msg = (
f"{func.__name__} is not implemented in __torch_dispatch__ for MaskedTensor.\n"
"If you would like this operator to be supported, please file an issue for a feature request at "
"https://github.com/pytorch/maskedtensor/issues with a minimal reproducible code snippet.\n"
"In the case that the semantics for the operator are not trivial, it would be appreciated "
"to also include a proposal for the semantics."
)
warnings.warn(msg)
return NotImplemented
def __lt__(self, other):
if is_masked_tensor(other):
return MaskedTensor(self.get_data() < _get_data(other), self.get_mask())
return MaskedTensor(self.get_data() < other, self.get_mask())
def to_tensor(self, value):
return self.get_data().masked_fill(~self.get_mask(), value)
def get_data(self):
class GetData(torch.autograd.Function):
@staticmethod
def forward(ctx, self):
return self._masked_data.detach()
@staticmethod
def backward(ctx, grad_output):
if is_masked_tensor(grad_output):
return grad_output
return MaskedTensor(grad_output, self.get_mask())
return GetData.apply(self)
def get_mask(self):
return self._masked_mask
def is_sparse_coo(self):
return self.layout == torch.sparse_coo
def is_sparse_csr(self): # type: ignore[override]
return self.layout == torch.sparse_csr
# Update later to support more sparse layouts
@property
def is_sparse(self):
return self.is_sparse_coo() or self.is_sparse_csr()
```
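A short sketch (assuming the prototype API defined above) showing construction plus the `get_mask` / `to_tensor` accessors.

```py
import torch
from torch.masked import MaskedTensor

data = torch.arange(6, dtype=torch.float32).reshape(2, 3)
mask = torch.tensor([[True, False, False], [True, True, False]])
mt = MaskedTensor(data, mask)

mt.get_mask()      # the boolean validity mask passed in
mt.to_tensor(0.0)  # plain tensor with masked-out entries filled with 0.0
```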
|
=============================================================================================================================
SOURCE CODE FILE: creation.py
LINES: 1
SIZE: 0.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\masked\maskedtensor\creation.py
ENCODING: utf-8
```py
# Copyright (c) Meta Platforms, Inc. and affiliates
from .core import MaskedTensor
__all__ = [
"as_masked_tensor",
"masked_tensor",
]
# These two factory functions are intended to mirror
# torch.tensor - guaranteed to be a leaf node
# torch.as_tensor - differentiable constructor that preserves the autograd history
def masked_tensor(
data: object, mask: object, requires_grad: bool = False
) -> MaskedTensor:
return MaskedTensor(data, mask, requires_grad)
def as_masked_tensor(data: object, mask: object) -> MaskedTensor:
return MaskedTensor._from_values(data, mask)
```
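A brief sketch contrasting the two factories, per the comment above: `masked_tensor` builds a new leaf, while `as_masked_tensor` keeps the autograd history of its inputs.

```py
import torch
from torch.masked import as_masked_tensor, masked_tensor

data = torch.randn(3, requires_grad=True)
mask = torch.tensor([True, False, True])

leaf = masked_tensor(data.detach().clone(), mask)  # new leaf, no history
tracked = as_masked_tensor(data, mask)             # differentiable w.r.t. `data`
```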
|
================================================================================================================================
SOURCE CODE FILE: passthrough.py
LINES: 1
SIZE: 1.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\masked\maskedtensor\passthrough.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# Copyright (c) Meta Platforms, Inc. and affiliates
"""
These are functions that should simply be applied to both mask and data.
Take select or stack as an example: the operation is applied to both the
mask and the data of a MaskedTensor, and the results are wrapped into
a new MaskedTensor.
"""
import torch
from .core import _map_mt_args_kwargs, _wrap_result
__all__ = [] # type: ignore[var-annotated]
PASSTHROUGH_FNS = [
torch.ops.aten.select,
torch.ops.aten.transpose,
torch.ops.aten.split,
torch.ops.aten.t,
torch.ops.aten.slice,
torch.ops.aten.slice_backward,
torch.ops.aten.select_backward,
torch.ops.aten.index,
torch.ops.aten.expand,
torch.ops.aten.view,
torch.ops.aten._unsafe_view,
torch.ops.aten._reshape_alias,
torch.ops.aten.cat,
torch.ops.aten.unsqueeze,
torch.ops.aten.unfold,
torch.ops.aten.unfold_backward,
torch.ops.aten.im2col,
torch.ops.aten.col2im,
torch.ops.aten.stack,
]
def _is_pass_through_fn(fn):
return fn in PASSTHROUGH_FNS
def _apply_pass_through_fn(fn, *args, **kwargs):
data_args, data_kwargs = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_data())
result_data = fn(*data_args, **data_kwargs)
mask_args, mask_kwargs = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_mask())
result_mask = fn(*mask_args, **mask_kwargs)
return _wrap_result(result_data, result_mask)
```
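A small sketch of a pass-through op from the list above: `torch.transpose` is applied to the data and the mask and the pair is re-wrapped.

```py
import torch
from torch.masked import masked_tensor

mt = masked_tensor(
    torch.arange(6, dtype=torch.float32).reshape(2, 3),
    torch.tensor([[True, False, True], [False, True, False]]),
)
mt_t = torch.transpose(mt, 0, 1)  # data and mask are transposed together
```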
|
===============================================================================================================================
SOURCE CODE FILE: reductions.py
LINES: 3
SIZE: 5.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\masked\maskedtensor\reductions.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# Copyright (c) Meta Platforms, Inc. and affiliates
import warnings
import torch
from .core import is_masked_tensor
from .creation import as_masked_tensor, masked_tensor
__all__ = [] # type: ignore[var-annotated]
def _masked_all_all(data, mask=None):
if mask is None:
return data.all()
return data.masked_fill(~mask, True).all()
def _masked_all_dim(data, dim, keepdim=False, mask=None):
if mask is None:
return torch.all(data, dim=dim, keepdim=keepdim)
return torch.all(data.masked_fill(~mask, True), dim=dim, keepdim=keepdim)
def _masked_all(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 1:
return _masked_all_all(args[0], mask=kwargs["mask"])
return _masked_all_dim(*args, **kwargs)
def _multidim_any(mask, dim, keepdim):
if isinstance(dim, int):
return _multidim_any(mask, [dim], keepdim)
for d in sorted(dim, reverse=True):
mask = torch.any(mask, dim=d, keepdim=keepdim)
return mask
def _get_masked_fn(fn):
if fn == "all":
return _masked_all
return getattr(torch.masked, fn)
def _torch_reduce_all(fn):
def reduce_all(self):
masked_fn = _get_masked_fn(fn)
data = self.get_data()
mask = self.get_mask().values() if self.is_sparse else self.get_mask()
# When reduction is "all", then torch.argmin/torch.argmax needs to return the index of the
# element corresponding to the min/max, but this operation isn't supported correctly for sparse layouts.
# Therefore, this implementation calculates it using the strides.
if fn == "all":
result_data = masked_fn(data, mask=mask)
elif fn in {"argmin", "argmax"} and self.is_sparse_coo():
sparse_idx = masked_fn(data.values(), mask=mask).to(dtype=torch.int)
indices = (
data.to_sparse_coo().indices()
if not self.is_sparse_coo()
else data.indices()
)
idx = indices.unbind(1)[sparse_idx]
stride = data.size().numel() / torch.tensor(
data.size(), device=data.device
).cumprod(0)
result_data = torch.sum(idx * stride)
# we simply pass in the values for sparse COO/CSR tensors
elif self.is_sparse:
result_data = masked_fn(masked_tensor(data.values(), mask))
else:
result_data = masked_fn(self, mask=mask)
return as_masked_tensor(result_data, torch.any(mask))
return reduce_all
def _torch_reduce_dim(fn):
def reduce_dim(self, dim, keepdim=False, dtype=None):
if self.is_sparse:
msg = (
f"The sparse version of {fn} is not implemented in reductions.\n"
"If you would like this operator to be supported, please file an issue for a feature request at "
"https://github.com/pytorch/maskedtensor/issues with a minimal reproducible code snippet.\n"
"In the case that the semantics for the operator are not trivial, it would be appreciated "
"to also include a proposal for the semantics."
)
warnings.warn(msg)
return NotImplemented
if not is_masked_tensor(self):
raise TypeError("Input to reduce_dim must be a MaskedTensor")
masked_fn = _get_masked_fn(fn)
data = self.get_data()
mask = self.get_mask()
if fn == "all":
result_data = masked_fn(data, dim=dim, keepdim=keepdim, mask=mask)
else:
result_data = masked_fn(
self, dim=dim, keepdim=keepdim, dtype=dtype, mask=self.get_mask()
)
return as_masked_tensor(result_data, _multidim_any(mask, dim, keepdim))
return reduce_dim
def _torch_reduce(fn):
def reduce_fn(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0:
return _torch_reduce_all(fn)(args[0])
return _torch_reduce_dim(fn)(*args, **kwargs)
return reduce_fn
def _reduce_dim_args(input, dim, keepdim=False, dtype=None):
return input, dim, keepdim, dtype
def _torch_grad_reduce(fn):
def grad_reduce(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0:
return _torch_reduce_all(fn)(args[0])
# TODO: autograd.Function doesn't support kwarg
input, dim, keepdim, dtype = _reduce_dim_args(*args, **kwargs)
return _torch_reduce_dim(fn)(input, dim, keepdim, dtype)
return grad_reduce
REDUCE_NAMES = [
"sum",
"mean",
"amin",
"amax",
"argmin",
"argmax",
"prod",
"all",
"norm",
"var",
"std",
]
NATIVE_REDUCE_MAP = {
getattr(torch.ops.aten, name): _torch_reduce(name) for name in REDUCE_NAMES
}
TORCH_REDUCE_MAP = {
getattr(torch, name): _torch_grad_reduce(name) for name in REDUCE_NAMES
}
TENSOR_REDUCE_MAP = {
getattr(torch.Tensor, name): _torch_grad_reduce(name) for name in REDUCE_NAMES
}
NATIVE_REDUCE_FNS = list(NATIVE_REDUCE_MAP.keys())
TORCH_REDUCE_FNS = list(TORCH_REDUCE_MAP.keys())
TENSOR_REDUCE_FNS = list(TENSOR_REDUCE_MAP.keys())
def _is_reduction(fn):
return fn in NATIVE_REDUCE_MAP or fn in TORCH_REDUCE_MAP or fn in TENSOR_REDUCE_MAP
def _apply_reduction(fn, *args, **kwargs):
if fn in NATIVE_REDUCE_MAP:
return NATIVE_REDUCE_MAP[fn](*args, **kwargs)
if fn in TORCH_REDUCE_MAP:
return TORCH_REDUCE_MAP[fn](*args, **kwargs)
if fn in TENSOR_REDUCE_MAP:
return TENSOR_REDUCE_MAP[fn](*args, **kwargs)
return NotImplemented
```
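A minimal sketch of the reduction path above: dim-wise reductions go through `_torch_reduce_dim`, and the output mask is `_multidim_any` of the input mask.

```py
import torch
from torch.masked import masked_tensor

mt = masked_tensor(
    torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
    torch.tensor([[True, False, True], [True, True, False]]),
)
mt.sum(dim=1)  # sums only the unmasked entries along dim 1
mt.sum()       # full reduction over all unmasked entries
```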
|
==========================================================================================================================
SOURCE CODE FILE: unary.py
LINES: 1
SIZE: 4.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\masked\maskedtensor\unary.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# Copyright (c) Meta Platforms, Inc. and affiliates
import torch
from .core import _map_mt_args_kwargs, _wrap_result
__all__ = [] # type: ignore[var-annotated]
UNARY_NAMES = [
"abs",
"absolute",
"acos",
"arccos",
"acosh",
"arccosh",
"angle",
"asin",
"arcsin",
"asinh",
"arcsinh",
"atan",
"arctan",
"atanh",
"arctanh",
"bitwise_not",
"ceil",
"clamp",
"clip",
"conj_physical",
"cos",
"cosh",
"deg2rad",
"digamma",
"erf",
"erfc",
"erfinv",
"exp",
"exp2",
"expm1",
"fix",
"floor",
"frac",
"lgamma",
"log",
"log10",
"log1p",
"log2",
"logit",
"i0",
"isnan",
"nan_to_num",
"neg",
"negative",
"positive",
"pow",
"rad2deg",
"reciprocal",
"round",
"rsqrt",
"sigmoid",
"sign",
"sgn",
"signbit",
"sin",
"sinc",
"sinh",
"sqrt",
"square",
"tan",
"tanh",
"trunc",
]
INPLACE_UNARY_NAMES = [
n + "_"
for n in (list(set(UNARY_NAMES) - {"angle", "positive", "signbit", "isnan"}))
]
# Explicitly tracking functions we know are currently not supported
# This might be due to missing code gen or because of complex semantics
UNARY_NAMES_UNSUPPORTED = [
"atan2",
"arctan2",
"bitwise_left_shift",
"bitwise_right_shift",
"copysign",
"float_power",
"fmod",
"frexp",
"gradient",
"imag",
"ldexp",
"lerp",
"logical_not",
"hypot",
"igamma",
"igammac",
"mvlgamma",
"nextafter",
"polygamma",
"real",
"remainder",
"true_divide",
"xlogy",
]
def _unary_helper(fn, args, kwargs, inplace):
if len(kwargs) != 0:
raise ValueError(
"MaskedTensor unary ops require that len(kwargs) == 0. "
"If you need support for this, please open an issue on Github."
)
for a in args[1:]:
if torch.is_tensor(a):
raise TypeError(
"MaskedTensor unary ops do not support additional Tensor arguments"
)
mask_args, _mask_kwargs = _map_mt_args_kwargs(
args, kwargs, lambda x: x._masked_mask
)
data_args, _data_kwargs = _map_mt_args_kwargs(
args, kwargs, lambda x: x._masked_data
)
if args[0].layout == torch.sparse_coo:
data_args[0] = data_args[0].coalesce()
s = data_args[0].size()
i = data_args[0].indices()
data_args[0] = data_args[0].coalesce().values()
v = fn(*data_args)
result_data = torch.sparse_coo_tensor(i, v, size=s)
elif args[0].layout == torch.sparse_csr:
crow = data_args[0].crow_indices()
col = data_args[0].col_indices()
data_args[0] = data_args[0].values()
v = fn(*data_args)
result_data = torch.sparse_csr_tensor(crow, col, v)
else:
result_data = fn(*data_args)
if inplace:
args[0]._set_data_mask(result_data, mask_args[0])
return args[0]
else:
return _wrap_result(result_data, mask_args[0])
def _torch_unary(fn_name):
fn = getattr(torch.ops.aten, fn_name)
def unary_fn(*args, **kwargs):
return _unary_helper(fn, args, kwargs, inplace=False)
return unary_fn
def _torch_inplace_unary(fn_name):
fn = getattr(torch.ops.aten, fn_name)
def unary_fn(*args, **kwargs):
return _unary_helper(fn, args, kwargs, inplace=True)
return unary_fn
NATIVE_UNARY_MAP = {
getattr(torch.ops.aten, name): _torch_unary(name) for name in UNARY_NAMES
}
NATIVE_INPLACE_UNARY_MAP = {
getattr(torch.ops.aten, name): _torch_inplace_unary(name)
for name in INPLACE_UNARY_NAMES
}
NATIVE_UNARY_FNS = list(NATIVE_UNARY_MAP.keys())
NATIVE_INPLACE_UNARY_FNS = list(NATIVE_INPLACE_UNARY_MAP.keys())
def _is_native_unary(fn):
return fn in NATIVE_UNARY_FNS or fn in NATIVE_INPLACE_UNARY_FNS
def _apply_native_unary(fn, *args, **kwargs):
if fn in NATIVE_UNARY_FNS:
return NATIVE_UNARY_MAP[fn](*args, **kwargs)
if fn in NATIVE_INPLACE_UNARY_FNS:
return NATIVE_INPLACE_UNARY_MAP[fn](*args, **kwargs)
return NotImplemented
```
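A minimal sketch of the unary path above: the op is applied to the data only and the mask is carried over unchanged.

```py
import torch
from torch.masked import masked_tensor

mt = masked_tensor(
    torch.tensor([-1.0, 2.0, -3.0]),
    torch.tensor([True, False, True]),
)
torch.abs(mt)  # abs applied to the data; the mask is unchanged
```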
|
=================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 1.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\monitor\__init__.py
ENCODING: utf-8
```py
from typing import TYPE_CHECKING
from torch._C._monitor import * # noqa: F403
from torch._C._monitor import _WaitCounter, _WaitCounterTracker
if TYPE_CHECKING:
from torch.utils.tensorboard import SummaryWriter
STAT_EVENT = "torch.monitor.Stat"
class TensorboardEventHandler:
"""
TensorboardEventHandler is an event handler that will write known events to
the provided SummaryWriter.
This currently only supports ``torch.monitor.Stat`` events which are logged
as scalars.
Example:
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_MONITOR)
>>> # xdoctest: +REQUIRES(module:tensorboard)
>>> from torch.utils.tensorboard import SummaryWriter
>>> from torch.monitor import TensorboardEventHandler, register_event_handler
>>> writer = SummaryWriter("log_dir")
>>> register_event_handler(TensorboardEventHandler(writer))
"""
def __init__(self, writer: "SummaryWriter") -> None:
"""
Constructs the ``TensorboardEventHandler``.
"""
self._writer = writer
def __call__(self, event: Event) -> None:
if event.name == STAT_EVENT:
for k, v in event.data.items():
self._writer.add_scalar(k, v, walltime=event.timestamp.timestamp())
```
|
=============================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 3
SIZE: 6.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\mps\__init__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
r"""
This package enables an interface for accessing the MPS (Metal Performance Shaders) backend in Python.
Metal is Apple's API for programming Metal GPUs (graphics processing units). Using MPS means that increased
performance can be achieved by running work on the Metal GPU(s).
See https://developer.apple.com/documentation/metalperformanceshaders for more details.
"""
from typing import Union
import torch
from torch import Tensor
_is_in_bad_fork = getattr(torch._C, "_mps_is_in_bad_fork", lambda: False)
_default_mps_generator: torch._C.Generator = None # type: ignore[assignment]
# local helper function (not public or exported)
def _get_default_mps_generator() -> torch._C.Generator:
global _default_mps_generator
if _default_mps_generator is None:
_default_mps_generator = torch._C._mps_get_default_generator()
return _default_mps_generator
def device_count() -> int:
r"""Returns the number of available MPS devices."""
return int(torch._C._has_mps and torch._C._mps_is_available())
def synchronize() -> None:
r"""Waits for all kernels in all streams on a MPS device to complete."""
return torch._C._mps_deviceSynchronize()
def get_rng_state(device: Union[int, str, torch.device] = "mps") -> Tensor:
r"""Returns the random number generator state as a ByteTensor.
Args:
device (torch.device or int, optional): The device to return the RNG state of.
Default: ``'mps'`` (i.e., ``torch.device('mps')``, the current MPS device).
"""
return _get_default_mps_generator().get_state()
def set_rng_state(
new_state: Tensor, device: Union[int, str, torch.device] = "mps"
) -> None:
r"""Sets the random number generator state.
Args:
new_state (torch.ByteTensor): The desired state
device (torch.device or int, optional): The device to set the RNG state.
Default: ``'mps'`` (i.e., ``torch.device('mps')``, the current MPS device).
"""
new_state_copy = new_state.clone(memory_format=torch.contiguous_format)
_get_default_mps_generator().set_state(new_state_copy)
def manual_seed(seed: int) -> None:
r"""Sets the seed for generating random numbers.
Args:
seed (int): The desired seed.
"""
# the torch.mps.manual_seed() can be called from the global
# torch.manual_seed() in torch/random.py. So we need to make
# sure mps is available (otherwise we just return without
# erroring out)
if not torch._C._has_mps:
return
seed = int(seed)
_get_default_mps_generator().manual_seed(seed)
def seed() -> None:
r"""Sets the seed for generating random numbers to a random number."""
_get_default_mps_generator().seed()
def empty_cache() -> None:
r"""Releases all unoccupied cached memory currently held by the caching
allocator so that those can be used in other GPU applications.
"""
torch._C._mps_emptyCache()
def set_per_process_memory_fraction(fraction) -> None:
r"""Set memory fraction for limiting process's memory allocation on MPS device.
The allowed value equals the fraction multiplied by recommended maximum device memory
(obtained from Metal API device.recommendedMaxWorkingSetSize).
If trying to allocate more than the allowed value in a process, it will raise an out of
memory error in allocator.
Args:
fraction(float): Range: 0~2. Allowed memory equals total_memory * fraction.
.. note::
Passing 0 to fraction means unlimited allocations
(may cause system failure if out of memory).
Passing fraction greater than 1.0 allows limits beyond the value
returned from device.recommendedMaxWorkingSetSize.
"""
if not isinstance(fraction, float):
raise TypeError("Invalid type for fraction argument, must be `float`")
if fraction < 0 or fraction > 2:
raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~2")
torch._C._mps_setMemoryFraction(fraction)
def current_allocated_memory() -> int:
r"""Returns the current GPU memory occupied by tensors in bytes.
.. note::
The returned size does not include cached allocations in
memory pools of MPSAllocator.
"""
return torch._C._mps_currentAllocatedMemory()
def driver_allocated_memory() -> int:
r"""Returns total GPU memory allocated by Metal driver for the process in bytes.
.. note::
The returned size includes cached allocations in MPSAllocator pools
as well as allocations from MPS/MPSGraph frameworks.
"""
return torch._C._mps_driverAllocatedMemory()
def recommended_max_memory() -> int:
r"""Returns recommended max Working set size for GPU memory in bytes.
.. note::
       The recommended max working set size is obtained from Metal's device.recommendedMaxWorkingSetSize.
"""
return torch._C._mps_recommendedMaxMemory()
def compile_shader(source: str):
r"""Compiles compute shader from source and allows one to invoke kernels
defined there from the comfort of Python runtime
Example::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_MPS)
>>> lib = torch.mps.compile_shader(
... "kernel void full(device float* out, constant float& val, uint idx [[thread_position_in_grid]]) { out[idx] = val; }"
... )
>>> x = torch.zeros(16, device="mps")
>>> lib.full(x, 3.14)
"""
from pathlib import Path
from torch.utils._cpp_embed_headers import _embed_headers
if not hasattr(torch._C, "_mps_compileShader"):
raise RuntimeError("MPS is not available")
source = _embed_headers(
[l + "\n" for l in source.split("\n")],
[Path(__file__).parent.parent / "include"],
set(),
)
return torch._C._mps_compileShader(source)
def is_available() -> bool:
return device_count() > 0
from . import profiler
from .event import Event
__all__ = [
"compile_shader",
"device_count",
"get_rng_state",
"manual_seed",
"seed",
"set_rng_state",
"synchronize",
"empty_cache",
"set_per_process_memory_fraction",
"current_allocated_memory",
"driver_allocated_memory",
"Event",
"profiler",
"recommended_max_memory",
"is_available",
]
```
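A small sketch (guarded by `is_available`, since MPS needs an Apple-silicon build) of the RNG helpers above: restoring a saved generator state reproduces the same random values.

```py
import torch

if torch.mps.is_available():
    torch.mps.manual_seed(42)
    state = torch.mps.get_rng_state()   # snapshot the generator state
    a = torch.randn(3, device="mps")
    torch.mps.set_rng_state(state)      # restore it
    b = torch.randn(3, device="mps")    # same values as `a`
```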
|
==========================================================================================================
SOURCE CODE FILE: event.py
LINES: 1
SIZE: 1.73 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\mps\event.py
ENCODING: utf-8
```py
import torch
class Event:
r"""Wrapper around an MPS event.
MPS events are synchronization markers that can be used to monitor the
device's progress, to accurately measure timing, and to synchronize MPS streams.
Args:
enable_timing (bool, optional): indicates if the event should measure time
(default: ``False``)
"""
def __init__(self, enable_timing: bool = False) -> None:
self.__eventId = torch._C._mps_acquireEvent(enable_timing)
def __del__(self) -> None:
# checks if torch._C is already destroyed
if hasattr(torch._C, "_mps_releaseEvent") and self.__eventId > 0:
torch._C._mps_releaseEvent(self.__eventId)
def record(self) -> None:
r"""Records the event in the default stream."""
torch._C._mps_recordEvent(self.__eventId)
def wait(self) -> None:
r"""Makes all future work submitted to the default stream wait for this event."""
torch._C._mps_waitForEvent(self.__eventId)
def query(self) -> bool:
r"""Returns True if all work currently captured by event has completed."""
return torch._C._mps_queryEvent(self.__eventId)
def synchronize(self) -> None:
r"""Waits until the completion of all work currently captured in this event.
This prevents the CPU thread from proceeding until the event completes.
"""
torch._C._mps_synchronizeEvent(self.__eventId)
def elapsed_time(self, end_event: "Event") -> float:
r"""Returns the time elapsed in milliseconds after the event was
recorded and before the end_event was recorded.
"""
return torch._C._mps_elapsedTimeOfEvents(self.__eventId, end_event.__eventId)
```
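A brief sketch of timing GPU work with the event API above; it assumes an MPS-capable build and requires `enable_timing=True`.

```py
import torch

if torch.mps.is_available():
    start = torch.mps.Event(enable_timing=True)
    end = torch.mps.Event(enable_timing=True)

    start.record()
    x = torch.randn(1024, 1024, device="mps")
    y = x @ x
    end.record()

    end.synchronize()             # wait for the captured work to finish
    ms = start.elapsed_time(end)  # elapsed time in milliseconds
```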
|
=============================================================================================================
SOURCE CODE FILE: profiler.py
LINES: 1
SIZE: 3.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\mps\profiler.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import contextlib
import torch
__all__ = [
"start",
"stop",
"profile",
"metal_capture",
"is_metal_capture_enabled",
"is_capturing_metal",
]
def start(mode: str = "interval", wait_until_completed: bool = False) -> None:
r"""Start OS Signpost tracing from MPS backend.
The generated OS Signposts could be recorded and viewed in
XCode Instruments Logging tool.
Args:
mode(str): OS Signpost tracing mode could be "interval", "event",
or both "interval,event".
The interval mode traces the duration of execution of the operations,
whereas event mode marks the completion of executions.
See document `Recording Performance Data`_ for more info.
        wait_until_completed(bool): Waits until the MPS stream completes
            executing each encoded GPU operation. This helps generate single
            dispatches on the trace's timeline.
            Note that enabling this option negatively affects performance.
.. _Recording Performance Data:
https://developer.apple.com/documentation/os/logging/recording_performance_data
"""
mode_normalized = mode.lower().replace(" ", "")
torch._C._mps_profilerStartTrace(mode_normalized, wait_until_completed)
def stop():
r"""Stops generating OS Signpost tracing from MPS backend."""
torch._C._mps_profilerStopTrace()
@contextlib.contextmanager
def profile(mode: str = "interval", wait_until_completed: bool = False):
r"""Context Manager to enabling generating OS Signpost tracing from MPS backend.
Args:
mode(str): OS Signpost tracing mode could be "interval", "event",
or both "interval,event".
The interval mode traces the duration of execution of the operations,
whereas event mode marks the completion of executions.
See document `Recording Performance Data`_ for more info.
        wait_until_completed(bool): Waits until the MPS stream completes
            executing each encoded GPU operation. This helps generate single
            dispatches on the trace's timeline.
            Note that enabling this option negatively affects performance.
.. _Recording Performance Data:
https://developer.apple.com/documentation/os/logging/recording_performance_data
"""
try:
start(mode, wait_until_completed)
yield
finally:
stop()
def is_metal_capture_enabled() -> bool:
"""Checks if `metal_capture` context manager is usable
To enable metal capture, set MTL_CAPTURE_ENABLED envvar
"""
return torch._C._mps_isCaptureEnabled() # type: ignore[attr-defined]
def is_capturing_metal() -> bool:
"""Cheks if metal capture is in progress"""
return torch._C._mps_isCapturing() # type: ignore[attr-defined]
@contextlib.contextmanager
def metal_capture(fname: str):
"""Conext manager that enables capturing of Metal calls into gputrace"""
try:
torch._C._mps_startCapture(fname) # type: ignore[attr-defined]
yield
        # Drain all the work that was enqueued during the context call
torch.mps.synchronize()
finally:
torch._C._mps_stopCapture() # type: ignore[attr-defined]
```
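A minimal sketch of tracing a region with the `profile` context manager defined above; the resulting OS Signposts can then be inspected in Xcode Instruments.

```py
import torch

if torch.mps.is_available():
    with torch.mps.profiler.profile(mode="interval", wait_until_completed=False):
        x = torch.randn(256, 256, device="mps")
        y = (x @ x).relu()
```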
|
==============================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 5
SIZE: 12.24 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\mtia\__init__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
r"""
This package enables an interface for accessing the MTIA backend in Python.
"""
import threading
import warnings
from typing import Any, Callable, Optional, Union
import torch
from torch import device as _device, Tensor
from torch._utils import _dummy_type, _LazySeedTracker, classproperty
from torch.types import Device
from ._utils import _get_device_index
_device_t = Union[_device, str, int]
# torch.mtia.Event/Stream is alias of torch.Event/Stream
Event = torch.Event
Stream = torch.Stream
_initialized = False
_queued_calls: list[
tuple[Callable[[], None], list[str]]
] = [] # don't invoke these until initialization occurs
_tls = threading.local()
_initialization_lock = threading.Lock()
_lazy_seed_tracker = _LazySeedTracker()
def init():
_lazy_init()
def is_initialized():
r"""Return whether PyTorch's MTIA state has been initialized."""
return _initialized and not _is_in_bad_fork()
def _is_in_bad_fork() -> bool:
return torch._C._mtia_isInBadFork()
def _lazy_init() -> None:
global _initialized, _queued_calls
if is_initialized() or hasattr(_tls, "is_initializing"):
return
with _initialization_lock:
# We be double-checking locking, boys! This is OK because
# the above test was GIL protected anyway. The inner test
# is for when a thread blocked on some other thread which was
# doing the initialization; when they get the lock, they will
# find there is nothing left to do.
if is_initialized():
return
# It is important to prevent other threads from entering _lazy_init
# immediately, while we are still guaranteed to have the GIL, because some
# of the C calls we make below will release the GIL
if _is_in_bad_fork():
raise RuntimeError(
"Cannot re-initialize MTIA in forked subprocess. To use MTIA with "
"multiprocessing, you must use the 'spawn' start method"
)
if not _is_compiled():
raise AssertionError(
"Torch not compiled with MTIA enabled. "
"Ensure you have `import mtia.host_runtime.torch_mtia.dynamic_library` in your python "
"src file and include `//mtia/host_runtime/torch_mtia:torch_mtia` as "
"your target dependency!"
)
torch._C._mtia_init()
# Some of the queued calls may reentrantly call _lazy_init();
# we need to just return without initializing in that case.
# However, we must not let any *other* threads in!
_tls.is_initializing = True
_queued_calls.extend(calls for calls in _lazy_seed_tracker.get_calls() if calls)
try:
for queued_call, orig_traceback in _queued_calls:
try:
queued_call()
except Exception as e:
msg = (
f"MTIA call failed lazily at initialization with error: {str(e)}\n\n"
f"MTIA call was originally invoked at:\n\n{''.join(orig_traceback)}"
)
raise DeferredMtiaCallError(msg) from e
finally:
delattr(_tls, "is_initializing")
_initialized = True
class DeferredMtiaCallError(Exception):
pass
def _is_compiled() -> bool:
r"""Return true if compiled with MTIA support."""
return torch._C._mtia_isBuilt()
def is_available() -> bool:
r"""Return true if MTIA device is available"""
if not _is_compiled():
return False
# MTIA has to init devices first to know if there is any devices available.
return device_count() > 0
def synchronize(device: Optional[_device_t] = None) -> None:
r"""Waits for all jobs in all streams on a MTIA device to complete."""
with torch.mtia.device(device):
return torch._C._mtia_deviceSynchronize()
def device_count() -> int:
r"""Return the number of MTIA devices available."""
# TODO: Update _accelerator_hooks_device_count to abstract a MTIA device count API
return torch._C._mtia_getDeviceCount()
def current_device() -> int:
r"""Return the index of a currently selected device."""
return torch._C._accelerator_hooks_get_current_device()
def current_stream(device: Optional[_device_t] = None) -> Stream:
r"""Return the currently selected :class:`Stream` for a given device.
Args:
device (torch.device or int, optional): selected device. Returns
the currently selected :class:`Stream` for the current device, given
by :func:`~torch.mtia.current_device`, if :attr:`device` is ``None``
(default).
"""
return torch._C._mtia_getCurrentStream(_get_device_index(device, optional=True))
def default_stream(device: Optional[_device_t] = None) -> Stream:
r"""Return the default :class:`Stream` for a given device.
Args:
device (torch.device or int, optional): selected device. Returns
the default :class:`Stream` for the current device, given by
:func:`~torch.mtia.current_device`, if :attr:`device` is ``None``
(default).
"""
return torch._C._mtia_getDefaultStream(_get_device_index(device, optional=True))
def record_memory_history(
enabled: Optional[str] = "all", stacks: str = "python", max_entries: int = 0
) -> None:
r"""Enable/Disable the memory profiler on MTIA allocator
Args:
        enabled ("all" or "state", optional): Controls what the memory profiler
            records; pass ``None`` to disable recording.
stacks ("python" or "cpp", optional). Select the stack trace to record.
max_entries (int, optional). Maximum number of entries to record.
"""
if not is_initialized():
return
torch._C._mtia_recordMemoryHistory(enabled, stacks, max_entries)
def snapshot() -> dict[str, Any]:
r"""Return a dictionary of MTIA memory allocator history"""
return torch._C._mtia_memorySnapshot()
def get_device_capability(device: Optional[_device_t] = None) -> tuple[int, int]:
r"""Return capability of a given device as a tuple of (major version, minor version).
Args:
        device (torch.device or int, optional): selected device. Returns
            the capability of the current device, given by current_device(),
            if device is None (default).
"""
return torch._C._mtia_getDeviceCapability(_get_device_index(device, optional=True))
def empty_cache() -> None:
r"""Empty the MTIA device cache."""
return torch._C._mtia_emptyCache()
def set_stream(stream: Stream):
r"""Set the current stream.This is a wrapper API to set the stream.
Usage of this function is discouraged in favor of the ``stream``
context manager.
Args:
stream (Stream): selected stream. This function is a no-op
if this argument is ``None``.
"""
if stream is None:
return
torch._C._mtia_setCurrentStream(stream)
def set_device(device: _device_t) -> None:
r"""Set the current device.
Args:
device (torch.device or int): selected device. This function is a no-op
if this argument is negative.
"""
device = _get_device_index(device)
if device >= 0:
torch._C._accelerator_hooks_set_current_device(device)
class device:
r"""Context-manager that changes the selected device.
Args:
device (torch.device or int): device index to select. It's a no-op if
this argument is a negative integer or ``None``.
"""
def __init__(self, device: Any):
self.idx = _get_device_index(device, optional=True)
self.prev_idx = -1
def __enter__(self):
self.prev_idx = torch._C._accelerator_hooks_maybe_exchange_device(self.idx)
def __exit__(self, type: Any, value: Any, traceback: Any):
self.idx = torch._C._accelerator_hooks_maybe_exchange_device(self.prev_idx)
return False
class StreamContext:
r"""Context-manager that selects a given stream.
All MTIA kernels queued within its context will be enqueued on a selected
stream.
Args:
Stream (Stream): selected stream. This manager is a no-op if it's
``None``.
.. note:: Streams are per-device.
"""
cur_stream: Optional["torch.mtia.Stream"]
def __init__(self, stream: Optional["torch.mtia.Stream"]):
self.cur_stream = None
self.stream = stream
self.idx = _get_device_index(None, True)
if not torch.jit.is_scripting():
if self.idx is None:
self.idx = -1
self.src_prev_stream = (
None if not torch.jit.is_scripting() else torch.mtia.default_stream(None)
)
self.dst_prev_stream = (
None if not torch.jit.is_scripting() else torch.mtia.default_stream(None)
)
def __enter__(self):
# Local cur_stream variable for type refinement
cur_stream = self.stream
# Return if stream is None or MTIA device not available
if cur_stream is None or self.idx == -1:
return
self.src_prev_stream = torch.mtia.current_stream(None)
# If the stream is not on the current device, then
# set the current stream on the device
if self.src_prev_stream.device != cur_stream.device:
with device(cur_stream.device):
self.dst_prev_stream = torch.mtia.current_stream(cur_stream.device)
torch.mtia.set_stream(cur_stream)
def __exit__(self, type: Any, value: Any, traceback: Any):
# Local cur_stream variable for type refinement
cur_stream = self.stream
# If stream is None or no MTIA device available, return
if cur_stream is None or self.idx == -1:
return
# Reset the stream on the original device
# and destination device
if self.src_prev_stream.device != cur_stream.device: # type: ignore[union-attr]
torch.mtia.set_stream(self.dst_prev_stream) # type: ignore[arg-type]
torch.mtia.set_stream(self.src_prev_stream) # type: ignore[arg-type]
def stream(stream: Optional["torch.mtia.Stream"]) -> StreamContext:
r"""Wrap around the Context-manager StreamContext that selects a given stream.
Arguments:
stream (Stream): selected stream. This manager is a no-op if it's
``None``.
    .. note:: In eager mode, ``stream`` is of type ``Stream``; ``torch.mtia.stream`` is not supported under JIT scripting.
"""
return StreamContext(stream)
def get_rng_state(device: Union[int, str, torch.device] = "mtia") -> Tensor:
r"""Returns the random number generator state as a ByteTensor.
Args:
device (torch.device or int, optional): The device to return the RNG state of.
Default: ``'mtia'`` (i.e., ``torch.device('mtia')``, the current mtia device).
"""
warnings.warn(
"get_rng_state is not implemented in torch.mtia",
UserWarning,
stacklevel=2,
)
return torch.zeros([1], dtype=torch.uint8, device=device)
def set_rng_state(
new_state: Tensor, device: Union[int, str, torch.device] = "mtia"
) -> None:
r"""Sets the random number generator state.
Args:
new_state (torch.ByteTensor): The desired state
device (torch.device or int, optional): The device to set the RNG state.
Default: ``'mtia'`` (i.e., ``torch.device('mtia')``, the current mtia device).
"""
warnings.warn(
"set_rng_state is not implemented in torch.mtia",
UserWarning,
stacklevel=2,
)
from .memory import * # noqa: F403
__all__ = [
"init",
"is_available",
"is_initialized",
"synchronize",
"device_count",
"current_device",
"current_stream",
"default_stream",
"memory_stats",
"max_memory_allocated",
"reset_peak_memory_stats",
"get_device_capability",
"record_memory_history",
"snapshot",
"empty_cache",
"set_device",
"set_stream",
"stream",
"device",
"set_rng_state",
"get_rng_state",
]
```
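A heavily hedged sketch of the device/stream context managers above; it assumes a build with MTIA support, at least one MTIA device, and that tensors can be allocated with `device="mtia"`.

```py
import torch

if torch.mtia.is_available():
    with torch.mtia.device(0):                # select device 0 for this block
        s = torch.mtia.current_stream()
        with torch.mtia.stream(s):            # enqueue work on the chosen stream
            x = torch.ones(4, device="mtia")  # assumes MTIA tensor allocation
        torch.mtia.synchronize()
```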
|
============================================================================================================
SOURCE CODE FILE: _utils.py
LINES: 1
SIZE: 1.60 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\mtia\_utils.py
ENCODING: utf-8
```py
from typing import Any
import torch
# The _get_device_index has been moved to torch.utils._get_device_index
from torch._utils import _get_device_index as _torch_get_device_index
def _get_device_index(
device: Any, optional: bool = False, allow_cpu: bool = False
) -> int:
r"""Get the device index from :attr:`device`, which can be a torch.device object, a Python integer, or ``None``.
If :attr:`device` is a torch.device object, returns the device index if it
    is an MTIA device. Note that for an MTIA device without a specified index,
i.e., ``torch.device('mtia')``, this will return the current default MTIA
device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
CPU devices will be accepted and ``-1`` will be returned in this case.
If :attr:`device` is a Python integer, it is returned as is.
If :attr:`device` is ``None``, this will return the current default MTIA
device if :attr:`optional` is ``True``.
"""
if isinstance(device, int):
return device
if isinstance(device, str):
device = torch.device(device)
if isinstance(device, torch.device):
if allow_cpu:
if device.type not in ["mtia", "cpu"]:
raise ValueError(f"Expected a mtia or cpu device, but got: {device}")
elif device.type != "mtia":
raise ValueError(f"Expected a mtia device, but got: {device}")
if not torch.jit.is_scripting():
if isinstance(device, torch.mtia.device):
return device.idx
return _torch_get_device_index(device, optional, allow_cpu)
```
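For illustration, a sketch of a few invocations under the rules documented in the docstring above; note that ``_get_device_index`` is a private helper and its behavior may change:

```py
# Illustrative sketch of the documented behaviour; private helper, subject to change.
import torch
from torch.mtia._utils import _get_device_index

print(_get_device_index(0))                          # 0 -- plain ints pass through
print(_get_device_index(torch.device("mtia:1")))     # 1 -- explicit index is returned
print(_get_device_index("cpu", allow_cpu=True))      # -1 -- CPU accepted when allow_cpu=True
# _get_device_index(None, optional=True) returns the current default MTIA device index
```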
|
============================================================================================================
SOURCE CODE FILE: memory.py
LINES: 1
SIZE: 1.77 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\mtia\memory.py
ENCODING: utf-8
```py
# pyre-strict
r"""This package adds support for device memory management implemented in MTIA."""
from typing import Any, Optional
import torch
from . import _device_t, is_initialized
from ._utils import _get_device_index
def memory_stats(device: Optional[_device_t] = None) -> dict[str, Any]:
r"""Return a dictionary of MTIA memory allocator statistics for a given device.
Args:
        device (torch.device, str, or int, optional): selected device. Returns
            statistics for the current device, given by current_device(),
            if device is None (default).
"""
if not is_initialized():
return {}
return torch._C._mtia_memoryStats(_get_device_index(device, optional=True))
def max_memory_allocated(device: Optional[_device_t] = None) -> int:
r"""Return the maximum memory allocated in bytes for a given device.
Args:
        device (torch.device, str, or int, optional): selected device. Returns
            statistics for the current device, given by current_device(),
            if device is None (default).
"""
if not is_initialized():
return 0
    return memory_stats(device).get("dram", {}).get("peak_bytes", 0)
def reset_peak_memory_stats(device: Optional[_device_t] = None) -> None:
r"""Reset the peak memory stats for a given device.
Args:
        device (torch.device, str, or int, optional): selected device. Resets
            statistics for the current device, given by current_device(),
            if device is None (default).
"""
if not is_initialized():
return
torch._C._mtia_resetPeakMemoryStats(_get_device_index(device, optional=True))
__all__ = [
"memory_stats",
"max_memory_allocated",
"reset_peak_memory_stats",
]
```
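A small sketch tying these helpers together; it assumes an initialized MTIA runtime (otherwise ``memory_stats`` returns an empty dict and ``max_memory_allocated`` returns 0):

```py
# Sketch: query and reset MTIA allocator statistics (assumes an MTIA device is present).
import torch

if torch.mtia.is_available():
    torch.mtia.init()
    stats = torch.mtia.memory_stats()           # full allocator statistics dict
    peak = torch.mtia.max_memory_allocated()    # stats["dram"]["peak_bytes"], 0 if absent
    torch.mtia.reset_peak_memory_stats()        # start a fresh peak measurement
    print(peak, sorted(stats))
```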
|
=========================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 2.94 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\multiprocessing\__init__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""torch.multiprocessing is a wrapper around the native :mod:`multiprocessing` module.
It registers custom reducers, that use shared memory to provide shared
views on the same data in different processes. Once the tensor/storage is moved
to shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possible
to send it to other processes without making any copies.
The API is 100% compatible with the original module - it's enough to change
``import multiprocessing`` to ``import torch.multiprocessing`` to have all the
tensors sent through the queues or shared via other mechanisms, moved to shared
memory.
Because of the similarity of APIs, we do not document most of this package's
contents, and we recommend referring to the very good docs of the original module.
"""
import multiprocessing
import sys
import torch
from .reductions import init_reductions
__all__ = ["set_sharing_strategy", "get_sharing_strategy", "get_all_sharing_strategies"]
from multiprocessing import * # noqa: F403
__all__ += multiprocessing.__all__ # noqa: PLE0605 type: ignore[attr-defined]
# This call adds a Linux specific prctl(2) wrapper function to this module.
# See https://github.com/pytorch/pytorch/pull/14391 for more information.
torch._C._multiprocessing_init()
"""Add helper function to spawn N processes and wait for completion of any of
them. This depends `mp.get_context` which was added in Python 3.4."""
from .spawn import (
ENV_VAR_PARALLEL_START,
ProcessContext,
ProcessExitedException,
ProcessRaisedException,
spawn,
SpawnContext,
start_processes,
)
if sys.platform == "darwin" or sys.platform == "win32":
_sharing_strategy = "file_system"
_all_sharing_strategies = {"file_system"}
else:
_sharing_strategy = "file_descriptor"
_all_sharing_strategies = {"file_descriptor", "file_system"}
def set_sharing_strategy(new_strategy):
"""Set the strategy for sharing CPU tensors.
Args:
new_strategy (str): Name of the selected strategy. Should be one of
the values returned by :func:`get_all_sharing_strategies()`.
"""
global _sharing_strategy
assert new_strategy in _all_sharing_strategies
_sharing_strategy = new_strategy
def get_sharing_strategy():
"""Return the current strategy for sharing CPU tensors."""
return _sharing_strategy
def get_all_sharing_strategies():
"""Return a set of sharing strategies supported on a current system."""
return _all_sharing_strategies
def _set_thread_name(name: str) -> None:
"""Set the name of the current thread.
Args:
name (str): Name of the current thread.
"""
torch._C._set_thread_name(name)
def _get_thread_name() -> str:
"""Get the name of the current thread.
Returns:
str: Name of the current thread.
"""
return torch._C._get_thread_name()
init_reductions()
```
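A minimal sketch of the shared-memory behaviour described in the module docstring: a tensor moved to shared memory is visible (and writable) across processes without copies.

```py
# Sketch: in-place writes made by the child are visible to the parent,
# because the tensor's storage lives in shared memory.
import torch
import torch.multiprocessing as mp

def worker(t):
    t.add_(1)  # modifies the shared storage in place

if __name__ == "__main__":
    x = torch.zeros(3)
    x.share_memory_()
    p = mp.Process(target=worker, args=(x,))
    p.start()
    p.join()
    print(x)  # tensor([1., 1., 1.])
```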
|
========================================================================================================================
SOURCE CODE FILE: _atfork.py
LINES: 1
SIZE: 0.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\multiprocessing\_atfork.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import sys
__all__ = ["register_after_fork"]
if sys.platform == "win32":
import multiprocessing.util as _util
def _register(func):
def wrapper(arg):
func()
_util.register_after_fork(_register, wrapper)
else:
import os
def _register(func):
os.register_at_fork(after_in_child=func)
def register_after_fork(func):
"""Register a callable to be executed in the child process after a fork.
Note:
        In Python < 3.7 this will only work with processes created using the
        ``multiprocessing`` module. In Python >= 3.7 it also works with
        ``os.fork()``.
Args:
func (function): Function taking no arguments to be called in the child after fork
"""
_register(func)
```
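A short sketch of how this hook is typically used, for example to reset per-process state after a fork (the cache and callback names here are hypothetical):

```py
# Sketch: clear a module-level cache in every forked child process.
from torch.multiprocessing._atfork import register_after_fork

_cache: dict = {}

def _clear_cache() -> None:
    _cache.clear()  # runs in the child immediately after fork

register_after_fork(_clear_cache)
```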
|
=====================================================================================================================
SOURCE CODE FILE: pool.py
LINES: 1
SIZE: 1.75 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\multiprocessing\pool.py
ENCODING: utf-8
```py
import multiprocessing.pool
import multiprocessing.util as util
from .queue import SimpleQueue
def clean_worker(*args, **kwargs):
import gc
multiprocessing.pool.worker(*args, **kwargs)
# Regular multiprocessing workers don't fully clean up after themselves,
# so we have to explicitly trigger garbage collection to make sure that all
# destructors are called...
gc.collect()
class Pool(multiprocessing.pool.Pool):
"""Pool implementation which uses our version of SimpleQueue.
This lets us pass tensors in shared memory across processes instead of
serializing the underlying data.
"""
def _setup_queues(self):
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def _repopulate_pool(self):
"""Increase the number of pool processes to the specified number.
Bring the number of pool processes up to the specified number, for use after
reaping workers which have exited.
"""
for _ in range(self._processes - len(self._pool)):
# changed worker -> clean_worker
args = (
self._inqueue,
self._outqueue,
self._initializer,
self._initargs,
self._maxtasksperchild,
)
if hasattr(self, "_wrap_exception"):
args += (self._wrap_exception,)
w = self.Process(target=clean_worker, args=args)
self._pool.append(w)
w.name = w.name.replace("Process", "PoolWorker")
w.daemon = True
w.start()
util.debug("added worker")
```
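A sketch of using this Pool directly; tensor arguments and results travel through the SimpleQueue above, so their data is shared rather than copied byte-by-byte. This assumes a platform where the default start method works with this Pool:

```py
# Sketch: map a function over tensors with the shared-memory-backed Pool.
import torch
from torch.multiprocessing.pool import Pool

def double(t):
    return t * 2

if __name__ == "__main__":
    with Pool(processes=2) as pool:
        out = pool.map(double, [torch.ones(2), torch.full((2,), 3.0)])
    print(out)  # [tensor([2., 2.]), tensor([6., 6.])]
```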
|
======================================================================================================================
SOURCE CODE FILE: queue.py
LINES: 1
SIZE: 1.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\multiprocessing\queue.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import io
import multiprocessing.queues
import pickle
from multiprocessing.reduction import ForkingPickler
class ConnectionWrapper:
"""Proxy class for _multiprocessing.Connection which uses ForkingPickler for object serialization."""
def __init__(self, conn):
self.conn = conn
def send(self, obj):
buf = io.BytesIO()
ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)
self.send_bytes(buf.getvalue())
def recv(self):
buf = self.recv_bytes()
return pickle.loads(buf)
def __getattr__(self, name):
if "conn" in self.__dict__:
return getattr(self.conn, name)
raise AttributeError(f"'{type(self).__name__}' object has no attribute 'conn'")
class Queue(multiprocessing.queues.Queue):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
self._send = self._writer.send
self._recv = self._reader.recv
class SimpleQueue(multiprocessing.queues.SimpleQueue):
def _make_methods(self):
if not isinstance(self._reader, ConnectionWrapper):
self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
super()._make_methods() # type: ignore[misc]
```
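A same-process round trip through ``ConnectionWrapper`` shows what the wrapping buys: objects are pickled with ``ForkingPickler``, so the tensor reducers registered by ``init_reductions()`` are used. This sketch assumes the default sharing strategy can share a small CPU tensor in-process:

```py
# Sketch: ForkingPickler-based send/recv over a plain multiprocessing Pipe.
import multiprocessing
import torch
from torch.multiprocessing.queue import ConnectionWrapper

if __name__ == "__main__":
    recv_conn, send_conn = multiprocessing.Pipe(duplex=False)
    reader, writer = ConnectionWrapper(recv_conn), ConnectionWrapper(send_conn)
    writer.send(torch.arange(4))
    print(reader.recv())  # tensor([0, 1, 2, 3])
```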
|
===========================================================================================================================
SOURCE CODE FILE: reductions.py
LINES: 1
SIZE: 23.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\multiprocessing\reductions.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import multiprocessing
import os
import threading
from multiprocessing import reduction
from multiprocessing.util import register_after_fork
from typing import Union
import torch
from torch._namedtensor_internals import check_serializing_named_tensor
try:
# Early load resource_sharer to prevent a partially initialized instance
# from being inherited in a forked child process. The reduce_storage method
# requires this module indirectly through DupFd(). The built-in mp.Queue
# class pickles arguments in a background thread which may overlap with the
# fork.
import multiprocessing.resource_sharer
except ImportError:
pass
class StorageWeakRef:
r"""A weak reference to a Storage.
The cdata member is a Python number containing the integer representation of
the Storage pointer.
"""
__slots__ = ["cdata", "_free_weak_ref"]
def __init__(self, storage):
self.cdata = storage._weak_ref()
# Save a direct reference to _free_weak_ref because the `torch` module
# might be cleared during Python shutdown before this module is cleared.
self._free_weak_ref = torch.Storage._free_weak_ref # type: ignore[attr-defined]
@classmethod
def from_weakref(cls, cdata):
instance = cls.__new__(cls)
instance.cdata = cdata
instance._free_weak_ref = torch.Storage._free_weak_ref # type: ignore[attr-defined]
return instance
def expired(self):
return torch.Storage._expired(self.cdata) # type: ignore[attr-defined]
def __del__(self):
self._free_weak_ref(self.cdata)
def __hash__(self):
return self.cdata
def __eq__(self, other):
if id(self) == id(other):
return True
return self.cdata == other.cdata
class SharedCache(dict):
"""Dictionary from multiprocessing handles to StorageWeakRef."""
def __init__(self) -> None:
# free_dead_references() is called if the len exceeds the current
# limit. The limit scales with the number of remaining live objects.
self.limit = 128
# `fork` inherits lock state, so in case we fork when the lock is held,
# we register a function to reset the lock to a new object to avoid
# possible deadlocks, following python multiprocessing library design.
self._after_fork()
register_after_fork(self, SharedCache._after_fork)
def _after_fork(self):
self.lock = threading.Lock()
def get(self, key): # type: ignore[override]
with self.lock:
return dict.get(self, key)
def __setitem__(self, key, storage_ref):
with self.lock:
dict.__setitem__(self, key, storage_ref)
if len(self) > self.limit:
self.free_dead_references()
def free_dead_references(self):
live = 0
for key, storage_ref in list(self.items()):
if storage_ref.expired():
del self[key]
else:
live += 1
self.limit = max(128, live * 2)
# mapping from handles to StorageWeakRef objects
shared_cache = SharedCache()
def rebuild_event(device, handle):
return torch.cuda.Event.from_ipc_handle(device, handle)
def reduce_event(event):
handle = event.ipc_handle()
return (rebuild_event, (event.device, handle))
def rebuild_tensor(cls, storage, metadata):
storage_offset, size, stride, requires_grad = metadata
t = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
if cls == torch.nn.parameter.Parameter:
# we have to pass requires_grad into constructor, rather than set it as an
# attribute later, because it's an important check for Integer Tensors to
# have requires_grad=False (or else they raise an error)
t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
else:
t.requires_grad = requires_grad
return t
def rebuild_meta_tensor(
tensor_cls,
tensor_size,
tensor_stride,
tensor_offset,
dtype,
storage_size_bytes,
requires_grad,
):
untyped_storage = torch.UntypedStorage(storage_size_bytes, device="meta")
typed_storage = torch.TypedStorage(
wrap_storage=untyped_storage, dtype=dtype, _internal=True
)
t = torch._utils._rebuild_tensor(
typed_storage,
tensor_offset,
tensor_size,
tensor_stride,
)
if tensor_cls == torch.nn.parameter.Parameter:
# It is crucial for integer tensors to receive
# the requires_grad=False as an argument in the constructor
t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
else:
t.requires_grad = requires_grad
return t
def rebuild_cuda_tensor(
tensor_cls,
tensor_size,
tensor_stride,
tensor_offset,
storage_cls,
dtype,
storage_device,
storage_handle,
storage_size_bytes,
storage_offset_bytes,
requires_grad,
ref_counter_handle,
ref_counter_offset,
event_handle,
event_sync_required,
):
# If storage_handle is None, storage points to nullptr.
if storage_handle is None or storage_size_bytes == 0:
storage = storage_cls(0, dtype=dtype, device=storage_device, _internal=True)
else:
storage = storage_from_cache(
storage_cls, (storage_handle, storage_offset_bytes)
)
if storage is None:
torch.cuda._lazy_init()
storage = storage_cls._new_shared_cuda(
storage_device,
storage_handle,
storage_size_bytes,
storage_offset_bytes,
ref_counter_handle,
ref_counter_offset,
event_handle,
event_sync_required,
)
shared_cache[(storage_handle, storage_offset_bytes)] = StorageWeakRef(
storage
)
else:
            # We are already reference-counting this storage, but the producer needs the new ref-counters to be released.
storage_cls._release_ipc_counter(
ref_counter_handle, ref_counter_offset, device=storage_device
)
_storage = (
storage
if isinstance(storage, torch.UntypedStorage)
else storage._untyped_storage
)
t = torch._utils._rebuild_tensor(
torch.storage.TypedStorage(wrap_storage=_storage, dtype=dtype, _internal=True),
tensor_offset,
tensor_size,
tensor_stride,
)
if tensor_cls == torch.nn.parameter.Parameter:
# It is crucial for integer tensors to receive
# the requires_grad=False as an argument in the constructor
t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
else:
t.requires_grad = requires_grad
return t
def reduce_tensor(tensor):
if tensor.requires_grad and not tensor.is_leaf:
raise RuntimeError(
"Cowardly refusing to serialize non-leaf tensor which requires_grad, "
"since autograd does not support crossing process boundaries. "
"If you just want to transfer the data, call detach() on the tensor "
"before serializing (e.g., putting it on the queue)."
)
check_serializing_named_tensor(tensor)
torch.utils.hooks.warn_if_has_hooks(tensor)
# Note [CUDA IPC and the caching allocator]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# When you send a CUDA tensor over IPC, you might expect that you will
# get out the same storage from the other end. However, the CUDA caching
# allocator makes it difficult to preserve this invariant. Consider
# the following situation: a tensor of size 0x100 points to offset 0x20 of
# a storage at 0xA100 of size 0x100. (For simplicity, all of these
# sizes are given in bytes). HOWEVER, with the caching allocator, this storage
# might be part of a larger cudaMalloc allocation 0xA000 of size 0x4000.
#
# When we want to send this CUDA tensor over IPC, we must send the
# *entire* cudaMalloc allocation, i.e., the 0xA000 region, not just
# the storage 0xA100 (because that is what CUDA supports). So, on the
# other end, there simply isn't any way to say, "Wait, you gave me
# a bigger region (0xA000) than the one I wanted (0xA100)".
#
# OK, so if you sent the cudaMalloc allocation, can you just wrap that up as
# one storage itself? No, because this cudaMalloc allocation might contain
# storages of mixed types: float, bytes, double... If you make the entire
# allocation a single storage of a type A, we'll hit an error when constructing
# a tensor of type B on the storage.
#
# cudaIpcMemHandle is an identifier to access the sender cudaMalloc allocation on the
# receiver side. However, cudaIpcMemHandles from each device in a given process may
# only be opened by one context per device per other process.
# If we open and close a memory handle multiples times in a process, CUDA is allowed
# to give it a different address; similarly, once we close the memory, we're not
    # allowed to access it (and the storage/tensor built on top of it), even if it is
    # still live in the original process. As we cannot map a cudaMalloc allocation
    # to a single storage in one go, this requires us to cache the device pointer for
    # each cudaIpcMemHandle on the C++ side to reconstruct storages of the right type,
    # while keeping the old ones alive.
# See [https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html]
#
# This is fine, because all we need to do is to save our position in the allocation,
# and reconstruct storage and tensor from it.
# 0xA000 -> -------CUDA Allocation------
# | |
# | |
# | |
# | |
# 0xA100 -> --------storage1 begin------
# | |
# 0xA120 -> --------tensor1 begin ------
# | |
# | |
# | |
# | |
# | |
# 0xA160 -> --------tensor1 end---------
# | |
# | |
# | |
# 0xA200 -> --------storage1 end--------
# | |
# 0xE000 -> --------CUDA allocation-----
#
    # To send tensor1, the following info is required from sender to receiver for
    # storage reconstruction.
# 1. cudaIpcMemHandle of 0xA000(which can be mapped to a basePtr in receiver process).
# basePtr may not be exactly 0xA000 since it's a different process.
# 2. offset(0xA100) of storage1 in the CUDA allocation.
# 3. size of storage1(0x100).
#
# On receiver side:
# 1. Get the devPtr of the MemHandle to access the memory, reconstruct a storage
# of the same type using (basePtr, offset, size).
# 2. we can reconstruct the tensor on top of the reconstructed storage
# Tensor(size=0x040, offset=0x020, storage=Storage(data=basePtr+0xA100, size=0x0100))
#
# This strategy has a few implications:
#
# 1. When we serialize a CUDA tensor for IPC, we cannot do it all in one
# go (non-compositionally), and this requires to have a global map
# memHandle -> devPtr for each process.
#
# 2. We MUST NOT let the new IPC tensor be resizable. Originally, a resize
# of the storage beyond 0x100 would merely have caused us to do a
# reallocation. You don't really want to do this, but if you did,
# all that would happen is that you would lose IPC sharing. But if
# you do this in the new world, we will happily let you write out of
# bounds of your "allocation", clobbering unrelated data in the cached
# allocator block. BAD!
#
# By the way, in old versions of PyTorch, we supported this situation
# natively using a "storage view", which permitted multiple storages to be
# views on each other. But this was the *only* use of storage views, so we
# eliminated it so that we could just use tensor views to implement the same
# thing.
#
# TODO: Handle distinguishing between subclass and non-subclass versions of NT better
# https://github.com/pytorch/pytorch/issues/110543
from torch.nested._internal.nested_tensor import NestedTensor
if tensor.is_nested and not isinstance(tensor, NestedTensor):
return reduce_nested_tensor(tensor)
if tensor.layout in {
torch.sparse_coo,
torch.sparse_csr,
torch.sparse_bsr,
torch.sparse_csc,
torch.sparse_bsc,
}:
return reduce_sparse_tensor(tensor)
storage = tensor._typed_storage()
if storage._untyped_storage.device.type == "cuda":
(
device,
handle,
storage_size_bytes,
storage_offset_bytes,
ref_counter_handle,
ref_counter_offset,
event_handle,
event_sync_required,
) = storage._share_cuda_()
tensor_offset = tensor.storage_offset()
shared_cache[handle] = StorageWeakRef(storage)
# _backward_hooks purposely omitted here, see
# Note [Don't serialize hooks]
return (
rebuild_cuda_tensor,
(
type(tensor),
tensor.size(),
tensor.stride(),
tensor_offset, # tensor offset in its storage
type(storage),
tensor.dtype,
device,
handle, # identifier which CUDA allocation is the storage in.
storage_size_bytes, # size(in bytes) of the storage
storage_offset_bytes, # offset(in bytes) of the storage in the CUDA allocation
tensor.requires_grad,
ref_counter_handle,
ref_counter_offset,
event_handle,
event_sync_required,
),
)
elif storage._untyped_storage.device.type == "meta":
return (
rebuild_meta_tensor,
(
type(tensor),
tensor.size(),
tensor.stride(),
tensor.storage_offset(),
tensor.dtype,
tensor.untyped_storage().size(),
tensor.requires_grad,
),
)
# _backward_hooks purposely omitted here, see Note [Don't serialize hooks]
metadata = (
tensor.storage_offset(),
tensor.size(),
tensor.stride(),
tensor.requires_grad,
)
return (rebuild_tensor, (type(tensor), storage, metadata))
def rebuild_nested_tensor(
rebuild_buffer_func,
rebuild_buffer_args,
rebuild_sizes_func,
rebuild_sizes_args,
rebuild_strides_func,
rebuild_strides_args,
rebuild_offsets_func,
rebuild_offsets_args,
):
buffer = rebuild_buffer_func(*rebuild_buffer_args)
sizes = rebuild_sizes_func(*rebuild_sizes_args)
strides = rebuild_strides_func(*rebuild_strides_args)
offsets = rebuild_offsets_func(*rebuild_offsets_args)
return torch._nested_view_from_buffer_copy(buffer, sizes, strides, offsets)
def reduce_nested_tensor(nt):
rebuild_buffer_func, rebuild_buffer_args = reduce_tensor(nt.values())
rebuild_sizes_func, rebuild_sizes_args = reduce_tensor(nt._nested_tensor_size())
rebuild_strides_func, rebuild_strides_args = reduce_tensor(
nt._nested_tensor_strides()
)
rebuild_offsets_func, rebuild_offsets_args = reduce_tensor(
nt._nested_tensor_storage_offsets()
)
return (
rebuild_nested_tensor,
(
rebuild_buffer_func,
rebuild_buffer_args,
rebuild_sizes_func,
rebuild_sizes_args,
rebuild_strides_func,
rebuild_strides_args,
rebuild_offsets_func,
rebuild_offsets_args,
),
)
def rebuild_sparse_coo_tensor(
rebuild_indices_func,
rebuild_indices_args,
rebuild_values_func,
rebuild_values_args,
shape,
is_coalesced,
):
indices = rebuild_indices_func(*rebuild_indices_args)
values = rebuild_values_func(*rebuild_values_args)
return torch.sparse_coo_tensor(indices, values, shape, is_coalesced=is_coalesced)
def rebuild_sparse_compressed_tensor(
rebuild_compressed_indices_func,
rebuild_compressed_indices_args,
rebuild_plain_indices_func,
rebuild_plain_indices_args,
rebuild_values_func,
rebuild_values_args,
shape,
layout,
):
compressed_indices = rebuild_compressed_indices_func(
*rebuild_compressed_indices_args
)
plain_indices = rebuild_plain_indices_func(*rebuild_plain_indices_args)
values = rebuild_values_func(*rebuild_values_args)
return torch.sparse_compressed_tensor(
compressed_indices, plain_indices, values, shape, layout=layout
)
def reduce_sparse_tensor(sparse):
if sparse.layout is torch.sparse_coo:
rebuild_indices_func, rebuild_indices_args = reduce_tensor(sparse._indices())
rebuild_values_func, rebuild_values_args = reduce_tensor(sparse._values())
return (
rebuild_sparse_coo_tensor,
(
rebuild_indices_func,
rebuild_indices_args,
rebuild_values_func,
rebuild_values_args,
sparse.shape,
sparse.is_coalesced(),
),
)
else:
if sparse.layout in {torch.sparse_csr, torch.sparse_bsr}:
compressed_indices = sparse.crow_indices()
plain_indices = sparse.col_indices()
elif sparse.layout in {torch.sparse_csc, torch.sparse_bsc}:
compressed_indices = sparse.ccol_indices()
plain_indices = sparse.row_indices()
else:
raise NotImplementedError(sparse.layout)
(
rebuild_compressed_indices_func,
rebuild_compressed_indices_args,
) = reduce_tensor(compressed_indices)
rebuild_plain_indices_func, rebuild_plain_indices_args = reduce_tensor(
plain_indices
)
rebuild_values_func, rebuild_values_args = reduce_tensor(sparse.values())
return (
rebuild_sparse_compressed_tensor,
(
rebuild_compressed_indices_func,
rebuild_compressed_indices_args,
rebuild_plain_indices_func,
rebuild_plain_indices_args,
rebuild_values_func,
rebuild_values_args,
sparse.shape,
sparse.layout,
),
)
def fd_id(fd):
    # Returns a tuple which uniquely identifies a file descriptor. On macOS,
# this doesn't work with shared memory handles, which is why we don't
# support the "file_descriptor" sharing method on that platform.
stat = os.fstat(fd)
return (stat.st_ino, stat.st_dev)
def storage_from_cache(cls, key):
storage_ref = shared_cache.get(key)
if storage_ref is None:
return None
return torch.UntypedStorage._new_with_weak_ptr(storage_ref.cdata)
def rebuild_storage_fd(cls, df, size):
fd = df.detach()
try:
storage = storage_from_cache(cls, fd_id(fd))
if storage is not None:
return storage
storage = cls._new_shared_fd_cpu(fd, size)
shared_cache[fd_id(fd)] = StorageWeakRef(storage)
return storage
finally:
os.close(fd)
def rebuild_storage_filename(cls, manager, handle, size, dtype=None):
storage: Union[torch.TypedStorage, torch.UntypedStorage] = storage_from_cache(
cls, handle
)
if storage is not None:
return storage._shared_decref()
if dtype is None:
storage = torch.UntypedStorage._new_shared_filename_cpu(manager, handle, size)
else:
byte_size = size * torch._utils._element_size(dtype)
untyped_storage: torch.UntypedStorage = (
torch.UntypedStorage._new_shared_filename_cpu(manager, handle, byte_size)
)
storage = torch.TypedStorage(
wrap_storage=untyped_storage, dtype=dtype, _internal=True
)
shared_cache[handle] = StorageWeakRef(storage)
return storage._shared_decref()
def rebuild_storage_empty(cls):
return cls()
def rebuild_typed_storage(storage, dtype):
return torch.storage.TypedStorage(wrap_storage=storage, dtype=dtype, _internal=True)
# Use for torch.storage.TypedStorage
def reduce_typed_storage(storage):
return (rebuild_typed_storage, (storage._untyped_storage, storage.dtype))
def rebuild_typed_storage_child(storage, storage_type):
return storage_type(wrap_storage=storage, _internal=True)
# Use for child classes of torch.storage.TypedStorage, like torch.FloatStorage
def reduce_typed_storage_child(storage):
return (rebuild_typed_storage_child, (storage._untyped_storage, type(storage)))
def reduce_storage(storage):
from . import get_sharing_strategy
if storage.is_cuda:
raise RuntimeError(
"Cannot pickle CUDA storage; try pickling a CUDA tensor instead"
)
elif storage.device.type == "meta":
raise RuntimeError(
"Cannot pickle meta storage; try pickling a meta tensor instead"
)
elif get_sharing_strategy() == "file_system":
metadata = storage._share_filename_cpu_()
cache_key = metadata[1]
rebuild = rebuild_storage_filename
if isinstance(storage, torch.TypedStorage):
metadata += (storage.dtype,)
storage._shared_incref()
elif storage.size() == 0:
# This is special cased because Empty tensors
# (with size 0) cannot be mmapped.
return (rebuild_storage_empty, (type(storage),))
else:
fd, size = storage._share_fd_cpu_()
df = multiprocessing.reduction.DupFd(fd)
cache_key = fd_id(fd)
metadata = (df, size)
rebuild = rebuild_storage_fd # type: ignore[assignment]
shared_cache[cache_key] = StorageWeakRef(storage)
return (rebuild, (type(storage),) + metadata)
def init_reductions():
reduction.register(torch.cuda.Event, reduce_event)
for t in torch._storage_classes:
if t.__name__ == "UntypedStorage":
reduction.register(t, reduce_storage)
else:
reduction.register(t, reduce_typed_storage_child)
reduction.register(torch.storage.TypedStorage, reduce_typed_storage)
for t in torch._tensor_classes:
reduction.register(t, reduce_tensor)
# TODO: Maybe this should be in tensor_classes? :)
reduction.register(torch.Tensor, reduce_tensor)
from torch.nn.parameter import Parameter
reduction.register(Parameter, reduce_tensor)
```
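To make the effect of these reducers concrete, here is a sketch of a CPU tensor round-tripped through ``ForkingPickler``: the rebuilt tensor shares storage with the original, so writes propagate both ways.

```py
# Sketch: reduce_tensor / rebuild_tensor keep the storage shared across the round trip.
import torch
import torch.multiprocessing  # registers the reducers via init_reductions()
from multiprocessing.reduction import ForkingPickler

if __name__ == "__main__":
    x = torch.zeros(3)
    payload = ForkingPickler.dumps(x)   # reduce_tensor -> reduce_storage moves x to shared memory
    y = ForkingPickler.loads(payload)   # rebuild_tensor on the shared storage
    y.add_(1)
    print(x)  # tensor([1., 1., 1.]) -- same underlying storage
```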
|
======================================================================================================================
SOURCE CODE FILE: spawn.py
LINES: 6
SIZE: 12.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\multiprocessing\spawn.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import logging
import multiprocessing
import multiprocessing.connection
import os
import pickle
import signal
import sys
import tempfile
import time
import warnings
from concurrent.futures import as_completed, ThreadPoolExecutor
from typing import Optional
from . import _prctl_pr_set_pdeathsig # type: ignore[attr-defined]
ENV_VAR_PARALLEL_START = "TORCH_MP_PARALLEL_START"
log = logging.getLogger(__name__)
__all__ = [
"ProcessContext",
"ProcessException",
"ProcessExitedException",
"ProcessRaisedException",
"spawn",
"SpawnContext",
"start_processes",
]
class ProcessException(Exception):
__slots__ = ["error_index", "error_pid"]
def __init__(self, msg: str, error_index: int, pid: int):
super().__init__(msg)
self.msg = msg
self.error_index = error_index
self.pid = pid
def __reduce__(self):
return type(self), (self.msg, self.error_index, self.pid)
class ProcessRaisedException(ProcessException):
"""Exception raised when a process failed due to an exception raised by the code."""
def __init__(
self,
msg: str,
error_index: int,
error_pid: int,
):
super().__init__(msg, error_index, error_pid)
class ProcessExitedException(ProcessException):
"""Exception raised when a process failed due to signal or exited with a specific code."""
__slots__ = ["exit_code"]
def __init__(
self,
msg: str,
error_index: int,
error_pid: int,
exit_code: int,
signal_name: Optional[str] = None,
):
super().__init__(msg, error_index, error_pid)
self.exit_code = exit_code
self.signal_name = signal_name
def __reduce__(self):
return (
type(self),
(self.msg, self.error_index, self.pid, self.exit_code, self.signal_name),
)
def _wrap(fn, i, args, error_file):
# prctl(2) is a Linux specific system call.
# On other systems the following function call has no effect.
# This is set to ensure that non-daemonic child processes can
# terminate if their parent terminates before they do.
_prctl_pr_set_pdeathsig(signal.SIGINT)
try:
fn(i, *args)
except KeyboardInterrupt:
pass # SIGINT; Killed by parent, do nothing
except Exception:
# Propagate exception to parent process, keeping original traceback
import traceback
with open(error_file, "wb") as fh:
pickle.dump(traceback.format_exc(), fh)
sys.exit(1)
class ProcessContext:
def __init__(self, processes, error_files):
self.error_files = error_files
self.processes = processes
self.sentinels = {
process.sentinel: index for index, process in enumerate(processes)
}
def pids(self):
return [int(process.pid) for process in self.processes]
def _join_procs_with_timeout(self, timeout: float):
"""Attempt to join all processes with a shared timeout."""
end = time.monotonic() + timeout
for process in self.processes:
time_to_wait = max(0, end - time.monotonic())
process.join(time_to_wait)
def join(
self, timeout: Optional[float] = None, grace_period: Optional[float] = None
):
r"""Join one or more processes within spawn context.
Attempt to join one or more processes in this spawn context.
If one of them exited with a non-zero exit status, this function
kills the remaining processes (optionally with a grace period)
and raises an exception with the cause of the first process exiting.
Returns ``True`` if all processes have been joined successfully,
``False`` if there are more processes that need to be joined.
Args:
timeout (float): Wait this long (in seconds) before giving up on waiting.
grace_period (float): When any processes fail, wait this long (in seconds)
for others to shutdown gracefully before terminating them. If they
still don't exit, wait another grace period before killing them.
"""
# Ensure this function can be called even when we're done.
if len(self.sentinels) == 0:
return True
# Wait for any process to fail or all of them to succeed.
ready = multiprocessing.connection.wait(
self.sentinels.keys(),
timeout=timeout,
)
error_index = None
for sentinel in ready:
index = self.sentinels.pop(sentinel)
process = self.processes[index]
process.join()
if process.exitcode != 0:
error_index = index
break
# Return if there was no error.
if error_index is None:
# Return whether or not all processes have been joined.
return len(self.sentinels) == 0
# An error occurred. Clean-up all processes before returning.
# First, allow a grace period for processes to shutdown themselves.
if grace_period is not None:
self._join_procs_with_timeout(grace_period)
# Then, terminate processes that are still alive. Try SIGTERM first.
for process in self.processes:
if process.is_alive():
log.warning("Terminating process %s via signal SIGTERM", process.pid)
process.terminate()
# Try SIGKILL if the process isn't going down after another grace_period.
        # The reason: Python signal handling is limited to the main thread,
        # and if that thread is stuck in C/C++ land it won't get a chance to
        # handle the signal. We have seen processes getting stuck and not
        # handling SIGTERM for exactly this reason.
self._join_procs_with_timeout(30 if grace_period is None else grace_period)
for process in self.processes:
if process.is_alive():
log.warning(
"Unable to shutdown process %s via SIGTERM , forcefully exiting via SIGKILL",
process.pid,
)
process.kill()
process.join()
# The file will only be created if the process crashed.
failed_process = self.processes[error_index]
if not os.access(self.error_files[error_index], os.R_OK):
exitcode = self.processes[error_index].exitcode
if exitcode < 0:
try:
name = signal.Signals(-exitcode).name
except ValueError:
name = f"<Unknown signal {-exitcode}>"
raise ProcessExitedException(
f"process {error_index:d} terminated with signal {name}",
error_index=error_index,
error_pid=failed_process.pid,
exit_code=exitcode,
signal_name=name,
)
else:
raise ProcessExitedException(
f"process {error_index:d} terminated with exit code {exitcode:d}",
error_index=error_index,
error_pid=failed_process.pid,
exit_code=exitcode,
)
with open(self.error_files[error_index], "rb") as fh:
original_trace = pickle.load(fh)
msg = f"\n\n-- Process {error_index:d} terminated with the following error:\n"
msg += original_trace
raise ProcessRaisedException(msg, error_index, failed_process.pid)
class SpawnContext(ProcessContext):
def __init__(self, processes, error_files):
warnings.warn("SpawnContext is renamed to ProcessContext since 1.4 release.")
super().__init__(processes, error_files)
# Note: [start_processes]
# mp.start_processes handles both start_method='spawn' and 'fork'. It's supposed to be a
# more generalized API than mp.spawn. Currently we only document mp.spawn as it's the
# CUDA-compatible start_method. However, in environments like IPython notebooks, 'fork'
# works better than 'spawn'. Every helper function we created for mp.spawn is indeed
# general enough, and backends like XLA can reuse them in Colab notebooks as well.
# For now we only add this API; we can consider adding it to the documentation as
# needed in the future.
def start_processes(
fn,
args=(),
nprocs=1,
join=True,
daemon=False,
start_method="spawn",
):
# To speed up performance in certain cases (see https://github.com/pytorch/pytorch/issues/133010),
# this func will start processes in parallel if start_method is 'forkserver'.
# Please opt in to this perf optimization by setting env var (TORCH_MP_PARALLEL_START) to 1.
# todo: investigate why spawn does not work with threadpool and raises SIGINT
if (
start_method == "forkserver"
and os.environ.get(ENV_VAR_PARALLEL_START, "0") == "1"
):
log.info("Starting processes in parallel.")
start_parallel = True
else:
# Set env var TORCH_MP_PARALLEL_START to 0 to disable parallel start
start_parallel = False
mp = multiprocessing.get_context(start_method)
error_files = [None] * nprocs
processes = [None] * nprocs
def start_process(i):
# Each process is assigned a file to write tracebacks to. We
# use the file being non-empty to indicate an exception
# occurred (vs an expected shutdown). Note: this previously
# used a multiprocessing.Queue but that can be prone to
# deadlocks, so we went with a simpler solution for a one-shot
# message between processes.
tf = tempfile.NamedTemporaryFile(
prefix="pytorch-errorfile-", suffix=".pickle", delete=False
)
tf.close()
os.unlink(tf.name)
process = mp.Process(
target=_wrap,
args=(fn, i, args, tf.name),
daemon=daemon,
)
process.start()
return i, process, tf.name
if not start_parallel:
for i in range(nprocs):
idx, process, tf_name = start_process(i)
error_files[idx] = tf_name
processes[idx] = process
else:
with ThreadPoolExecutor(max_workers=nprocs) as executor:
futures = [executor.submit(start_process, i) for i in range(nprocs)]
for fut in as_completed(futures):
idx, process, tf_name = fut.result()
                # idx and process rank need to be the same.
error_files[idx] = tf_name
processes[idx] = process
context = ProcessContext(processes, error_files)
if not join:
return context
# Loop on join until it returns True or raises an exception.
while not context.join():
pass
def spawn(fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"):
r"""Spawns ``nprocs`` processes that run ``fn`` with ``args``.
If one of the processes exits with a non-zero exit status, the
remaining processes are killed and an exception is raised with the
cause of termination. In the case an exception was caught in the
child process, it is forwarded and its traceback is included in
the exception raised in the parent process.
Args:
fn (function): Function is called as the entrypoint of the
spawned process. This function must be defined at the top
level of a module so it can be pickled and spawned. This
is a requirement imposed by multiprocessing.
The function is called as ``fn(i, *args)``, where ``i`` is
the process index and ``args`` is the passed through tuple
of arguments.
args (tuple): Arguments passed to ``fn``.
nprocs (int): Number of processes to spawn.
join (bool): Perform a blocking join on all processes.
daemon (bool): The spawned processes' daemon flag. If set to True,
daemonic processes will be created.
start_method (str): (deprecated) this method will always use ``spawn``
as the start method. To use a different start method
use ``start_processes()``.
Returns:
None if ``join`` is ``True``,
:class:`~ProcessContext` if ``join`` is ``False``
"""
if start_method != "spawn":
msg = (
f"This method only supports start_method=spawn (got: {start_method}).\n"
"To use a different start_method use:\n\t\t"
" torch.multiprocessing.start_processes(...)"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return start_processes(fn, args, nprocs, join, daemon, start_method="spawn")
```
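A minimal usage sketch for ``spawn``: each worker receives its process index as the first argument, followed by the entries of ``args``.

```py
# Sketch: launch two worker processes and wait for both to finish.
import torch.multiprocessing as mp

def worker(rank, msg):
    print(f"worker {rank}: {msg}")

if __name__ == "__main__":
    mp.spawn(worker, args=("hello",), nprocs=2, join=True)
```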
|
================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 21.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nested\__init__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from typing import Optional, Union
import torch
import torch.nn.functional as F
from torch import SymInt, Tensor
from torch._C import _add_docstr, _nested # type: ignore[attr-defined]
from torch.types import _device as Device, _dtype as DType
__all__ = [
"to_padded_tensor",
"as_nested_tensor",
"nested_tensor",
"nested_tensor_from_jagged",
"narrow",
"masked_select",
]
# Allowlist these for weights_only load of NJT
from ._internal.nested_tensor import _rebuild_njt, NestedTensor as _NestedTensor
torch.serialization.add_safe_globals([_NestedTensor, _rebuild_njt])
def as_nested_tensor(
ts: Union[Tensor, list[Tensor], tuple[Tensor, ...]],
dtype: Optional[DType] = None,
device: Optional[Device] = None,
layout=None,
) -> Tensor:
r"""
Constructs a nested tensor preserving autograd history from a tensor or a list / tuple of
tensors.
If a nested tensor is passed, it will be returned directly unless the device / dtype / layout
differ. Note that converting device / dtype will result in a copy, while converting layout
is not currently supported by this function.
If a non-nested tensor is passed, it is treated as a batch of constituents of consistent size.
A copy will be incurred if the passed device / dtype differ from those of the input OR if
the input is non-contiguous. Otherwise, the input's storage will be used directly.
If a tensor list is provided, tensors in the list are always copied during construction of
the nested tensor.
Args:
ts (Tensor or List[Tensor] or Tuple[Tensor]): a tensor to treat as a nested tensor OR a
list / tuple of tensors with the same ndim
Keyword arguments:
dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
device (:class:`torch.device`, optional): the desired device of returned nested tensor.
Default: if None, same :class:`torch.device` as leftmost tensor in the list
layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
Only strided and jagged layouts are supported. Default: if None, the strided layout.
Example::
>>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
>>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
>>> nt = torch.nested.as_nested_tensor([a, b])
>>> nt.is_leaf
False
>>> fake_grad = torch.nested.nested_tensor([torch.ones_like(a), torch.zeros_like(b)])
>>> nt.backward(fake_grad)
>>> a.grad
tensor([1., 1., 1.])
>>> b.grad
tensor([0., 0., 0., 0., 0.])
>>> c = torch.randn(3, 5, requires_grad=True)
>>> nt2 = torch.nested.as_nested_tensor(c)
"""
is_tensor_list = isinstance(ts, (list, tuple)) and all(
isinstance(t, Tensor) for t in ts
)
if not isinstance(ts, Tensor) and not is_tensor_list:
raise TypeError(
"as_nested_tensor(): Expected first argument to be a tensor or a list / tuple of tensors "
)
# convert tuple -> list if needed
if is_tensor_list and not isinstance(ts, list):
ts = list(ts)
if isinstance(ts, Tensor) and ts.dim() < 2:
raise RuntimeError(
"as_nested_tensor(): Expected tensor argument to have dim() > 1"
)
if isinstance(ts, Tensor) and ts.is_nested:
if layout == ts.layout:
# return input directly or input copied to device / dtype
return ts.to(device=device, dtype=dtype)
else:
# TODO: Just use nt.to(layout=layout) when it exists.
raise RuntimeError(
"as_nested_tensor(): Converting between nested tensor layouts is not supported"
)
if layout is None:
layout = torch.strided
if layout == torch.strided:
if isinstance(ts, Tensor):
# contiguous() might be necessary to get flattened view.
# we could probably be more precise about when to do this as an optimization
buffer = ts.contiguous().view(-1).to(device=device, dtype=dtype)
nested_sizes = torch.tensor([t.shape for t in ts])
return torch._nested_view_from_buffer(
buffer,
nested_sizes,
*torch._nested_compute_contiguous_strides_offsets(nested_sizes),
)
else:
assert isinstance(ts, list)
return torch._nested_tensor_from_tensor_list(ts, dtype, None, device, None)
elif layout == torch.jagged:
if isinstance(ts, Tensor):
if device is None:
device = ts.device
# contiguous() might be necessary to get flattened view.
# we could probably be more precise about when to do this as an optimization
values = ts.contiguous().flatten(0, 1).to(device=device, dtype=dtype)
batch_size = ts.shape[0]
seq_len = ts.shape[1]
offsets = torch.arange(
0, batch_size * seq_len + 1, seq_len, device=device, dtype=torch.int64
)
from torch.nested._internal.nested_tensor import (
nested_view_from_values_offsets,
)
return nested_view_from_values_offsets(
values, offsets, min_seqlen=seq_len, max_seqlen=seq_len
)
else:
from torch.nested._internal.nested_tensor import jagged_from_list
assert isinstance(ts, list)
nt, _ = jagged_from_list(ts, offsets=None, device=device, dtype=dtype)
return nt
else:
raise RuntimeError(
f"Specified layout is unsupported for nested tensors: {layout}"
)
# Note: This not only adds doc strings for the nested ops, but
# also connects the torch.nested Python namespace to the torch._C._nested builtins.
to_padded_tensor = _add_docstr(
_nested.nested_to_padded_tensor,
r"""
to_padded_tensor(input, padding, output_size=None, out=None) -> Tensor
Returns a new (non-nested) Tensor by padding the :attr:`input` nested tensor.
The leading entries will be filled with the nested data,
while the trailing entries will be padded.
.. warning::
:func:`to_padded_tensor` always copies the underlying data,
since the nested and the non-nested tensors differ in memory layout.
Args:
padding (float): The padding value for the trailing entries.
Keyword args:
output_size (Tuple[int]): The size of the output tensor.
If given, it must be large enough to contain all nested data;
else, will infer by taking the max size of each nested sub-tensor along each dimension.
out (Tensor, optional): the output tensor.
Example::
>>> nt = torch.nested.nested_tensor([torch.randn((2, 5)), torch.randn((3, 4))])
nested_tensor([
tensor([[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276],
[-1.9967, -1.0054, 1.8972, 0.9174, -1.4995]]),
tensor([[-1.8546, -0.7194, -0.2918, -0.1846],
[ 0.2773, 0.8793, -0.5183, -0.6447],
[ 1.8009, 1.8468, -0.9832, -1.5272]])
])
>>> pt_infer = torch.nested.to_padded_tensor(nt, 0.0)
tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276],
[-1.9967, -1.0054, 1.8972, 0.9174, -1.4995],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]],
[[-1.8546, -0.7194, -0.2918, -0.1846, 0.0000],
[ 0.2773, 0.8793, -0.5183, -0.6447, 0.0000],
[ 1.8009, 1.8468, -0.9832, -1.5272, 0.0000]]])
>>> pt_large = torch.nested.to_padded_tensor(nt, 1.0, (2, 4, 6))
tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276, 1.0000],
[-1.9967, -1.0054, 1.8972, 0.9174, -1.4995, 1.0000],
[ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000],
[ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]],
[[-1.8546, -0.7194, -0.2918, -0.1846, 1.0000, 1.0000],
[ 0.2773, 0.8793, -0.5183, -0.6447, 1.0000, 1.0000],
[ 1.8009, 1.8468, -0.9832, -1.5272, 1.0000, 1.0000],
[ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]])
>>> pt_small = torch.nested.to_padded_tensor(nt, 2.0, (2, 2, 2))
RuntimeError: Value in output_size is less than NestedTensor padded size. Truncation is not supported.
""",
)
def nested_tensor(
tensor_list,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
pin_memory=False,
) -> Tensor:
r"""
Constructs a nested tensor with no autograd history (also known as a "leaf tensor", see
:ref:`Autograd mechanics <autograd-mechanics>`) from :attr:`tensor_list` a list of tensors.
Args:
tensor_list (List[array_like]): a list of tensors, or anything that can be passed to torch.tensor,
where each element of the list has the same dimensionality.
Keyword arguments:
dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
Only strided and jagged layouts are supported. Default: if None, the strided layout.
device (:class:`torch.device`, optional): the desired device of returned nested tensor.
Default: if None, same :class:`torch.device` as leftmost tensor in the list
requires_grad (bool, optional): If autograd should record operations on the
returned nested tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned nested tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
>>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
>>> nt = torch.nested.nested_tensor([a, b], requires_grad=True)
>>> nt.is_leaf
True
"""
if layout is None:
layout = torch.strided
if layout == torch.strided:
return _nested.nested_tensor(
tensor_list,
dtype=dtype,
device=device,
requires_grad=requires_grad,
pin_memory=pin_memory,
)
elif layout == torch.jagged:
# Need to wrap lists of scalars as tensors
list_of_tensors = [
t if isinstance(t, Tensor) else torch.as_tensor(t) for t in tensor_list
]
from torch.nested._internal.nested_tensor import jagged_from_list
with torch.no_grad():
nt, _ = jagged_from_list(
list_of_tensors, offsets=None, device=device, dtype=dtype
)
nt.requires_grad_(requires_grad)
if pin_memory:
nt = nt.pin_memory() # type: ignore[assignment]
return nt
else:
raise RuntimeError(
f"Specified layout is unsupported for nested tensors: {layout}"
)
def narrow(
tensor: Tensor,
dim: int,
start: Union[int, Tensor],
length: Union[int, Tensor],
layout=torch.strided,
) -> Tensor:
r"""
Constructs a nested tensor (which might be a view) from :attr:`tensor`, a strided tensor. This follows
similar semantics to torch.Tensor.narrow, where in the :attr:`dim`-th dimension the new nested tensor
shows only the elements in the interval `[start, start+length)`. As nested representations
allow for a different `start` and `length` at each 'row' of that dimension, :attr:`start` and :attr:`length`
can also be tensors of shape `tensor.shape[0]`.
    There are some differences depending on the layout you use for the nested tensor. If using the strided layout,
    torch.narrow will copy the narrowed data into a contiguous NT with strided layout, while the
    jagged layout narrow() will create a non-contiguous view of your original strided tensor. This particular
    representation is really useful for representing kv-caches in Transformer models, as specialized
    SDPA kernels can deal with this format easily, resulting in performance improvements.
Args:
tensor (:class:`torch.Tensor`): a strided tensor, which will be used as the underlying data
for the nested tensor if using the jagged layout or will be copied for the strided layout.
dim (int): the dimension where narrow will be applied. Only `dim=1` is supported for the
jagged layout, while strided supports all dim
start (Union[int, :class:`torch.Tensor`]): starting element for the narrow operation
length (Union[int, :class:`torch.Tensor`]): number of elements taken during the narrow op
Keyword arguments:
layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
Only strided and jagged layouts are supported. Default: if None, the strided layout.
Example::
>>> starts = torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64)
>>> lengths = torch.tensor([3, 2, 2, 1, 5], dtype=torch.int64)
>>> narrow_base = torch.randn(5, 10, 20)
>>> nt_narrowed = torch.nested.narrow(narrow_base, 1, starts, lengths, layout=torch.jagged)
>>> nt_narrowed.is_contiguous()
False
"""
if not isinstance(start, (int, SymInt, Tensor)):
raise RuntimeError("start must be an integer or a tensor")
if not isinstance(length, (int, SymInt, Tensor)):
raise RuntimeError("length must be an integer or a tensor")
if layout == torch.strided:
if isinstance(start, Tensor) or isinstance(length, Tensor):
raise RuntimeError(
"start and length must be integers for the strided layout NT impl"
)
# TODO: switch to as_nested_tensor(tensor) when it is available
nt = as_nested_tensor(torch.unbind(tensor), layout=torch.strided).narrow(
dim, start, length
)
elif layout == torch.jagged:
if dim != 1:
raise RuntimeError("jagged layout only supports dim=1")
from torch.nested._internal.nested_tensor import jagged_from_tensor_and_lengths
if isinstance(start, (int, SymInt)):
start = torch.tensor([start], device=tensor.device, dtype=torch.int64)
if isinstance(length, (int, SymInt)):
length = torch.tensor([length], device=tensor.device, dtype=torch.int64)
nt, _, _ = jagged_from_tensor_and_lengths(tensor, start, length)
else:
raise RuntimeError(
f"Specified layout is unsupported for nested narrow: {layout}"
)
return nt
def nested_tensor_from_jagged(
values: Tensor,
offsets: Optional[Tensor] = None,
lengths: Optional[Tensor] = None,
jagged_dim: Optional[int] = None,
min_seqlen: Optional[int] = None,
max_seqlen: Optional[int] = None,
) -> Tensor:
r"""
Constructs a jagged layout nested tensor from the given jagged components. The jagged layout
consists of a required values buffer with the jagged dimension packed into a single dimension.
The offsets / lengths metadata determines how this dimension is split into batch elements
and are expected to be allocated on the same device as the values buffer.
Expected metadata formats:
* offsets: Indices within the packed dimension splitting it into heterogeneously-sized
batch elements. Example: [0, 2, 3, 6] indicates that a packed jagged dim of size 6
should be conceptually split into batch elements of length [2, 1, 3]. Note that both the
beginning and ending offsets are required for kernel convenience (i.e. shape batch_size + 1).
* lengths: Lengths of the individual batch elements; shape == batch_size. Example: [2, 1, 3]
indicates that a packed jagged dim of size 6 should be conceptually split into batch
elements of length [2, 1, 3].
Note that it can be useful to provide both offsets and lengths. This describes a nested tensor
with "holes", where the offsets indicate the start position of each batch item and the length
specifies the total number of elements (see example below).
The returned jagged layout nested tensor will be a view of the input values tensor.
Args:
values (:class:`torch.Tensor`): The underlying buffer in the shape of
(sum_B(*), D_1, ..., D_N). The jagged dimension is packed into a single dimension,
with the offsets / lengths metadata used to distinguish batch elements.
offsets (optional :class:`torch.Tensor`): Offsets into the jagged dimension of shape B + 1.
lengths (optional :class:`torch.Tensor`): Lengths of the batch elements of shape B.
jagged_dim (optional int): Indicates which dimension in values is the packed jagged
dimension. If None, this is set to dim=1 (i.e. the dimension immediately following
the batch dimension). Default: None
min_seqlen (optional int): If set, uses the specified value as the cached minimum sequence
length for the returned nested tensor. This can be a useful alternative to computing
this value on-demand, possibly avoiding a GPU -> CPU sync. Default: None
max_seqlen (optional int): If set, uses the specified value as the cached maximum sequence
length for the returned nested tensor. This can be a useful alternative to computing
this value on-demand, possibly avoiding a GPU -> CPU sync. Default: None
Example::
>>> values = torch.randn(12, 5)
>>> offsets = torch.tensor([0, 3, 5, 6, 10, 12])
>>> nt = nested_tensor_from_jagged(values, offsets)
>>> # 3D shape with the middle dimension jagged
>>> nt.shape
torch.Size([5, j2, 5])
>>> # Length of each item in the batch:
>>> offsets.diff()
tensor([3, 2, 1, 4, 2])
>>> values = torch.randn(6, 5)
>>> offsets = torch.tensor([0, 2, 3, 6])
>>> lengths = torch.tensor([1, 1, 2])
>>> # NT with holes
>>> nt = nested_tensor_from_jagged(values, offsets, lengths)
>>> a, b, c = nt.unbind()
>>> # Batch item 1 consists of indices [0, 1)
>>> torch.equal(a, values[0:1, :])
True
>>> # Batch item 2 consists of indices [2, 3)
>>> torch.equal(b, values[2:3, :])
True
>>> # Batch item 3 consists of indices [3, 5)
>>> torch.equal(c, values[3:5, :])
True
"""
from torch.fx._symbolic_trace import is_fx_tracing
if is_fx_tracing():
raise RuntimeError(
"torch.nested.nested_tensor_from_jagged does not support tracing with fx.symbolic_trace. "
"Use fx.wrap to wrap the function that calls nested_tensor_from_jagged."
)
if offsets is None:
if lengths is None:
raise RuntimeError(
"nested_tensor_from_jagged(): At least one of offsets or lengths is required."
)
else:
# TODO: Truly support offsets=None at some point?
# For now, just convert lengths -> offsets for kernel convenience
offsets = F.pad(lengths.cumsum(0), (1, 0))
lengths = None
if jagged_dim is None:
jagged_dim = 1
from torch.nested._internal.nested_tensor import (
nested_view_from_values_offsets_lengths,
)
return nested_view_from_values_offsets_lengths(
values,
offsets,
lengths,
ragged_idx=jagged_dim,
min_seqlen=min_seqlen,
max_seqlen=max_seqlen,
)
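# A minimal sketch (editor's addition, not part of the original file) of the
# lengths-only path above: offsets are derived by left-padding the cumulative sum
# of lengths, and the lengths metadata is then dropped.
#
#   values = torch.randn(6, 5)
#   nt = nested_tensor_from_jagged(values, lengths=torch.tensor([2, 1, 3]))
#   nt.offsets()   # tensor([0, 2, 3, 6])
#   nt.lengths()   # None -- converted to offsets for kernel convenience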
def masked_select(tensor: Tensor, mask: Tensor) -> Tensor:
r"""
    Constructs a jagged layout nested tensor from a strided tensor input and a strided mask. The
    resulting nested tensor retains the values at positions where the mask is True. The dimensionality
    of the mask is preserved and is represented with the offsets; this is unlike :func:`masked_select`,
    where the output is collapsed to a 1D tensor.
    Args:
        tensor (:class:`torch.Tensor`): a strided tensor from which the jagged layout nested tensor is constructed.
        mask (:class:`torch.Tensor`): a strided mask tensor which is applied to the tensor input
Example::
>>> tensor = torch.randn(3, 3)
>>> mask = torch.tensor([[False, False, True], [True, False, True], [False, False, True]])
>>> nt = torch.nested.masked_select(tensor, mask)
>>> nt.shape
torch.Size([3, j4])
>>> # Length of each item in the batch:
>>> nt.offsets().diff()
tensor([1, 2, 1])
>>> tensor = torch.randn(6, 5)
>>> mask = torch.tensor([False])
>>> nt = torch.nested.masked_select(tensor, mask)
>>> nt.shape
torch.Size([6, j5])
>>> # Length of each item in the batch:
>>> nt.offsets().diff()
tensor([0, 0, 0, 0, 0, 0])
"""
if tensor.layout != torch.strided:
raise RuntimeError(
f"torch.nested.masked_select requires a strided tensor, given {tensor.layout}"
)
if mask.layout != torch.strided:
raise RuntimeError(
f"torch.nested.masked_select requires a strided mask, given: {mask.layout}"
)
res_values = tensor.masked_select(mask)
expanded_mask = mask.expand(tensor.shape)
res_lengths = expanded_mask.sum(dim=tensor.ndim - 1).view(-1)
from torch.nested._internal.nested_tensor import nested_view_from_values_offsets
return nested_view_from_values_offsets(
values=res_values,
offsets=F.pad(res_lengths.cumsum(dim=0), (1, 0)),
)
```
|
==========================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nested\_internal\__init__.py
ENCODING: utf-8
```py
```
|
============================================================================================================================
SOURCE CODE FILE: nested_int.py
LINES: 1
SIZE: 3.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nested\_internal\nested_int.py
ENCODING: utf-8
```py
from typing import * # noqa: F403
import torch
from torch.fx.experimental._constant_symnode import ConstantIntNode
__all__ = ["NestedIntNode"]
# Python version of aten/src/ATen/core/NestedIntSymNodeImpl.cpp
def _eq(lhs: Any, rhs: Any) -> bool:
return (
isinstance(lhs, NestedIntNode)
and isinstance(rhs, NestedIntNode)
and lhs.t_id == rhs.t_id
and lhs.coeff == rhs.coeff
)
def _ge(lhs: Any, rhs: Any) -> bool:
if isinstance(rhs, NestedIntNode) and isinstance(lhs, NestedIntNode):
if lhs.t_id == rhs.t_id:
return lhs.coeff >= rhs.coeff
raise ValueError("ge: relation is indeterminate")
elif isinstance(lhs, NestedIntNode):
if rhs.is_constant() and rhs.constant_int() <= 2:
return True
raise ValueError("ge: relation is indeterminate")
elif isinstance(rhs, NestedIntNode):
if lhs.is_constant() and lhs.constant_int() < 2:
return False
raise ValueError("ge: relation is indeterminate")
else:
raise ValueError("inputs unsupported")
class NestedIntNode:
def __init__(self, t_id: int, coeff: int):
self.t_id = t_id
self.coeff = coeff
def nested_int_coeff(self) -> int:
return self.coeff
def maybe_as_int(self) -> Optional[int]:
return None
def is_int(self) -> bool:
return True
def is_float(self) -> bool:
return False
def is_bool(self) -> bool:
return False
def is_nested_int(self) -> bool:
return True
def clone(self) -> "NestedIntNode":
return self
def _str(self) -> Any:
if self.coeff == 1:
return f"j{self.t_id}"
return f"{self.coeff}*j{self.t_id}"
def str(self) -> Any:
return self._str()
def __str__(self) -> Any:
return self._str()
def __repr__(self) -> Any:
return self._str()
def _graph_repr(self) -> Any:
return self._str()
def mul(self, other: Any) -> "NestedIntNode":
if other.is_constant():
other = other.constant_int()
else:
raise ValueError(f"unsupported: {type(other)}")
return NestedIntNode(self.t_id, self.coeff * other)
def eq(self, other: Any) -> Any:
return torch._C._get_constant_bool_symnode(_eq(self, other))
def ne(self, other: Any) -> Any:
return torch._C._get_constant_bool_symnode(not _eq(self, other))
def gt(self, other: Any) -> Any:
return torch._C._get_constant_bool_symnode(not _ge(other, self))
def lt(self, other: Any) -> Any:
return torch._C._get_constant_bool_symnode(not _ge(self, other))
def le(self, other: Any) -> Any:
return torch._C._get_constant_bool_symnode(_ge(other, self))
def ge(self, other: Any) -> Any:
return torch._C._get_constant_bool_symnode(_ge(self, other))
def is_symbolic(self) -> bool:
return False
def nested_int(self) -> int:
return self.t_id
def is_constant(self) -> bool:
return False
def wrap_int(self, num: int) -> ConstantIntNode:
assert type(num) is int
return ConstantIntNode(num)
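# A minimal sketch (editor's addition, not part of the original file) of the partial
# order implemented by _eq / _ge above, assuming ConstantIntNode exposes
# is_constant() / constant_int() as used there. Every nested int is treated as >= 2,
# so comparisons against small constants are decidable while most others are not:
#
#   j0 = NestedIntNode(t_id=0, coeff=1)
#   j0.ge(ConstantIntNode(2))    # constant bool SymNode wrapping True
#   j0.eq(NestedIntNode(0, 1))   # True (same t_id and coeff)
#   j0.eq(NestedIntNode(1, 1))   # False (different ragged structure)
#   j0.ge(ConstantIntNode(3))    # raises ValueError: "ge: relation is indeterminate"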
```
|
===============================================================================================================================
SOURCE CODE FILE: nested_tensor.py
LINES: 1
SIZE: 24.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nested\_internal\nested_tensor.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from typing import * # noqa: F403
import torch
from torch._C import DispatchKey, DispatchKeySet
from torch._prims_common import is_expandable_to
from torch.nested._internal.nested_int import NestedIntNode
from torch.utils.weak import WeakTensorKeyDictionary
_tensor_id_counter = 0
_tensor_symint_registry = WeakTensorKeyDictionary()
def get_tensor_symint(tensor, *, coeff=1):
from torch._subclasses.fake_tensor import FakeTensor
from torch._subclasses.functional_tensor import mb_unwrap_functional_tensor
# NB: Only FakeTensor is associated with a memo
tensor = mb_unwrap_functional_tensor(tensor)
if isinstance(tensor, FakeTensor):
return tensor.get_nested_int(coeff=coeff)
global _tensor_id_counter
tensor_symint = _tensor_symint_registry.get(tensor)
if tensor_symint is None:
tensor_symint = torch.SymInt(NestedIntNode(_tensor_id_counter, coeff))
_tensor_id_counter += 1
_tensor_symint_registry[tensor] = tensor_symint
return tensor_symint
# SDPA metadata; max / min seqlens are needed for e.g. flash
def _get_sdpa_extreme_seqlen(func, tensor):
return int(func(tensor).item())
def _store_val_in_tensor(val) -> torch.Tensor:
# hack to get dynamic shapes support: store in a (val, 0) shaped tensor
return torch.zeros(val, 0)
def _load_val_from_tensor(t: torch.Tensor):
return t.shape[0]
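# A minimal sketch (editor's addition): round-tripping a cached seqlen through the
# (val, 0)-shaped tensor hack above. The tensor holds no data; only its first
# (symbolic) size carries the value.
#
#   t = _store_val_in_tensor(5)   # shape (5, 0), numel() == 0
#   _load_val_from_tensor(t)      # 5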
# serialization function must be defined at top level
def _rebuild_njt(constructor_kwargs):
return NestedTensor(**constructor_kwargs)
class NestedTensor(torch.Tensor):
_values: torch.Tensor # type: ignore[assignment]
_offsets: torch.Tensor
_lengths: Optional[torch.Tensor]
# NOTE [ Nested ints for ragged sizes and strides ]
#
# Jagged layout tensors are tensors that represent a n-dim tensor with a
# ragged dimension, but are backed by an (n-1)-dim tensor underneath, e.g.,
    # a jagged tensor with outer shape [B, x, D] is represented internally by a
    # tensor with shape [sum(x), D], where we introduce what we call a nested int,
    # denoted as "x" here (but sometimes denoted with "*"), to represent the ragged
    # dimension; sum(x) represents the dim of the inner tensor, or equivalently the
    # sum of the sizes of the constituent tensors' varying lengths.
#
# We also use nested ints to represent the strides of this tensor.
# For example, a jagged tensor with shape [B, x, D] can be strided in two
# ways: [xD, D, 1] and [x, 1, sum(x)], where xD represents x multiplied by D
_size: tuple[int, ...]
_strides: tuple[int, ...]
# Indicates that the nth dimension is ragged
_ragged_idx: int
_metadata_cache: Dict[str, Any]
@staticmethod
def __new__(
cls,
values,
offsets,
*,
lengths=None,
**kwargs,
):
ks = DispatchKeySet(DispatchKey.NestedTensor)
ks = ks.add(DispatchKey.AutogradNestedTensor)
# Only support jagged for now.
assert offsets is not None
assert offsets.ndim == 1
assert not isinstance(values, NestedTensor)
assert values.device == offsets.device
# Query cache for the symint associated with offsets or lengths
# (create a new one if needed).
ragged_source = offsets if lengths is None else lengths
ragged_size = get_tensor_symint(ragged_source, coeff=1)
_ragged_idx = kwargs.get("_ragged_idx", 1)
B = offsets.shape[0] - 1
if lengths is not None:
assert B == lengths.shape[0]
# subtract 1 to convert to values dim space
r = _ragged_idx - 1
_size = (B, *values.shape[:r], ragged_size, *values.shape[r + 1 :])
stride = values.stride()
_strides = (ragged_size * stride[r], *stride)
r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
cls,
_size,
_strides,
0,
torch.contiguous_format,
values.dtype,
torch.jagged,
values.device,
False,
kwargs.get("requires_grad", False),
"sizes",
False,
True, # dispatch_layout
ks,
# don't try to calculate storage based on non-zero size
storage_size=values.untyped_storage().size(),
)
r._ragged_idx = _ragged_idx
r._size = _size
r._strides = _strides
return r
def __init__(self, values, offsets, *, lengths=None, **kwargs):
super().__init__()
self._values = values
self._offsets = offsets
self._lengths = lengths
# holds properties that are computed lazily
self._metadata_cache = kwargs.get("_metadata_cache") or {}
# collapsed ragged dim must always be dynamic
torch._dynamo.maybe_mark_dynamic(self, self._ragged_idx)
torch._dynamo.maybe_mark_dynamic(self._values, self._ragged_idx - 1)
# min / max sequence length should be dynamic if present
max_seqlen_tensor = self._metadata_cache.get("max_seqlen", None)
if max_seqlen_tensor is not None:
torch._dynamo.mark_dynamic(max_seqlen_tensor, 0)
min_seqlen_tensor = self._metadata_cache.get("min_seqlen", None)
if min_seqlen_tensor is not None:
torch._dynamo.mark_dynamic(min_seqlen_tensor, 0)
def values(self):
# dispatch to get proper view relationship
return torch._nested_get_values(self) # type: ignore[attr-defined]
def offsets(self):
return self._offsets
def lengths(self):
return self._lengths
# Private accessor functions for min / max sequence length. They're
# purposefully not @properties because those don't work with PT2 (yet).
# These compute / cache if not present.
# TODO: Revisit this when @properties are better supported by PT2. I think the ideal
# state would be to have public @properties for min / max sequence length that compile
# (including setters).
def _get_max_seqlen(self):
max_seqlen_tensor = self._max_seqlen_tensor
if max_seqlen_tensor is None:
# compute & cache
max_val = _get_sdpa_extreme_seqlen(
torch.max,
self._offsets.diff() if self._lengths is None else self._lengths,
)
max_seqlen_tensor = _store_val_in_tensor(max_val)
self._metadata_cache["max_seqlen"] = max_seqlen_tensor
return _load_val_from_tensor(max_seqlen_tensor)
def _get_min_seqlen(self):
min_seqlen_tensor = self._min_seqlen_tensor
if min_seqlen_tensor is None:
# compute & cache
min_val = _get_sdpa_extreme_seqlen(
torch.min,
self._offsets.diff() if self._lengths is None else self._lengths,
)
min_seqlen_tensor = _store_val_in_tensor(min_val)
self._metadata_cache["min_seqlen"] = min_seqlen_tensor
return _load_val_from_tensor(min_seqlen_tensor)
# Private accessors used for treating min / max seqlen as inner tensors for
# flatten / unflatten. These must be properties to work with the traceable wrapper
# subclass logic. These do not compute / cache if not present.
@property
def _max_seqlen_tensor(self) -> Optional[torch.Tensor]:
return self._metadata_cache.get("max_seqlen", None)
@property
def _min_seqlen_tensor(self) -> Optional[torch.Tensor]:
return self._metadata_cache.get("min_seqlen", None)
# These are old private @property accessors that are kept around for internal BC
# reasons. TODO: Remove these!
@property
def _max_seqlen(self):
return self._get_max_seqlen()
@property
def _min_seqlen(self):
return self._get_min_seqlen()
# Convenience accessors that return a min / max seqlen if one is present and do NOT
# compute / cache them if they're not.
@property
def _maybe_max_seqlen(self) -> Optional[int]:
mt = self._max_seqlen_tensor
return None if mt is None else _load_val_from_tensor(mt)
@property
def _maybe_min_seqlen(self) -> Optional[int]:
mt = self._min_seqlen_tensor
return None if mt is None else _load_val_from_tensor(mt)
def __repr__(self): # type: ignore[override]
# We should implement this in torch/_tensor_str.py instead
grad_fn_str = (
f", requires_grad={self.requires_grad}" if self.requires_grad else ""
)
if self.grad_fn:
grad_fn_str = f", grad_fn={self.grad_fn}"
return f"NestedTensor(size={self._size}, offsets={self._offsets}{grad_fn_str}, contiguous={self._lengths is None})"
# TODO: Remove this in favor of the default tensor subclass serialization logic.
# We don't do this today because of https://github.com/pytorch/pytorch/issues/125622.
def __reduce_ex__(self, proto):
state = torch._utils._get_obj_state(self)
# Cached PyCapsules for sizes / strides are not serializable.
# See Note [Tensor Subclass custom size/stride caching strategy]
self._clear_non_serializable_cached_data()
# SymNodes are not serializable
assert "_size" in state and "_strides" in state
state = dict(state)
del state["_size"]
del state["_strides"]
func = _rebuild_njt
constructor_kwargs = {
"values": self._values,
"offsets": self._offsets,
"lengths": self._lengths,
"_ragged_idx": self._ragged_idx,
"_metadata_cache": self._metadata_cache,
"requires_grad": self.requires_grad,
}
args = (constructor_kwargs,)
return (torch._tensor._rebuild_from_type_v2, (func, type(self), args, state))
def __tensor_flatten__(self):
ctx = {
"requires_grad": self.requires_grad,
"ragged_idx": self._ragged_idx,
}
inner_tensors = ["_values", "_offsets"]
if self._lengths is not None:
inner_tensors.append("_lengths")
if self._min_seqlen_tensor is not None:
inner_tensors.append("_min_seqlen_tensor")
if self._max_seqlen_tensor is not None:
inner_tensors.append("_max_seqlen_tensor")
return inner_tensors, ctx
@staticmethod
def __tensor_unflatten__(inner_tensors: Dict, meta, outer_size, outer_stride):
from torch._subclasses.fake_tensor import FakeTensor
# inner tensors: _values, _offsets, [_lengths], [_min_seqlen], [_max_seqlen]
assert len(inner_tensors) >= 2 and len(inner_tensors) <= 5
values = inner_tensors["_values"]
offsets = inner_tensors["_offsets"]
lengths = inner_tensors.get("_lengths", None)
min_seqlen_tensor = inner_tensors.get("_min_seqlen_tensor", None)
max_seqlen_tensor = inner_tensors.get("_max_seqlen_tensor", None)
metadata_cache = {}
if min_seqlen_tensor is not None:
metadata_cache["min_seqlen"] = min_seqlen_tensor
if max_seqlen_tensor is not None:
metadata_cache["max_seqlen"] = max_seqlen_tensor
ragged_idx = meta["ragged_idx"]
# Alternatively, we could make it the caller's responsibility to
# cache it. But this heuristic seems simple enough.
ragged_source = offsets if lengths is None else lengths
if isinstance(ragged_source, FakeTensor):
ragged_size = outer_size[ragged_idx]
ragged_source.nested_int_memo = ragged_size
return NestedTensor(
values,
offsets=offsets,
lengths=lengths,
requires_grad=meta["requires_grad"],
_ragged_idx=ragged_idx,
_metadata_cache=metadata_cache,
)
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
# If you're wondering why there's a nested tensor with one of its
# size = -1, see note: [NJT outer_size in AOTDispatcher]
kwargs = {} if kwargs is None else kwargs
# Lazy import to avoid circular dependency
from .ops import lookup_jagged
fn = lookup_jagged(func, *args, **kwargs)
if fn is not None:
return fn(*args, **kwargs)
# Poor man's redispatch for composite ops. This becomes relevant under inference
# mode, where disabling autograd key dispatch prevents decomposition.
all_dks = (
# We want to handle both the cases where NestedTensor overrides the
# composite implicit autograd kernel, and the case where it doesn't.
# Prioritize calling into NestedTensor's kernel if it exists.
torch._C.DispatchKey.CompositeImplicitAutogradNestedTensor,
torch._C.DispatchKey.CompositeImplicitAutograd,
)
for dk in all_dks:
if torch._C._dispatch_has_kernel_for_dispatch_key(func.name(), dk):
with torch.overrides.enable_reentrant_dispatch():
return func._op_dk(dk, *args, **kwargs)
raise NotImplementedError(func)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
from torch.fx.experimental.proxy_tensor import maybe_enable_thunkify
from .ops import jagged_torch_function
# This should be removed after
# https://github.com/pytorch/pytorch/pull/125941/ lands
with maybe_enable_thunkify():
try:
return jagged_torch_function(func, *args, **kwargs)
except NotImplementedError:
pass
with torch._C.DisableTorchFunctionSubclass():
return func(*args, **kwargs)
# NB: These fake view autograd.Functions are superseded by real view ops. Don't use them!
# TODO: Remove ViewBufferFromNested, ViewNestedFromBuffer, and buffer_from_jagged once the
# internal BC period has passed.
# Not actually a view!
class ViewBufferFromNested(torch.autograd.Function):
@staticmethod
def forward(ctx, x: NestedTensor): # type: ignore[override]
ctx.save_for_backward(x.offsets())
ctx.metadata_cache = x._metadata_cache
ctx.ragged_idx = x._ragged_idx
return x._values
@staticmethod
def backward(ctx, gO: torch.Tensor): # type: ignore[override]
(offsets,) = ctx.saved_tensors
return NestedTensor(
gO,
offsets=offsets,
_metadata_cache=ctx.metadata_cache,
_ragged_idx=ctx.ragged_idx,
)
# Not actually a view!
class ViewNestedFromBuffer(torch.autograd.Function):
@staticmethod
def forward(
ctx,
values: torch.Tensor,
offsets: torch.Tensor,
metadata_cache: Optional[Dict[str, Any]] = None,
): # type: ignore[override]
        # maintain BC with usages of this function where the seqlens are stuffed
        # directly into the metadata cache as non-Tensors / ints
if metadata_cache is not None:
min_seqlen = metadata_cache.get("min_seqlen", None)
max_seqlen = metadata_cache.get("max_seqlen", None)
if min_seqlen is not None and not isinstance(min_seqlen, torch.Tensor):
metadata_cache["min_seqlen"] = _store_val_in_tensor(min_seqlen)
if max_seqlen is not None and not isinstance(max_seqlen, torch.Tensor):
metadata_cache["max_seqlen"] = _store_val_in_tensor(max_seqlen)
return NestedTensor(
values.detach(),
offsets=offsets,
_metadata_cache=metadata_cache,
)
@staticmethod
def backward(ctx, gO: NestedTensor): # type: ignore[override]
return gO._values, None, None
def buffer_from_jagged(jagged):
return ViewBufferFromNested.apply(jagged)
# Need to make it obvious that users should be passing in offsets
def jagged_from_list(
tensors: List[torch.Tensor],
offsets: Optional[torch.Tensor],
dtype=None,
device=None,
) -> tuple[NestedTensor, torch.Tensor]:
"""Constructs a NestedTensor backed by jagged layout from a list of tensors"""
if len(tensors) == 0:
raise RuntimeError("Cannot construct a nested tensor from an empty tensor list")
if not len(set(t.dtype for t in tensors)) == 1: # noqa: C401
raise RuntimeError(
"When constructing a nested tensor, all tensors in list must have the same dtype"
)
if not len(set(t.device for t in tensors)) == 1: # noqa: C401
raise RuntimeError(
"When constructing a nested tensor, all tensors in list must be on the same device"
)
if not len(set(t.dim() for t in tensors)) == 1: # noqa: C401
raise RuntimeError(
"When constructing a nested tensor, all tensors in list must have the same dim"
)
component_dim = tensors[0].dim()
if component_dim == 0:
raise RuntimeError(
"Cannot construct a nested tensor from a list of zero-dim tensors"
)
# Check that the NT is representable by the jagged layout, which
# allows for a single ragged dimension after the batch dim.
# e.g. (B, *, D_0, ..., D_N), (B, D_0, *, ..., D_N), etc.
sizes = [t.shape for t in tensors]
ragged_idx = None
for d in range(component_dim):
dim_is_ragged = any(size[d] != sizes[0][d] for size in sizes)
if dim_is_ragged:
if ragged_idx is None:
# add 1 to convert to outer NJT dim space
ragged_idx = d + 1
else:
raise RuntimeError(
"Cannot represent given tensor list as a nested tensor with the jagged layout. "
"Note that the jagged layout only allows for a single ragged dimension. "
"For example: (B, *, D_0, D_1, ..., D_N), with ragged * dim."
)
# allow for a rectangular NJT and default the ragged dim next to the batch dim
if ragged_idx is None:
ragged_idx = 1
# Set properties appropriately.
values = torch.cat(tensors, dim=(ragged_idx - 1))
to_kwargs = {}
if device is not None:
to_kwargs["device"] = device
if dtype is not None:
to_kwargs["dtype"] = dtype
values = values.to(**to_kwargs)
# Calculate jagged offsets if not provided.
if offsets is None:
# Jagged layout specifies that offsets are stored as int64 on the same device as values.
# TODO: An alternative way to construct offsets is to use F.pad. This avoids creating
# an extra leaf tensor during the forward, potentially resolving compatibility issues.
offsets = torch.cat(
[
torch.zeros(1, dtype=torch.int64, device=values.device),
torch.tensor(
[s[ragged_idx - 1] for s in sizes], device=values.device
).cumsum(dim=0),
]
)
# compute this now since it's easy
min_seqlen = min(t.shape[ragged_idx - 1] for t in tensors)
max_seqlen = max(t.shape[ragged_idx - 1] for t in tensors)
ret_nt = nested_view_from_values_offsets(
values,
offsets,
min_seqlen=min_seqlen,
max_seqlen=max_seqlen,
ragged_idx=ragged_idx,
)
return (ret_nt, offsets) # type: ignore[return-value]
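# A minimal sketch (editor's addition, not part of the original file): building an NJT
# from three variable-length sequences with embedding dim 4. values is the dim-0
# concatenation and offsets is the zero-prefixed cumulative sum of sequence lengths.
#
#   ts = [torch.randn(2, 4), torch.randn(5, 4), torch.randn(3, 4)]
#   nt, offsets = jagged_from_list(ts, offsets=None)
#   nt._values.shape   # torch.Size([10, 4])
#   offsets            # tensor([ 0,  2,  7, 10])
#   nt._ragged_idx     # 1 (ragged dim directly follows the batch dim)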
def jagged_from_tensor_and_lengths(
tensor: torch.Tensor, starts: torch.Tensor, lengths: torch.Tensor
) -> tuple[NestedTensor, torch.Tensor, Optional[torch.Tensor]]:
"""Constructs a NestedTensor backed by jagged layout from a tensor, starts of sequences, and sequence lengths"""
batch_size = tensor.shape[0]
if is_expandable_to(starts.shape, (batch_size,)) and is_expandable_to(
lengths.shape, (batch_size,)
):
start_list = starts.expand(batch_size)
length_list = lengths.expand(batch_size)
else:
raise RuntimeError(
"When constructing a jagged nested tensor using narrow(), "
"your start and length must be Tensors that broadcast to input.shape[0]"
)
# Calculate jagged offsets
assert (
len(tensor.shape) >= 2
), "tensor must at least be 2D for the nested narrow op to work"
max_seq_len = tensor.shape[1]
offset_lengths = max_seq_len * torch.arange(
0, batch_size, dtype=torch.int64, device=tensor.device
)
# Jagged layout specifies that offsets are stored as int64 on the same device as values.
offsets = torch.cat(
[
start_list + offset_lengths,
(start_list[-1] + offset_lengths[-1] + length_list[-1]).unsqueeze(0),
]
)
# Reshape buffer to flatten the 1st and 2nd dimension (view used to enforce non-copy)
if len(tensor.shape) > 2:
values = tensor.view(-1, *tensor.shape[2:])
else:
values = tensor.view(-1)
# Check if offsets and lengths make it possibly contiguous and return a regular NT
is_contiguous = True
orig_dim = tensor.shape[1]
if torch.any(length_list[1:-1].ne(orig_dim)):
is_contiguous = False
if torch.any(offsets[1:-2].diff().ne(orig_dim)):
is_contiguous = False
if offsets[0] + length_list[0] != orig_dim:
is_contiguous = False
actual_max_seqlen = int(torch.max(lengths).item())
min_seqlen = int(torch.min(lengths).item())
if is_contiguous:
ret_nt = nested_view_from_values_offsets(
values[offsets[0] : offsets[-1]],
offsets - offsets[0],
min_seqlen=min_seqlen,
max_seqlen=actual_max_seqlen,
)
else:
ret_nt = nested_view_from_values_offsets_lengths(
values,
offsets,
length_list,
min_seqlen=min_seqlen,
max_seqlen=actual_max_seqlen,
)
return (ret_nt, offsets, None if is_contiguous else length_list)
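# A minimal sketch (editor's addition, not part of the original file) of the narrow()
# style construction above. Batch item i selects lengths[i] elements of dim 1 starting
# at starts[i]; when the selections do not tile the buffer contiguously, an NJT with
# "holes" (both offsets and lengths) is returned as a view of the flattened input.
#
#   t = torch.randn(3, 10, 4)
#   starts = torch.tensor([0, 2, 1])
#   lengths = torch.tensor([5, 3, 7])
#   nt, offsets, out_lengths = jagged_from_tensor_and_lengths(t, starts, lengths)
#   offsets       # tensor([ 0, 12, 21, 28]) -- starts shifted into the flattened buffer
#   out_lengths   # tensor([5, 3, 7]) -- non-contiguous, so lengths are kept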
# NB: A dummy arg is required so that NestedTensor.__torch_dispatch__() is invoked
# for _nested_view_from_values_offsets(). Sizes don't matter much, but they shouldn't be
# 0/1 because the dummy can be fake-ified and we want to avoid specializing.
# This arg is otherwise unused.
_dummy_instance: Optional[torch.Tensor] = None
def _nt_view_dummy() -> torch.Tensor:
global _dummy_instance
if _dummy_instance is None:
_dummy_instance = NestedTensor(
values=torch.zeros(3, 3, device="meta"),
offsets=torch.zeros(3, device="meta", dtype=torch.int64),
).detach()
return _dummy_instance
def nested_view_from_values_offsets(
values, offsets, ragged_idx=1, min_seqlen=None, max_seqlen=None
):
min_seqlen_tensor = None
if min_seqlen is not None:
min_seqlen_tensor = _store_val_in_tensor(min_seqlen)
max_seqlen_tensor = None
if max_seqlen is not None:
max_seqlen_tensor = _store_val_in_tensor(max_seqlen)
return torch._nested_view_from_jagged( # type: ignore[attr-defined]
values,
offsets,
_nt_view_dummy(),
None,
ragged_idx,
min_seqlen_tensor,
max_seqlen_tensor,
) # type: ignore[return-value]
def nested_view_from_values_offsets_lengths(
values, offsets, lengths, ragged_idx=1, min_seqlen=None, max_seqlen=None
):
min_seqlen_tensor = None
if min_seqlen is not None:
min_seqlen_tensor = _store_val_in_tensor(min_seqlen)
max_seqlen_tensor = None
if max_seqlen is not None:
max_seqlen_tensor = _store_val_in_tensor(max_seqlen)
return torch._nested_view_from_jagged( # type: ignore[attr-defined]
values,
offsets,
_nt_view_dummy(),
lengths,
ragged_idx,
min_seqlen_tensor,
max_seqlen_tensor,
) # type: ignore[return-value]
def nested_from_padded(
padded, offsets, ragged_idx=1, min_seqlen=None, max_seqlen=None, sum_S=None
):
min_seqlen_tensor = None
if min_seqlen is not None:
min_seqlen_tensor = _store_val_in_tensor(min_seqlen)
max_seqlen_tensor = None
if max_seqlen is not None:
max_seqlen_tensor = _store_val_in_tensor(max_seqlen)
return torch._nested_from_padded_tensor(
padded,
offsets,
_nt_view_dummy(),
ragged_idx,
min_seqlen_tensor,
max_seqlen_tensor,
sum_S,
)
```
|
=====================================================================================================================
SOURCE CODE FILE: ops.py
LINES: 1
SIZE: 98.01 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nested\_internal\ops.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import functools
import math
import operator
from typing import * # noqa: F403
from typing import Optional
import torch
import torch.nn.functional as F
from torch.fx.operator_schemas import normalize_function
from torch.nested._internal.sdpa import jagged_scaled_dot_product_attention
from .nested_tensor import NestedTensor
__all__: list[Any] = []
JAGGED_OPS_TABLE: Dict[Any, Any] = {}
def _outer_to_inner_dim(ndim, dim, ragged_dim, canonicalize=False):
from torch._prims_common import canonicalize_dims
if isinstance(dim, (tuple, list)):
output = type(dim)(_outer_to_inner_dim(ndim, d, ragged_dim) for d in dim)
# ensure no duplicates, which can result from both batch and ragged mapping to 0
return type(output)(dict.fromkeys(output))
if canonicalize:
dim = canonicalize_dims(ndim, dim)
assert dim >= 0 and dim < ndim
# Map dim=0 (AKA batch dim) -> packed dim i.e. outer ragged dim - 1.
# For other dims, subtract 1 to convert to inner space.
return ragged_dim - 1 if dim == 0 else dim - 1
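# A minimal sketch (editor's addition): how outer NJT dims map onto dims of the packed
# values buffer for a 3-dim NJT of shape (B, j0, D) with ragged_dim=1.
#
#   _outer_to_inner_dim(3, 0, 1)          # 0 -- the batch dim maps to the packed dim
#   _outer_to_inner_dim(3, 1, 1)          # 0 -- so does the ragged dim
#   _outer_to_inner_dim(3, 2, 1)          # 1
#   _outer_to_inner_dim(3, (0, 1, 2), 1)  # (0, 1) -- duplicates collapsed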
def _wrap_jagged_dim(
ndim,
dim,
ragged_dim,
op_name,
convert_to_inner_dim=True,
allow_ragged_dim=False,
allow_batch_dim=False,
):
from torch._prims_common import canonicalize_dims
wrapped = canonicalize_dims(ndim, dim)
if wrapped == ragged_dim and not allow_ragged_dim:
raise RuntimeError(f"{op_name}(): not supported for NestedTensor on ragged dim")
elif wrapped == 0 and not allow_batch_dim:
raise RuntimeError(f"{op_name}(): not supported for NestedTensor on dim=0")
ret = (
_outer_to_inner_dim(ndim, wrapped, ragged_dim)
if convert_to_inner_dim
else wrapped
)
if allow_batch_dim:
# Need to disambiguate whether we're operating on the batch dim or not.
# Operating on dim=1 -> dim=0 after the inner dim conversion.
operating_on_batch = wrapped == 0
return (ret, operating_on_batch)
return ret
def _wrap_jagged_dims(ndim, dims, op_name, ragged_idx=1):
"""
    For NestedTensor operators, wraps dimensions to non-negative values and returns
    metadata related to the reduction dimension(s).
"""
from torch._prims_common import canonicalize_dims
assert isinstance(
dims, (tuple, list)
), f"_wrap_jagged_dims(): cannot iterate over dimensions of type {type(dims)}"
wrapped_dims = [
canonicalize_dims(ndim, d) for d in dims
] # convert all indices to non-negative values
operate_on_batch = 0 in wrapped_dims
operate_on_ragged = ragged_idx in wrapped_dims
operate_on_non_batch = any(d != 0 and d != ragged_idx for d in wrapped_dims)
# ensure no duplicates, which can result from both batch and ragged mapping to 0
outer_to_inner_dim = tuple(
dict.fromkeys(_outer_to_inner_dim(ndim, d, ragged_idx) for d in wrapped_dims)
)
return outer_to_inner_dim, operate_on_batch, operate_on_ragged, operate_on_non_batch
def check_schema(schema_str: str, func, *args, **kwargs) -> None:
named_arg_types = schema_str.split(", ")
num_optional_args = [x.endswith("?") for x in named_arg_types].count(True)
min_args = len(named_arg_types) - num_optional_args
    # special case: a trailing ellipsis ("...") allows any number of unchecked args at the end
if named_arg_types[-1] == "...":
named_arg_types = named_arg_types[:-1]
else:
if not (len(args) >= min_args and len(args) <= len(named_arg_types)):
raise ValueError(
f"NestedTensor {func.__name__}({schema_str}): expected at least {min_args} "
f"arguments and at most {len(named_arg_types)} arguments, but got: "
f"{len(args)} arguments"
)
arg_type_check_fns = {
"t": lambda x: isinstance(x, torch.Tensor) and not isinstance(x, NestedTensor),
"jt": lambda x: isinstance(x, NestedTensor)
and x._lengths is None
and x._ragged_idx == 1, # ops with "jt" require contiguous JT only
"jt_all": lambda x: isinstance(
x, NestedTensor
), # ops with "jt_all" can accept all kinds of JT
"any": lambda x: True,
}
for i, named_arg_type in enumerate(named_arg_types):
name, arg_type = named_arg_type.split(": ")
is_optional = arg_type.endswith("?")
normalized_arg_type = arg_type[:-1] if is_optional else arg_type
if normalized_arg_type not in arg_type_check_fns.keys():
raise AssertionError(f"Unknown arg type: {normalized_arg_type}")
if i >= len(args):
if not is_optional:
raise ValueError(
f"NestedTensor {func.__name__}({schema_str}) "
f"missing required argument: {name}"
)
continue
_check_fn = arg_type_check_fns[normalized_arg_type]
def check_fn(x, is_optional=is_optional):
if is_optional:
return x is None or _check_fn(x)
else:
return _check_fn(x)
if not check_fn(args[i]):
type_to_desc = {
"t": "tensor",
"t?": "optional tensor",
"jt": "contiguous jagged layout NestedTensor",
"jt_all": "jagged layout NestedTensor",
"any": "<any type>",
}
raise ValueError(
f"NestedTensor {func.__name__}({schema_str}): expected {name} to be a "
f"{type_to_desc[arg_type]}"
)
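# A minimal sketch (editor's addition) of the mini-schema strings consumed above and
# used with register_jagged_func below. Each entry is "<name>: <type>" where <type> is
# "t" (dense tensor), "jt" (contiguous jagged NT), "jt_all" (any jagged NT) or "any";
# a trailing "?" marks the arg optional and a final "..." allows extra unchecked args.
#
#   check_schema("self: jt_all, dim: any, keepdim: any?", some_func, nt, 0)
#
# checks that 2-3 positional args were passed and that the first is a jagged
# NestedTensor; `some_func` and `nt` are hypothetical placeholders here.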
def check_ragged_dim_same(
func, a: NestedTensor, a_name: str, b: NestedTensor, b_name: str
) -> None:
# Calling into .shape here
if a._size[a._ragged_idx] != b._size[b._ragged_idx]:
raise RuntimeError(
f"NestedTensor {func.__name__}: expected {a_name} and {b_name} to have the "
"same exact offsets tensor."
)
# returns True if the raggedness-relevant portions of the NT shape
# match those of the specified size
def raggedness_matches(nt, size):
end = nt._ragged_idx + 1
nt_ragged = nt._size[:end]
size_ragged = size[:end]
return len(nt_ragged) == len(size_ragged) and (
all(ns == s or s == -1 for ns, s in zip(nt_ragged, size_ragged))
)
def squeeze_leading_ones(t):
# Note: [ Squeezing leading ones ]
#
# Squeeze leading ones from t.
#
# We want:
# (B, j0, ?, ?) + (1, 1, ?, ?) -> (B, j0, ?, ?)
# (B, j0, ?, ?) + (1, 1, 1, ?, ?) -> (1, B, j0, ?, ?) (not yet supported)
#
# 1) Squeeze extra ones and grab values from NT
# (1, 1, ?, ?) -> (?, ?) and (sum(*), ?, ?) -> (B, j0, ?, ?)
# 2) Do dense broadcasting:
# (sum(*), ?, ?) + (?, ?) -> (sum(*), ?, ?)
# 3) Construct nested tensor
# (sum(*), ?, ?) -> (B, j0, ?, ?)
#
# If unsqueezing on the 0th dim becomes supported, we would unsqueeze
# at step (4) and we would need to update this function to record how
# many ones we unsqueezed.
while t.dim() > 0 and t.shape[0] == 1:
t = t.squeeze(0)
return t
def register_func(tables, aten_ops, schema_str):
if not isinstance(aten_ops, list):
aten_ops = [aten_ops]
if not isinstance(tables, list):
tables = [tables]
def wrapper(func):
for aten_op in aten_ops:
def get_inner(aten_op):
def inner(*args, **kwargs):
check_schema(schema_str, func, *args, **kwargs)
return func(aten_op, *args, **kwargs)
return inner
for table in tables:
table[aten_op] = get_inner(aten_op)
return func
return wrapper
register_jagged_func = functools.partial(register_func, JAGGED_OPS_TABLE)
def lookup_jagged(func, *args, **kwargs) -> Optional[Callable]:
dispatch_func = JAGGED_OPS_TABLE.get(func, None)
if dispatch_func is not None:
return dispatch_func
# Handle pointwise fallbacks
if torch.Tag.pointwise in func.tags:
from torch.fx.experimental.symbolic_shapes import is_nested_int
# No pointwise ops legitimately accept nested int inputs. Without this check,
# they will be incorrectly interpreted as tensors.
# See https://github.com/pytorch/pytorch/issues/138496
for arg in args:
if is_nested_int(arg):
raise RuntimeError(
f"NestedTensor {func.__name__}: invalid argument {arg}"
)
        # Assume the only tensor args are the "unary/binary" args themselves
num_tensor_args = sum(isinstance(x, torch.Tensor) for x in args)
if num_tensor_args == 1:
# Build up the check schema string. The first tensor arg is assumed to be
# an NJT and other args are sent through as-is.
schema_parts = []
for arg in func._schema.arguments:
if isinstance(arg.type, torch.TensorType):
schema_parts.append(f"{arg.name}: jt_all")
break
else:
schema_parts.append(f"{arg.name}: any")
schema_parts.append("...")
check_schema_str = ", ".join(schema_parts)
check_schema(check_schema_str, func, *args, **kwargs)
return functools.partial(jagged_unary_pointwise, func)
elif num_tensor_args == 2:
check_schema("lhs: any, rhs: any, ...", func, *args, **kwargs)
return functools.partial(jagged_binary_pointwise, func)
return None
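# A minimal sketch (editor's addition): ops explicitly registered in JAGGED_OPS_TABLE
# take priority; otherwise, ops tagged pointwise fall back to jagged_unary_pointwise /
# jagged_binary_pointwise, which run the op on the packed values and rewrap. Assuming
# sin / add carry the pointwise tag, are not otherwise registered, and nt is a
# NestedTensor, e.g.:
#
#   lookup_jagged(torch.ops.aten.sin.default, nt)      # -> unary pointwise fallback
#   lookup_jagged(torch.ops.aten.add.Tensor, nt, nt)   # -> binary pointwise fallback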
def extract_kwargs(arg):
kwargs = {
"offsets": arg.offsets(),
"lengths": arg.lengths(),
"_metadata_cache": arg._metadata_cache,
"_ragged_idx": arg._ragged_idx,
}
return kwargs
def jagged_unary_pointwise(func, *args, **kwargs):
# assume if we get here that there is a single NJT input in the args
njt = next(arg for arg in args if isinstance(arg, NestedTensor))
return NestedTensor(
func(*(arg._values if arg is njt else arg for arg in args), **kwargs),
**extract_kwargs(njt),
)
def jagged_binary_pointwise(func, *args, **kwargs):
a, b = args[0], args[1]
assert isinstance(a, NestedTensor) or isinstance(b, NestedTensor)
mismatch_error_msg = (
"cannot call binary pointwise function {} with inputs of shapes {} and {}"
)
# a is NT, b is NT
if isinstance(a, NestedTensor) and isinstance(b, NestedTensor):
# ex: (B, j0, D) + (B, j0, D)
# ex: (B, j0, D) + (B, j0, 1)
if raggedness_matches(a, b._size):
return NestedTensor(
func(a._values, b._values, *args[2:], **kwargs), **extract_kwargs(a)
)
raise RuntimeError(mismatch_error_msg.format(func.__name__, a._size, b._size))
# either a is NT or b is NT at this point
a_is_nt = isinstance(a, NestedTensor)
extracted_kwargs = extract_kwargs(a) if a_is_nt else extract_kwargs(b)
# === Handle broadcasting across the batch / ragged dims ===
# Easy case: take advantage of pre-existing broadcasting logic
# ex: (B, j0, ?, ?) + (?) -> (B, j0, ?, ?)
# ex: (B, j0, ?, ?) + (?, ?) -> (B, j0, ?, ?)
# ex: (B, j0, ?, ?) + (1, 1, ?, ?) -> (B, j0, ?, ?)
nt, t = (a, b) if a_is_nt else (b, a)
# See Note: [ Squeezing leading ones ]
if t.dim() > nt.dim():
raise NotImplementedError("NYI: broadcasting NT with T with larger dim")
t_squeezed = squeeze_leading_ones(t)
if nt.dim() >= t_squeezed.dim() + 2:
lhs, rhs = (nt._values, t_squeezed) if a_is_nt else (t_squeezed, nt._values)
return NestedTensor(func(lhs, rhs, *args[2:], **kwargs), **extracted_kwargs)
# Harder case: do manual broadcasting when NT dim == non-NT dim
# ex: (B, j0, D_0, D_1) + (B, 1, D_0, D_1) -> (B, j0, D_0, D_1)
if a.dim() == b.dim():
# ex: (B, j0, D_0, D_1) + (1, 1, D_0, D_1) -> should
# be (B, j0, D_0, D_1) but not yet supported
if a.shape[0] != b.shape[0]:
raise RuntimeError(
mismatch_error_msg.format(func.__name__, a.shape, b.shape)
)
from .nested_tensor import nested_from_padded
# handle broadcasting via padded dense -> jagged conversion
min_seqlen = nt._maybe_min_seqlen
max_seqlen = nt._maybe_max_seqlen
padded_max_S = max_seqlen
total_L = nt._values.shape[nt._ragged_idx - 1]
if padded_max_S is None:
# use upper bound on max seqlen if it's not present
padded_max_S = total_L
# convert dense tensor -> jagged
t = t.expand(
[x if i != nt._ragged_idx else padded_max_S for i, x in enumerate(t.shape)]
)
t_as_nt = nested_from_padded(
t,
offsets=nt._offsets,
ragged_idx=nt._ragged_idx,
sum_S=total_L,
min_seqlen=min_seqlen,
max_seqlen=max_seqlen,
)
# function call with two NJTs
lhs, rhs = (nt, t_as_nt) if a_is_nt else (t_as_nt, nt)
return func(lhs, rhs, *args[2:], **kwargs)
# ex: (B, j0, D_0, D_1) + (A, B, 1, D_0, D_1) -> error because this breaks the invariant
# that ragged dim is wrt left-most batch dim
raise RuntimeError(mismatch_error_msg.format(func.__name__, a.shape, b.shape))
def jagged_torch_function(func, *args, **kwargs):
# SDPA has special kernels that handle nested tensors.
# Dispatch to the correct implementation here
if func is torch._C._nn.scaled_dot_product_attention:
return jagged_scaled_dot_product_attention(*args, **kwargs)
if func.__name__ == "apply_":
func(args[0]._values, *args[1:], **kwargs)
return args[0]
# Handle flatten() here because it's CompositeImplicit.
if func.__name__ == "flatten":
def _flatten_sig(input, start_dim=0, end_dim=-1):
pass
_, new_kwargs = normalize_function( # type: ignore[misc]
_flatten_sig, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
# NB: stay in outer dim space because we're going to redispatch on a NT input
start_dim = _wrap_jagged_dim(
inp.dim(),
new_kwargs["start_dim"],
inp._ragged_idx,
"flatten",
convert_to_inner_dim=False,
)
end_dim = _wrap_jagged_dim(
inp.dim(),
new_kwargs["end_dim"],
inp._ragged_idx,
"flatten",
convert_to_inner_dim=False,
)
if start_dim == end_dim:
return inp
product = functools.reduce(operator.mul, inp.shape[start_dim : end_dim + 1])
new_shape = (*inp.shape[:start_dim], product, *inp.shape[end_dim + 1 :])
return inp.reshape(*new_shape)
# Handle nested-specific input validation for CompositeImplicit rms_norm
if func.__name__ == "rms_norm":
def _rms_norm_sig(input, normalized_shape, weight=None, eps=None):
pass
_, new_kwargs = normalize_function( # type: ignore[misc]
_rms_norm_sig, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
normalized_shape = new_kwargs.pop("normalized_shape")
# can't normalize over the ragged dim (yet)
max_normalizable = inp.dim() - inp._ragged_idx - 1
if len(normalized_shape) > max_normalizable:
raise ValueError(
"rms_norm(): Normalization over the ragged dim not supported for nested tensors"
)
with torch._C.DisableTorchFunctionSubclass():
return func(*args, **kwargs)
raise NotImplementedError(func)
@register_jagged_func(
[
torch.ops.aten.is_non_overlapping_and_dense.default,
torch.ops.aten.sym_size.default,
torch.ops.aten.dim.default,
torch.ops.aten.numel.default,
torch.ops.aten.sym_numel.default,
torch.ops.aten.sym_stride.default,
torch.ops.aten.sym_storage_offset.default,
],
"self: jt_all",
)
def tensor_attr_supported_getter(func, *args, **kwargs):
if func == torch.ops.aten.is_non_overlapping_and_dense.default:
return False
if func == torch.ops.aten.sym_size.default:
return args[0]._size
if func == torch.ops.aten.dim.default:
return len(args[0]._size)
if func in (torch.ops.aten.sym_numel.default, torch.ops.aten.numel.default):
if args[0]._lengths is not None:
return int(sum(args[0]._lengths) * math.prod(args[0]._size[2:]))
return args[0]._values.numel()
if func == torch.ops.aten.sym_stride.default:
return args[0]._strides
if func == torch.ops.aten.sym_storage_offset.default:
return args[0]._values.storage_offset()
@register_jagged_func(torch.ops.prim.layout.default, "self: jt_all")
def prim_layout_default(func, *args, **kwargs):
return torch.jagged
@register_jagged_func(
[torch.ops.aten.size.default],
"self: jt_all",
)
def tensor_attr_unsupported_getter(func, *args, **kwargs):
if func == torch.ops.aten.size.default:
raise RuntimeError(
"NestedTensor does not support directly calling torch.ops.aten.size; "
"please use `nested_tensor.size()` instead."
)
@register_jagged_func(torch.ops.aten.is_contiguous.default, "self: jt_all")
def is_contiguous_general(func, *args, **kwargs):
from torch._prims_common import is_contiguous_for_memory_format
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
# If created from narrow() check for lengths
if inp.lengths() is not None:
return False
new_kwargs["memory_format"] = new_kwargs.get(
"memory_format", torch.contiguous_format
)
if new_kwargs["memory_format"] == torch.preserve_format:
return True
return is_contiguous_for_memory_format(inp._values, **new_kwargs)
register_jagged_func(
torch.ops.aten.is_contiguous.memory_format, "self: jt_all, memory_format: any?"
)(is_contiguous_general)
@register_jagged_func(
torch.ops.aten.clone.default, "input: jt_all, memory_format: any?"
)
def clone_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
new_meta = extract_kwargs(inp)
if inp._lengths is not None:
if new_kwargs["memory_format"] == torch.contiguous_format:
# need to copy to remove "holes" non-contiguity / lengths metadata
# TODO: write a kernel for this
from .nested_tensor import jagged_from_list
# TODO: We probably want the output to have the same ragged structure / nested int.
assert (
inp._ragged_idx == 1
), "NJT with ragged_idx != 1 not supported for contiguous clone"
contig, _ = jagged_from_list(inp.unbind(), offsets=None)
return contig
return NestedTensor(func(inp._values, **new_kwargs), **new_meta)
@register_jagged_func(torch.ops.aten.linear.default, "input: jt, weight: t, bias: t?")
def linear_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
@register_jagged_func(
torch.ops.aten.linear_backward.default,
"self: jt, grad_output: jt, weight: t, output_mask: any",
)
def linear_backward_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
grad_output = new_kwargs.pop("grad_output")
weight = new_kwargs.pop("weight")
output_mask = new_kwargs.pop("output_mask")
ds, dw, db = None, None, None
check_ragged_dim_same(func, inp, "self", grad_output, "grad_output")
if output_mask[0]:
ds = NestedTensor(
torch.matmul(grad_output._values, weight), **extract_kwargs(grad_output)
)
if output_mask[1]:
# NB: Fold dims of values for input and grad_output to treat them as 2D. This
# trick avoids materializing large intermediates and immediately reducing over
# them via sum(). This is equivalent to computing:
# torch.matmul(grad_output._values.transpose(-2, -1), inp._values)
# and then summing over the leading dimensions to get a 2D weight grad.
grad_2d = grad_output._values.reshape(-1, weight.size(0))
input_2d = inp._values.reshape(-1, weight.size(1))
dw = torch.matmul(grad_2d.t(), input_2d)
if output_mask[2]:
# Sum over all but the last dim to get a 1D bias grad. We cannot
# rely on the autograd engine to reduce for us, because returning a
# tensor aliasing the input would violate the aten signature annotation
reduce_dims = tuple(range(grad_output._values.ndim - 1))
if reduce_dims == ():
db = grad_output._values.clone()
else:
db = torch.sum(grad_output._values, reduce_dims, keepdim=False)
return (ds, dw, db)
@register_jagged_func(torch.ops.aten.to.dtype, "input: jt_all, dtype: any")
def to_dtype(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
@register_jagged_func(torch.ops.aten._to_copy.default, "self: jt_all")
def to_copy_default(func, *args, **kwargs):
from .nested_tensor import _tensor_symint_registry
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
# don't change layout
new_kwargs.pop("layout")
new_values = func(inp._values, **new_kwargs)
new_offsets = inp._offsets.to(device=new_values.device)
new_lengths = None
if inp._lengths is not None:
new_lengths = inp._lengths.to(device=new_values.device)
from torch._subclasses.fake_tensor import FakeTensor
from torch._subclasses.functional_tensor import (
FunctionalTensor,
mb_unwrap_functional_tensor,
)
ragged_source = inp._offsets if inp._lengths is None else inp._lengths
new_thing = new_offsets if new_lengths is None else new_lengths
if isinstance(new_thing, (FakeTensor, FunctionalTensor)):
# Temporary hack until we have the union find
tgt = mb_unwrap_functional_tensor(new_thing)
src = mb_unwrap_functional_tensor(ragged_source)
tgt.nested_int_memo = src.nested_int_memo
else:
_tensor_symint_registry[new_thing] = _tensor_symint_registry[ragged_source]
inp_kwargs = extract_kwargs(inp)
inp_kwargs["offsets"] = new_offsets
inp_kwargs["lengths"] = new_lengths
output = NestedTensor(new_values, **inp_kwargs)
return output
@register_jagged_func(
torch.ops.aten.copy_.default, "self: jt_all, src: jt_all, non_blocking: any?"
)
def copy_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
src = new_kwargs.pop("src")
if inp._size != src._size:
# try to recursively copy_ on unbound components to get around nested int mismatch
# TODO: eventually do a direct copy when this is possible
inp_comps = inp.unbind()
inp_comp_shapes = [c.shape for c in inp_comps]
src_comps = src.unbind()
src_comp_shapes = [c.shape for c in src_comps]
if inp_comp_shapes != src_comp_shapes:
raise RuntimeError(
"copy_(): expected compatible input and src shapes, but got: "
f"{inp.shape} and {src.shape}"
)
for inp_comp, src_comp in zip(inp_comps, src_comps):
inp_comp.copy_(src_comp)
    # AOTD allows mutations of inputs only (not views of the inputs).
    # NJT.values() returns _values.detach() to work around some issues.
    # To keep the mutation in the graph, AOTD manually calls copy_ on the input (NJT).
    # Here we directly mutate self._values so we don't emit .detach() in the graph, which would make it non-compilable.
inp._values.copy_(src._values)
return inp
register_jagged_func(torch.ops.aten.detach.default, "self: jt_all")(
jagged_unary_pointwise
)
@register_jagged_func(
[
torch.ops.aten.empty_like.default,
torch.ops.aten.ones_like.default,
torch.ops.aten.zeros_like.default,
torch.ops.aten.rand_like.default,
torch.ops.aten.randn_like.default,
],
"self: jt_all",
)
def like_factory_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
# Default layout is technically torch.strided but only jagged is supported here.
# Rather than force users to specify the layout, assume jagged.
# This should be set to strided for redispatching on values.
new_kwargs["layout"] = torch.strided
new_values = func(inp._values, **new_kwargs)
new_offsets = inp._offsets.to(device=new_values.device)
new_lengths = None
if inp._lengths is not None:
new_lengths = inp._lengths.to(device=new_values.device)
output_kwargs = extract_kwargs(inp)
if "offsets" in output_kwargs:
output_kwargs["offsets"] = new_offsets
if "lengths" in output_kwargs:
output_kwargs["lengths"] = new_lengths
if inp.device != new_values.device:
# Update the nested int registry to indicate that the ragged structure is the same
# between the two offsets / lengths on different devices.
from torch._subclasses.fake_tensor import FakeTensor
from torch._subclasses.functional_tensor import (
FunctionalTensor,
mb_unwrap_functional_tensor,
)
from .nested_tensor import _tensor_symint_registry
ragged_source = inp._offsets if inp._lengths is None else inp._lengths
new_thing = new_offsets if new_lengths is None else new_lengths
if isinstance(new_thing, (FakeTensor, FunctionalTensor)):
# Temporary hack until we have the union find
tgt = mb_unwrap_functional_tensor(new_thing)
src = mb_unwrap_functional_tensor(ragged_source)
tgt.nested_int_memo = src.nested_int_memo
else:
_tensor_symint_registry[new_thing] = _tensor_symint_registry[ragged_source]
return NestedTensor(new_values, **output_kwargs)
register_jagged_func(torch.ops.aten.full_like.default, "self: jt_all, fill_value: any")(
like_factory_default
)
register_jagged_func(torch.ops.aten.randint_like.default, "self: jt_all, high: any")(
like_factory_default
)
register_jagged_func(
torch.ops.aten.randint_like.low_dtype, "self: jt_all, low: any, high: any"
)(like_factory_default)
@register_jagged_func(torch.ops.aten.zero_.default, "self: jt_all")
def zero__default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
func(inp._values)
return inp
@register_jagged_func(
torch.ops.aten._softmax.default, "self: jt_all, dim: any, half_to_float: any"
)
def _softmax_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
if isinstance(new_kwargs["dim"], tuple):
raise RuntimeError(
"softmax(): not supported for dimensions of type 'tuple' for NestedTensor"
)
inp = new_kwargs.pop("input")
(
new_kwargs["dim"],
reduce_on_batch,
reduce_on_ragged,
_reduce_on_non_batch,
) = _wrap_jagged_dims(
inp.dim(),
(new_kwargs["dim"],),
"softmax",
inp._ragged_idx,
)
if reduce_on_batch:
raise RuntimeError(
"softmax(): not supported when reducing across the batch dimension for NestedTensor"
)
if reduce_on_ragged and inp._ragged_idx > 1:
raise RuntimeError(
"softmax(): not supported when reducing along the ragged dimension for ragged_idx > 1 for NestedTensor"
)
if reduce_on_ragged and inp._lengths is not None:
raise RuntimeError(
"softmax(): not supported where lengths is not None "
+ "if reducing across the ragged dimension for NestedTensor"
)
new_kwargs["dim"] = new_kwargs["dim"][
0
] # torch.softmax takes in the reduction dimension as an integer
if reduce_on_ragged:
padded_softmax_values = torch.nn.functional.softmax(
torch.ops.aten._jagged_to_padded_dense_forward(
inp._values.reshape(
inp._values.shape[0], -1
), # values are required to be 2D tensors for j2pd
[inp._offsets],
max_lengths=[inp._max_seqlen], # max length of ragged dimension
padding_value=float("-inf"), # e^-inf = 0
),
dim=inp._ragged_idx,
)
softmax_values = torch.ops.aten._padded_dense_to_jagged_forward(
padded_softmax_values,
[inp._offsets],
total_L=inp._values.shape[
0
], # providing this parameter helps avoid a GPU/CPU sync
).reshape(
-1, *inp._values.shape[1:]
) # expand softmax_values back to original shape (inp._values.shape)
return NestedTensor(softmax_values, **extract_kwargs(inp))
return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
@register_jagged_func(
torch.ops.aten._softmax_backward_data.default,
"grad_output: jt, output: jt, dim: any, input_dtype: any",
)
def _softmax_backward(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
grad_out = new_kwargs.pop("grad_output")
output = new_kwargs.pop("output")
return NestedTensor(
func(grad_out._values, output._values, **new_kwargs), **extract_kwargs(grad_out)
)
@register_jagged_func(
torch.ops.aten.native_dropout.default, "self: jt, float: any, train: any?"
)
def native_dropout_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
out1, out2 = func(inp._values, **new_kwargs)
return (
NestedTensor(out1, **extract_kwargs(inp)),
NestedTensor(out2, **extract_kwargs(inp)),
)
@register_jagged_func(
torch.ops.aten.native_dropout_backward.default,
"grad_output: jt, mask: jt, scale: any",
)
def native_dropout_backward_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
grad_output = new_kwargs.pop("grad_output")
mask = new_kwargs.pop("mask")
return NestedTensor(
func(grad_output._values, mask._values, **new_kwargs),
**extract_kwargs(grad_output),
)
@register_jagged_func(
torch.ops.aten.prod.dim_int,
"self: jt_all, dim: any, keepdim: any?, dtype: any?",
)
def prod_dim_int(func, *args, **kwargs):
return _apply_reduction(func, "prod", 1, *args, **kwargs)
@register_jagged_func(torch.ops.aten.prod.default, "self: jt_all, dtype: any?")
def prod_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return func(inp._values, **new_kwargs)
@register_jagged_func(
torch.ops.aten.split.Tensor, "self: jt, split_size: any, dim: any?"
)
def split_tensor(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
new_kwargs["dim"] = _wrap_jagged_dim(
inp.dim(), new_kwargs["dim"], inp._ragged_idx, "split"
)
return tuple(
NestedTensor(values=x, **extract_kwargs(inp))
for x in func(inp._values, **new_kwargs)
)
@register_jagged_func(
torch.ops.aten.split_with_sizes.default, "self: jt, split_sizes: any, dim: any?"
)
def split_with_sizes_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
new_kwargs["dim"] = _wrap_jagged_dim(
inp.dim(), new_kwargs["dim"], inp._ragged_idx, "split_with_sizes"
)
return [
NestedTensor(values=x, **extract_kwargs(inp))
for x in func(inp._values, **new_kwargs)
]
@register_jagged_func(
torch.ops.aten.narrow.default, "self: jt, dim: any, start: any, length: any"
)
def narrow(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
dim = _wrap_jagged_dim(inp.dim(), new_kwargs["dim"], inp._ragged_idx, "narrow")
values = func(
inp._values,
dim=dim,
start=new_kwargs["start"],
length=new_kwargs["length"],
)
return NestedTensor(values, **extract_kwargs(inp))
@register_jagged_func(torch.ops.aten.chunk.default, "self: jt, chunks: any, dim: any?")
def chunk_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
new_kwargs["dim"], operating_on_batch = _wrap_jagged_dim(
inp.dim(), new_kwargs["dim"], inp._ragged_idx, "chunk", allow_batch_dim=True
)
if operating_on_batch:
chunks = new_kwargs["chunks"]
# get _offsets of the chunks
lengths = inp._offsets.diff()
chunked_lengths = lengths.chunk(chunks)
chunked_offsets = [torch.cumsum(x, dim=0) for x in chunked_lengths]
chunked_offsets = [F.pad(x, (1, 0), value=0) for x in chunked_offsets] # type: ignore[arg-type]
nested_kwargs = [
{"offsets": per_offsets, "_ragged_idx": inp._ragged_idx}
for per_offsets in chunked_offsets
]
# get _values of the chunks
split_sizes = [x.sum().item() for x in chunked_lengths]
chunk_values = inp._values.split(split_sizes)
# Note that the actual number of chunks returned is not necessarily the same as
# the input number; it can be counter-intuitive, but it matches dense behavior.
return [
NestedTensor(values=chunk_values[i], **(nested_kwargs[i]))
for i in range(0, len(chunk_values))
]
else:
return [
NestedTensor(values=x, **extract_kwargs(inp))
for x in func(inp._values, **new_kwargs)
]
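# A minimal sketch (editor's addition): chunking along the batch dim follows dense
# torch.chunk semantics, so fewer than `chunks` pieces may come back. For an NJT with
# batch size 4, chunks=3 splits lengths into pieces of size [2, 2], so only two NJTs
# with batch sizes 2 and 2 are returned (matching torch.arange(4).chunk(3)).
#
#   torch.chunk(nt_with_batch_4, chunks=3, dim=0)   # -> 2 NJTs (hypothetical input)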
@register_jagged_func(torch.ops.aten.unbind.int, "self: jt_all, dim: any?")
def unbind_int(func, *args, **kwargs):
# Note that this specializes on the length of the offsets
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
dim = new_kwargs["dim"]
if dim != 0:
raise RuntimeError("unbind(): only supported for NestedTensor on dim=0")
inp = new_kwargs.pop("input")
values = inp.values()
offsets = inp.offsets()
lengths = inp.lengths()
ragged_idx = inp._ragged_idx
def _torch_check(_lengths: list[int], _offsets: Optional[list[int]] = None):
# This torch._check and torch._check_is_size are needed for torch.compile
# symbolic shapes processing.
# offsets and lengths are symbolic variables during compilation,
# we guarantee the correct offsets/lengths correspondence:
# sum of lengths <= total ragged_dim_size
# every length and offset are size-like variable (allows sym shapes to reason it as [2, inf))
# offset[i] + length[i] <= ragged_dim_size, for unbind and split dim correctness
# offsets[i] <= ragged_dim_size
lengths_sum = 0
ragged_dim_size = values.shape[ragged_idx - 1]
for i in range(len(_lengths)):
torch._check_is_size(_lengths[i])
torch._check(_lengths[i] <= ragged_dim_size)
lengths_sum += _lengths[i]
if _offsets is not None:
torch._check(
_offsets[i] + _lengths[i] <= ragged_dim_size,
lambda: "unbind(): nested tensor offsets and lengths do not match ragged_idx dimension",
)
torch._check(lengths_sum <= ragged_dim_size)
if _offsets is not None:
for i in range(len(_offsets)):
torch._check_is_size(_offsets[i])
torch._check(_offsets[i] <= ragged_dim_size)
if lengths is None:
lengths_scalars = offsets.diff().tolist()
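        # Illustrative example with hypothetical values: offsets = [0, 3, 5, 8] -> lengths = [3, 2, 3],
        # so the packed values are split into pieces of sizes 3, 2, 3 along the ragged dim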
_torch_check(lengths_scalars)
return torch.split(values, lengths_scalars, dim=(ragged_idx - 1))
if ragged_idx <= 0:
raise RuntimeError(
"unbind(): nested tensor ragged_idx out of bounds (should be >= 1)"
)
lengths_scalars = lengths.tolist()
offsets_scalars = offsets.tolist()
_torch_check(lengths_scalars, offsets_scalars)
return [
torch.narrow(
values,
dim=(ragged_idx - 1),
start=offsets_scalars[i],
length=lengths_scalars[i],
)
for i in range(lengths.shape[0])
]
@register_jagged_func(torch.ops.aten.squeeze.dim, "self: jt, dim: any")
def squeeze_dim(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
values = inp._values
new_kwargs["dim"] = _wrap_jagged_dim(
len(inp._size), new_kwargs["dim"], inp._ragged_idx, "squeeze"
)
return NestedTensor(func(values, **new_kwargs), **extract_kwargs(inp))
@register_jagged_func(torch.ops.aten.unsqueeze.default, "self: jt_all, dim: any")
def unsqueeze_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
values = inp._values
# Account for collapsed jagged dim
dim = new_kwargs["dim"]
new_kwargs["dim"] = _wrap_jagged_dim(
len(inp._size) + 1, dim, inp._ragged_idx, "unsqueeze", allow_ragged_dim=True
)
# ragged_idx changes if a dimension is added before it
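    # e.g. unsqueezing a (B, j1, D) input at dim=3 gives (B, j1, D, 1) with ragged_idx unchanged,
    # while unsqueezing at a dim before the ragged dim bumps the output's ragged_idx by one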
output_kwargs = extract_kwargs(inp)
if new_kwargs["dim"] <= inp._ragged_idx - 1:
output_kwargs["_ragged_idx"] += 1
return NestedTensor(func(values, **new_kwargs), **output_kwargs)
@register_jagged_func(torch.ops.aten.cat.default, "tensors: any, dim: any")
def cat_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
tensors = new_kwargs.pop("tensors")
# Convert any non-nested to nested
nested = [t for t in tensors if t.is_nested]
assert len(nested) > 0
first = nested[0]
tensors = [t if t.is_nested else t.expand_as(first) for t in tensors]
# Account for collapsed jagged dim
dim = new_kwargs["dim"]
new_kwargs["dim"] = _wrap_jagged_dim(
len(first.shape), dim, first._ragged_idx, "cat"
)
return NestedTensor(
func([t._values for t in tensors], **new_kwargs), **extract_kwargs(tensors[0])
)
@register_jagged_func(torch.ops.aten.matmul.default, "self: any, other: any")
def matmul_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
other = new_kwargs.pop("other")
def _unbind_impl(a, b):
return [
func(a_comp, b_comp) for (a_comp, b_comp) in zip(a.unbind(), b.unbind())
]
def _padded_impl(a, b):
if a.is_nested:
nt = a
else:
nt = b
from .nested_tensor import nested_from_padded
min_seqlen = nt._maybe_min_seqlen
max_seqlen = nt._maybe_max_seqlen
padded_max_S = max_seqlen
total_L = nt._values.shape[nt._ragged_idx - 1]
if padded_max_S is None:
# use upper bound on max seqlen if it's not present
padded_max_S = total_L
padded_shape = (
*nt.shape[: nt._ragged_idx],
padded_max_S,
*nt.shape[nt._ragged_idx + 1 :],
)
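        # e.g. (hypothetical) nt.shape = (B, j1, D) with max seqlen S -> padded_shape = (B, S, D)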
padded_nt = nt.to_padded_tensor(0.0, output_size=padded_shape)
if a.is_nested:
padded_t = func(padded_nt, b)
else:
padded_t = func(a, padded_nt)
return nested_from_padded(
padded_t,
offsets=nt._offsets,
ragged_idx=nt._ragged_idx,
sum_S=total_L,
min_seqlen=min_seqlen,
max_seqlen=max_seqlen,
)
# TODO: Back these with proper kernels (e.g. grouped GEMM)
# NJT x dense
if inp.is_nested and not other.is_nested:
# (B, j1, D) x (B, D, E) => (B, j1, E)
if (
inp.dim() >= 3
and inp.dim() == other.dim()
and inp._ragged_idx < inp.dim() - 1
):
# convert to padded for this
return _padded_impl(inp, other)
# Support broadcasting the dense:
# (B, j1, D) x (D, E) => (B, j1, E)
# (B, j1, D, E) x (E, F) => (B, j1, D, F)
# etc.
elif (
other.dim() == 2
and inp.dim() > other.dim()
and inp._ragged_idx < inp.dim() - 1
):
return NestedTensor(
func(inp._values, other, **new_kwargs), **extract_kwargs(inp)
)
# Dense x NJT
elif not inp.is_nested and other.is_nested:
        # (B, D, E) x (B, E, j1) => (B, D, j1)
if other.dim() >= 3 and other.dim() == inp.dim() and other._ragged_idx >= 2:
# convert to padded for this
return _padded_impl(inp, other)
# Support broadcasting the dense:
# (D, E) x (B, E, j1) => (B, D, j1)
# (D, E) x (B, E, j1, F) => (B, D, j1, F)
# etc.
elif inp.dim() == 2 and other.dim() > inp.dim() and other._ragged_idx >= 2:
return NestedTensor(
func(inp, other._values, **new_kwargs), **extract_kwargs(other)
)
# NJT x NJT
elif inp.is_nested and other.is_nested:
# Support ragged batch dim:
# (B, j1, D, E) x (B, j1, E, F) => (B, j1, D, F), etc.
if inp.dim() > 3 and other.dim() > 3 and raggedness_matches(inp, other._size):
return NestedTensor(func(inp._values, other._values), **extract_kwargs(inp))
# Support reducing over ragged with dense output:
# (B, D, j1) x (B, j1, E) => (B, D, E)
elif (
inp.dim() == 3
and other.dim() == 3
and inp._ragged_idx == 2
and other._ragged_idx == 1
and inp.size(inp._ragged_idx) == other.size(other._ragged_idx)
):
# do unbind for this; can't use padded conversion due to j1 in last dim
return torch.stack(_unbind_impl(inp, other))
raise RuntimeError(
f"matmul(): not supported between inputs of shapes {inp._size} and {other.shape}"
)
@register_jagged_func(torch.ops.aten.bmm.default, "self: jt_all, mat2: any")
def bmm_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
other = new_kwargs.pop("mat2")
if inp.dim() != 3:
raise ValueError("bmm(): input must be 3D")
if other.dim() != 3:
raise ValueError("bmm(): mat2 must be 3D")
return matmul_default(torch.ops.aten.matmul.default, inp, other)
@register_jagged_func(
torch.ops.aten.expand.default, "self: jt_all, size: any, implicit: any?"
)
def expand_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
size = new_kwargs["size"]
assert ("implicit" not in new_kwargs) or (not new_kwargs.pop("implicit"))
if not raggedness_matches(inp, size):
raise RuntimeError(f"expand(): cannot expand shape {inp._size} -> {size}")
expand_arg = [-1 if d == inp._ragged_idx else size[d] for d in range(1, inp.dim())]
return NestedTensor(func(inp._values, expand_arg), **extract_kwargs(inp))
@register_jagged_func(torch.ops.aten.expand_as.default, "self: t, other: jt")
def expand_as_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
other = new_kwargs.pop("other")
return NestedTensor(func(inp, other._values), **extract_kwargs(other))
@register_jagged_func(torch.ops.aten.broadcast_to.default, "self: jt_all, size: any")
def broadcast_to(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
size = new_kwargs.pop("size")
if len(size) <= inp.dim():
return inp.expand([*(1 for _ in range(inp.dim() - len(size))), *size])
raise ValueError(
"broadcast_to(): broadcasting to a higher-dim shape is currently not supported "
"for nested tensors with the jagged layout"
)
@register_jagged_func(torch.ops.aten.broadcast_tensors.default, "tensors: any")
def broadcast_tensors(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
tensors = new_kwargs.pop("tensors")
if len(tensors) == 0:
raise ValueError("broadcast_tensors(): expected at least one tensor input")
if len(tensors) == 1:
return tensors[0]
outs = []
broadcast_shape = torch.broadcast_shapes(*(t.shape for t in tensors))
# Pull out the first NJT. If broadcast_shapes() worked, the nested ints are compatible.
njt = next(t for t in tensors if isinstance(t, NestedTensor))
for t in tensors:
if t.is_nested:
outs.append(t.broadcast_to(broadcast_shape))
elif t.dim() < len(broadcast_shape):
outs.append(
NestedTensor(t.broadcast_to(njt._values.shape), **extract_kwargs(njt))
)
else:
raise ValueError(
"broadcast_tensors(): broadcasting nested tensors with dense tensors of equal "
"or higher dim is not currently supported"
)
return tuple(outs)
@register_jagged_func(
torch.ops.aten.where.self, "condition: jt_all, self: any, other: any"
)
def where_self(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
condition = new_kwargs.pop("condition")
inp = new_kwargs.pop("input")
other = new_kwargs.pop("other")
# if the tensors aren't compatible, broadcast_tensors() will let us know
condition, inp, other = torch.broadcast_tensors(condition, inp, other)
return NestedTensor(
func(condition._values, inp._values, other._values, **new_kwargs),
**extract_kwargs(condition),
)
@register_jagged_func(torch.ops.aten._pin_memory.default, "self: jt, device: any?")
def _pin_memory_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
@register_jagged_func(torch.ops.aten.is_pinned.default, "self: jt, device: any?")
def is_pinned_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return func(inp._values, **new_kwargs)
@register_jagged_func(
torch.ops.aten.is_same_size.default, "self: jt_all, other: jt_all"
)
def is_same_size_default(func, *args, **kwargs):
return args[0]._size == args[1]._size
def _apply_reduction(func, func_name, identity_element, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
# some ops use dim=None to indicate a full reduction; some use an empty dim list
full_reduction = new_kwargs["dim"] is None or (
isinstance(new_kwargs["dim"], (tuple, list)) and len(new_kwargs["dim"]) == 0
)
if full_reduction:
out = func(inp._values, **new_kwargs)
if new_kwargs.get("keepdim", False):
if isinstance(out, (tuple, list)):
# some ops return multiple things; unsqueeze all of them
out = type(out)(o.unsqueeze(inp._ragged_idx) for o in out)
else:
out = out.unsqueeze(inp._ragged_idx)
return out
# some ops support lists of dims; some don't
dim_to_convert = new_kwargs["dim"]
is_dimlist = isinstance(new_kwargs["dim"], (tuple, list))
if not is_dimlist:
dim_to_convert = [dim_to_convert]
(
converted_dim,
reduce_on_batch,
reduce_on_ragged,
reduce_on_non_batch,
) = _wrap_jagged_dims(
inp.dim(),
dim_to_convert,
f"{func_name}",
inp._ragged_idx,
)
if not is_dimlist:
# convert back from list
converted_dim = converted_dim[0]
new_kwargs["dim"] = converted_dim
if reduce_on_ragged and inp._lengths is not None:
raise RuntimeError(
f"{func_name}(): reducing across the ragged dimension is not supported "
"for non-contiguous nested tensors with holes"
)
from torch.utils._pytree import tree_map
# raggedness reduced away --> return dense tensor
if reduce_on_ragged:
# reduction cases: (batch, ragged), (batch, ragged, non-batch), etc.
if reduce_on_batch:
# no need to read offsets --> apply sum directly on values
out = func(inp._values, **new_kwargs)
if new_kwargs.get("keepdim", False):
# some ops return multiple things; unsqueeze all of them
out = tree_map(lambda o: o.unsqueeze(0), out)
return out
else:
# invalid reduction cases: (ragged, non-batch), etc.
if reduce_on_non_batch:
raise RuntimeError(
f"{func_name}(): reducing along a ragged and non-batch dimension "
"is not supported for nested tensors"
)
# reduction cases: (ragged)
# convert to padded dense and reduce
new_kwargs.pop("dim")
dim_to_pass = [inp._ragged_idx] if is_dimlist else inp._ragged_idx
return func(
inp.to_padded_tensor(identity_element), dim=dim_to_pass, **new_kwargs
)
# raggedness preserved --> return nested tensor
else:
# invalid reduction cases: (batch), (batch, non-batch), etc.
if reduce_on_batch:
raise RuntimeError(
f"{func_name}(): reducing along the batch dimension but not "
"the ragged dimension is not supported for nested tensors"
)
# reduction cases: (non-batch), (non-batch, non-batch), etc.
# apply sum directly on values
out = func(inp._values, **new_kwargs)
out_kwargs = extract_kwargs(inp)
if not new_kwargs.get("keepdim", False):
# dims are reduced away -> ragged_idx of output needs to be reevaluated
dimlist = (
new_kwargs["dim"]
if isinstance(new_kwargs["dim"], (tuple, list))
else [new_kwargs["dim"]]
)
for d in dimlist:
# adjust for all dims reduced before the ragged dim
if d < inp._ragged_idx - 1:
out_kwargs["_ragged_idx"] -= 1
# some ops return multiple things; wrap each of them as an NJT
return tree_map(lambda o: NestedTensor(o, **out_kwargs), out)
@register_jagged_func(torch.ops.aten.sum.default, "self: jt_all, dtype: any?")
def sum_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return func(inp._values, **new_kwargs)
@register_jagged_func(
torch.ops.aten.sum.dim_IntList,
"self: jt_all, dim: any?, keepdim: any?, dtype: any?",
)
def sum_dim_IntList(func, *args, **kwargs):
return _apply_reduction(func, "sum", 0, *args, **kwargs)
@register_jagged_func(
torch.ops.aten.transpose.int, "self: jt_all, dim0: any, dim1: any"
)
def transpose_int(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
from torch._prims_common import canonicalize_dims
inp = new_kwargs.pop("input")
dim0, dim1 = canonicalize_dims(inp.dim(), (new_kwargs["dim0"], new_kwargs["dim1"]))
# To support the SDPA API, inputs need to have the ragged idx transposed to dim 2
    # instead of 1, although the internal Flash and memory-efficient implementations will
# use the inputs with raggedness in dim 1.
if dim0 == inp._ragged_idx or dim1 == inp._ragged_idx:
if dim0 == 0 or dim1 == 0:
raise ValueError(
"Transpose is not supported on the batch dimension for jagged NT"
)
if dim0 == inp._ragged_idx:
to_dim = dim1
else:
to_dim = dim0
inp_kwargs = extract_kwargs(inp)
inp_kwargs["_ragged_idx"] = to_dim
return NestedTensor(
inp.values().transpose(
_outer_to_inner_dim(len(inp._size), dim0, inp._ragged_idx),
_outer_to_inner_dim(len(inp._size), dim1, inp._ragged_idx),
),
**inp_kwargs,
)
new_kwargs["dim0"] = _wrap_jagged_dim(
inp.dim(), new_kwargs["dim0"], inp._ragged_idx, "transpose"
)
new_kwargs["dim1"] = _wrap_jagged_dim(
inp.dim(), new_kwargs["dim1"], inp._ragged_idx, "transpose"
)
return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
@register_jagged_func(torch.ops.aten.permute.default, "self: jt_all, dims: any")
def permute_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
dims = new_kwargs.pop("dims")
inp_kwargs = extract_kwargs(inp)
inp_dim = len(inp._size)
# The first two checks are the same as the checks in the normal permute implementation
if inp_dim != len(dims):
raise ValueError(
f"permute(): number of dimensions in the tensor input ({inp_dim}) "
+ f"does not match the length of the desired ordering of dimensions ({len(dims)}).",
)
from torch._prims_common import canonicalize_dims
canonicalized_dims = canonicalize_dims(inp_dim, dims)
if len(canonicalized_dims) != len(set(canonicalized_dims)):
raise ValueError("permute(): duplicate dims are not allowed.")
if inp._lengths is not None:
raise ValueError(
"permute(): not supported on jagged layout nested tensor with holes"
)
if canonicalized_dims[0] != 0:
raise ValueError(
"Permute is not supported on the batch dimension for jagged NT"
)
inp_kwargs["_ragged_idx"] = canonicalized_dims.index(inp._ragged_idx)
inner_dims = [
_outer_to_inner_dim(inp_dim, dim, inp._ragged_idx)
for dim in canonicalized_dims[1:]
]
new_kwargs["dims"] = inner_dims
return NestedTensor(func(inp._values, **new_kwargs), **inp_kwargs)
@register_jagged_func(
[torch.ops.aten.view.default, torch.ops.aten._unsafe_view.default],
"self: jt_all, size: any",
)
def view_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
size = new_kwargs.pop("size")
if inp._ragged_idx != 1 and tuple(inp._size) != tuple(size):
raise RuntimeError(
f"view(): does not support ragged_idx != 1 except when inp._size == size. "
f"inp._size is ({inp._size}) and size is ({size})."
)
# Ensure specified size still includes batch and ragged dims
if len(size) < 3 or not raggedness_matches(inp, size):
raise RuntimeError(f"view(): cannot view shape {inp._size} as {size}")
# outer size: the size of the NT, e.g. [3, j0, 10]
# inner size: the size of the values, e.g. [8, 10] (e.g. for offsets = [0, 3, 5, 8])
# this function gets inner_size[inner_idx] for a given inner_idx.
#
    # example: for outer size [a, b, c, j0, d, e, f]
    # assume that j0 is ragged, the others are concrete integers,
    # and ragged_idx=3
    # inner size will be [b, c, inp._values.size(ragged_idx - 1), d, e, f]
    # therefore:
    # inner_size[0] = outer_size[1]
    # inner_size[1] = outer_size[2]
    # inner_size[2] = inp._values.size(ragged_idx - 1)
    # inner_size[3] = outer_size[4]
    # inner_size[4] = outer_size[5]
    # inner_size[5] = outer_size[6]
def get_inner_size(inner_idx):
nonlocal inp, size
if inner_idx == inp._ragged_idx - 1:
return inp._values.size(inner_idx)
else:
return size[inner_idx + 1]
inner_size = [get_inner_size(i) for i in range(len(size) - 1)]
# Preserve inference-mode-ness of input.
# TODO: Do this for all other views!
with torch.inference_mode(inp.is_inference()):
return NestedTensor(func(inp._values, inner_size), **extract_kwargs(inp))
@register_jagged_func(
torch.ops.aten.native_layer_norm.default,
"input: jt_all, normalized_shape: any, weight: any?, bias: any?, eps: any",
)
def native_layer_norm_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
if inp.dim() <= 2:
raise RuntimeError(
"layer_norm(): not supported for NestedTensor objects with 2 or fewer dimensions"
)
normalized_shape = new_kwargs["normalized_shape"]
ragged_size = inp.shape[inp._ragged_idx]
num_dims_not_normalized = inp.dim() - len(normalized_shape)
if (
num_dims_not_normalized == 0
): # error if trying to normalize over the batch dimension
raise RuntimeError(
"layer_norm(): not supported when normalizing over the batch dimension for NestedTensor"
)
if ragged_size in normalized_shape and inp._lengths is not None:
raise RuntimeError(
"layer_norm(): not supported where lengths is not None if operating on the ragged dimension for NestedTensor"
)
if (
ragged_size in normalized_shape
): # special handling for normalizing over the ragged dimension
padded_input = torch.ops.aten._jagged_to_padded_dense_forward(
inp._values.flatten(
start_dim=inp._ragged_idx
), # _jagged_to_padded_dense_forward requires values to be a 2D tensor
[inp._offsets],
max_lengths=[inp._max_seqlen], # max length of ragged dimension
)
padded_mask = torch.ops.aten._jagged_to_padded_dense_forward(
torch.ones((inp._values.shape[0], 1), device=inp.device, dtype=inp.dtype),
[inp._offsets],
max_lengths=[inp._max_seqlen], # max length of ragged dimension
).expand(
padded_input.shape
) # mask elements outside of the ragged dimension and expand to the same shape as padded input (3D dense tensor)
ragged_lengths = (
inp._offsets.diff().unsqueeze(1).unsqueeze(1) * padded_input.shape[2]
) # ragged dim * inner dim, since we sum over dims (1, 2) (the layer on which we normalize)
mean = (
torch.sum(
padded_input,
dim=(1, 2),
keepdim=True,
)
/ ragged_lengths
) # a sum over (1, 2) ensures layer norm, whereas a sum over (1) would be an instance norm
padded_normalized = (
padded_input - mean
) * padded_mask # mask elements outside of the ragged dimension size for correct variance calculation
variance = (
torch.sum(
torch.square(padded_normalized),
dim=(1, 2),
keepdim=True,
)
/ ragged_lengths
) # a sum over (1, 2) ensures layer norm, whereas a sum over (1) would be an instance norm
std = torch.sqrt(variance + new_kwargs["eps"])
padded_layer_norm = padded_normalized / std
jagged_layer_norm_values = torch.ops.aten._padded_dense_to_jagged_forward(
padded_layer_norm,
[inp._offsets],
total_L=inp._values.shape[
0
], # providing this parameter helps avoid a GPU/CPU sync
).unflatten(
-1, inp.shape[inp._ragged_idx + 1 :]
) # unflatten last dimension back into original nested tensor shape, e.g. (B, *, WH) --> (B, *, W, H)
return (
NestedTensor(jagged_layer_norm_values, **extract_kwargs(inp)),
mean,
std,
)
output, mean, std = func(inp._values, **new_kwargs)
return (NestedTensor(output, **extract_kwargs(inp)), mean, std)
@register_jagged_func(
torch.ops.aten.native_layer_norm_backward.default,
"grad_out: jt, input: jt, normalized_shape: any, mean: any, rstd: any, weight: any?, bias: any?, output_mask: any",
)
def native_layer_norm_backward_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
grad_out = new_kwargs.pop("grad_out")
inp = new_kwargs.pop("input")
d_input, d_gamma, d_beta = func(grad_out._values, inp._values, **new_kwargs)
if d_input is None:
return (None, d_gamma, d_beta)
return (NestedTensor(d_input, **extract_kwargs(inp)), d_gamma, d_beta)
@register_jagged_func(torch.ops.aten.select.int, "self: jt_all, dim: any, index: any")
def select_int(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
new_kwargs["dim"], operating_on_batch = _wrap_jagged_dim(
inp.dim(), new_kwargs["dim"], inp._ragged_idx, "select", allow_batch_dim=True
)
    # handle batch dim selection via unbind() for now
# TODO: make this more efficient
if operating_on_batch:
return inp.unbind()[new_kwargs["index"]]
if inp._lengths is not None:
raise ValueError(
"select(): not yet supported on dim != 0 for non-contiguous nested tensor with holes"
)
# if selecting before the ragged dim, adjust output ragged_idx
out_kwargs = extract_kwargs(inp)
if new_kwargs["dim"] < inp._ragged_idx - 1:
out_kwargs["_ragged_idx"] -= 1
return NestedTensor(func(inp._values, **new_kwargs), **out_kwargs)
@register_jagged_func(
torch.ops.aten.slice.Tensor,
"self: jt, dim: any?, start: any?, end: any?, step: any?",
)
def slice_tensor(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
new_kwargs["dim"] = _wrap_jagged_dim(
inp.dim(), new_kwargs["dim"], inp._ragged_idx, "slice"
)
return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
@register_jagged_func(
torch.ops.aten.index_put.default,
"input: jt_all, indices: any, values: t, accumulate: any?",
)
@register_jagged_func(
torch.ops.aten.index_put_.default,
"input: jt_all, indices: any, values: t, accumulate: any?",
)
def index_put_(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp: NestedTensor = new_kwargs.pop("input")
    # To run index_put_ on the packed values, we fold the batch and ragged indices together:
    # each index along the ragged dimension is shifted by the offset of its sequence, producing
    # a single index into the flattened ragged dimension of _values
indices = new_kwargs.pop("indices")
assert len(indices) <= inp.dim()
if len(indices) < inp._ragged_idx + 1:
if not inp.is_contiguous():
raise RuntimeError(
"index_put(): If ragged dimension is not part of indices, this only works on contiguous NJTs"
)
# Ragged dim is NOT part of indices, we need to pad the nested tensor to apply func
from .nested_tensor import nested_from_padded
min_seqlen = inp._maybe_min_seqlen
max_seqlen = inp._maybe_max_seqlen
padded_max_S = max_seqlen
total_L = inp._values.shape[inp._ragged_idx - 1]
if padded_max_S is None:
# use upper bound on max seqlen if it's not present
padded_max_S = total_L
padded_shape = (
*inp.shape[: inp._ragged_idx],
padded_max_S,
*inp.shape[inp._ragged_idx + 1 :],
)
padded_inp = inp.to_padded_tensor(0.0, output_size=padded_shape)
new_njt = nested_from_padded(
func(padded_inp, indices, **new_kwargs),
offsets=inp._offsets,
ragged_idx=inp._ragged_idx,
sum_S=total_L,
min_seqlen=min_seqlen,
max_seqlen=max_seqlen,
)
if func == torch.ops.aten.index_put_.default:
inp._values.copy_(new_njt.values())
return inp
return new_njt
# We can run on the underlying values directly
# Validate indices
if inp.lengths() is None:
lengths = inp.offsets().diff()
else:
lengths = inp.lengths()
torch._assert_async(
torch.all(indices[inp._ragged_idx] < lengths),
"Some indices in the ragged dimension are out of bounds!",
)
# Recompute indices for _values
ragged_indices = inp.offsets()[indices[0]] + indices[inp._ragged_idx]
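    # Illustrative example with hypothetical values: offsets = [0, 3, 5], batch index 1, ragged index 1
    #   -> flattened values index = offsets[1] + 1 = 4 (position 1 within the second sequence)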
func_indices = (
# before ragged dim
indices[1 : inp._ragged_idx]
# ragged dim (combined with batch)
+ [ragged_indices]
# after ragged dim
+ indices[inp._ragged_idx + 1 :]
)
if func == torch.ops.aten.index_put_.default:
inp._values = func(inp._values, func_indices, **new_kwargs)
return inp
return NestedTensor(
func(inp._values, func_indices, **new_kwargs),
**extract_kwargs(inp),
)
@register_jagged_func(
torch.ops.aten.convolution.default,
"input: jt, weight: t, bias: t?, stride: any, padding: any, "
"dilation: any, transposed: any, output_padding: any, groups: any",
)
def convolution_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
@register_jagged_func(
torch.ops.aten.mean.dim, "self: jt_all, dim: any?, keepdim: any?, dtype: any?"
)
def mean_dim(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs["input"]
(_, reduce_on_batch, reduce_on_ragged, reduce_on_non_batch) = _wrap_jagged_dims(
inp.dim(),
new_kwargs["dim"],
"mean",
inp._ragged_idx,
)
if reduce_on_ragged and not reduce_on_batch:
assert not reduce_on_non_batch
# calculate an intermediate sum and leave the dim in for normalization purposes
keepdim = new_kwargs["keepdim"]
new_kwargs["keepdim"] = True
intermediate_sum = _apply_reduction(
torch.ops.aten.sum.dim_IntList, "mean", 0, **new_kwargs
)
# normalize by sequence lengths
lengths = inp._lengths if inp._lengths is not None else inp._offsets.diff()
for _ in range(intermediate_sum.dim() - 1):
lengths = lengths.unsqueeze(-1)
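        # e.g. (hypothetical) for an input of shape (B, j1, D) with keepdim, intermediate_sum has
        # shape (B, 1, D) and lengths is reshaped to (B, 1, 1) so the division broadcasts per sequence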
out = intermediate_sum / lengths
if not keepdim:
out = out.squeeze(inp._ragged_idx)
return out
# at this point, we're just redispatching on the values buffer
# since we expect it to be unused, specify a weird intermediate value to
# hopefully make errors obvious
intermediate_value = 0.42
return _apply_reduction(func, "mean", intermediate_value, **new_kwargs)
@register_jagged_func(torch.ops.aten.mean.default, "self: jt_all, dtype: any?")
def mean_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return func(inp._values, **new_kwargs)
@register_jagged_func(torch.ops.aten.any.dims, "self: jt_all, dim: any?, keepdim: any?")
def any_dims(func, *args, **kwargs):
return _apply_reduction(func, "any", False, *args, **kwargs)
@register_jagged_func(torch.ops.aten.any.dim, "self: jt_all, dim: any, keepdim: any?")
def any_dim(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
# wrap dim in list to redispatch to dims overload
new_kwargs["dim"] = [new_kwargs["dim"]]
return any_dims(torch.ops.aten.any.dims, **new_kwargs)
@register_jagged_func(torch.ops.aten.all.dims, "self: jt_all, dim: any?, keepdim: any?")
def all_dims(func, *args, **kwargs):
return _apply_reduction(func, "all", True, *args, **kwargs)
@register_jagged_func(torch.ops.aten.all.dim, "self: jt_all, dim: any, keepdim: any?")
def all_dim(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
# wrap dim in list to redispatch to dims overload
new_kwargs["dim"] = [new_kwargs["dim"]]
return all_dims(torch.ops.aten.all.dims, **new_kwargs)
@register_jagged_func(
[
torch.ops.aten.all.default,
torch.ops.aten.any.default,
torch.ops.aten.max.default,
torch.ops.aten.min.default,
],
"self: jt_all",
)
def all_any_max_min_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return func(inp._values, **new_kwargs)
@register_jagged_func(torch.ops.aten.min.dim, "self: jt_all, dim: any, keepdim: any?")
def min_dim(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
dtype_max = torch.finfo(new_kwargs["input"].dtype).max
return _apply_reduction(func, "min", dtype_max, *args, **kwargs)
@register_jagged_func(torch.ops.aten.max.dim, "self: jt_all, dim: any, keepdim: any?")
def max_dim(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
dtype_min = torch.finfo(new_kwargs["input"].dtype).min
return _apply_reduction(func, "max", dtype_min, *args, **kwargs)
@register_jagged_func(
torch.ops.aten.amin.default, "self: jt_all, dim: any?, keepdim: any?"
)
def amin_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
dtype_max = torch.finfo(new_kwargs["input"].dtype).max
return _apply_reduction(func, "amin", dtype_max, *args, **kwargs)
@register_jagged_func(
torch.ops.aten.amax.default, "self: jt_all, dim: any?, keepdim: any?"
)
def amax_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
dtype_min = torch.finfo(new_kwargs["input"].dtype).min
return _apply_reduction(func, "amax", dtype_min, *args, **kwargs)
@register_jagged_func(
torch.ops.aten.argmin.default, "self: jt_all, dim: any?, keepdim: any?"
)
def argmin_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
dtype_max = torch.finfo(new_kwargs["input"].dtype).max
return _apply_reduction(func, "argmin", dtype_max, *args, **kwargs)
@register_jagged_func(
torch.ops.aten.argmax.default, "self: jt_all, dim: any?, keepdim: any?"
)
def argmax_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
dtype_min = torch.finfo(new_kwargs["input"].dtype).min
return _apply_reduction(func, "argmax", dtype_min, *args, **kwargs)
@register_jagged_func(
torch.ops.aten.value_selecting_reduction_backward.default,
"grad: jt_all, dim: any, indices: jt_all, sizes: any, keepdim: any",
)
def value_selecting_reduction_backward_default(func, *args, **kwargs):
from torch.fx.experimental.symbolic_shapes import is_nested_int
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
grad = new_kwargs.pop("grad")
new_kwargs["grad"] = grad._values
indices = new_kwargs.pop("indices")
new_kwargs["indices"] = indices._values
# should always succeed; sizes should contain a nested int
ragged_idx = next(i for i, s in enumerate(new_kwargs["sizes"]) if is_nested_int(s))
# convert dim -> values-space dim
new_kwargs["dim"] = _wrap_jagged_dim(
len(new_kwargs["sizes"]),
new_kwargs["dim"],
ragged_idx,
"value_selecting_reduction_backward",
)
# convert saved NJT sizes -> values-space sizes
sizes = new_kwargs.pop("sizes")
sizes[ragged_idx] = indices._values.size(indices._ragged_idx - 1)
sizes = sizes[1:]
new_kwargs["sizes"] = sizes
output_kwargs = extract_kwargs(indices)
output_kwargs["_ragged_idx"] = ragged_idx
return NestedTensor(func(**new_kwargs), **output_kwargs)
@register_jagged_func(torch.ops.aten.stack.default, "tensors: any, dim: any")
def stack_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
# guaranteed this is non-empty if we got here
tensors = new_kwargs.pop("tensors")
for t in tensors:
if not isinstance(t, NestedTensor):
raise RuntimeError("stack(): expected all nested tensors inputs")
if t.dim() != tensors[0].dim():
raise RuntimeError(
"stack(): expected all nested tensors to have the same dim"
)
if not raggedness_matches(t, tensors[0].shape):
raise RuntimeError(
"stack(): expected all nested tensors to have the same nested structure"
)
new_kwargs["dim"] = _wrap_jagged_dim(
tensors[0].dim() + 1, new_kwargs["dim"], tensors[0]._ragged_idx, "stack"
)
return NestedTensor(
func([t._values for t in tensors], **new_kwargs), **extract_kwargs(tensors[0])
)
@register_jagged_func(
torch.ops.aten.embedding.default,
"weight: t, indices: jt, padding_idx: any?, scale_grad_by_freq: any?, sparse: any?",
)
def embedding_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
# guaranteed this is non-empty if we got here
indices = new_kwargs.pop("indices")
weight = new_kwargs.pop("weight")
return NestedTensor(
func(weight, indices._values, **new_kwargs), **extract_kwargs(indices)
)
@register_jagged_func(
torch.ops.aten.embedding_dense_backward.default,
"grad_output: jt, indices: jt, num_weights: any, padding_idx: any, scale_grad_by_freq: any",
)
def embedding_dense_backward_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
indices = new_kwargs.pop("indices")
grad_output = new_kwargs.pop("grad_output")
return func(grad_output._values, indices._values, **new_kwargs)
@register_jagged_func(
[
torch.ops.aten.values.default,
torch.ops.aten._nested_get_values.default,
],
"self: jt_all",
)
def values_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
# TODO: Handle inference mode properly.
# See https://github.com/pytorch/pytorch/issues/112024#issuecomment-1779554292
return inp._values.detach()
@register_jagged_func(torch.ops.aten.all.default, "self: jt_all")
def all_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return func(inp._values)
@register_jagged_func(
torch.ops.aten.to_padded_tensor.default,
"self: jt_all, padding: any, output_size: any?",
)
def to_padded_tensor_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
if inp._lengths is not None:
raise RuntimeError(
"to_padded_tensor(): not supported for nested tensors with holes"
)
# TODO: Handle the rest of output_size
output_size = new_kwargs["output_size"]
if output_size is not None:
max_seq_len = output_size[inp._ragged_idx]
else:
max_seq_len = (
inp._max_seqlen
if inp._max_seqlen_tensor is not None
else inp._values.size(0)
)
    # only 2D values with ragged packed dim=0 are supported by the underlying FBGEMM
    # kernel, so do shape gymnastics if needed
values = inp.values()
if inp._ragged_idx > 1:
values = values.transpose(inp._ragged_idx - 1, 0)
values_shape = values.shape
if values.dim() > 2:
values = values.flatten(start_dim=1)
elif values.dim() == 1:
values = values.unsqueeze(-1)
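    # e.g. (hypothetical) values of shape (sum_L, D1, D2) are flattened to (sum_L, D1 * D2) for the
    # kernel, then unflattened back to (B, max_seq_len, D1, D2) after padding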
# NB: The CUDA kernel for jagged -> padded dense conversion does not support
# integer / bool types; work around this by casting to half.
is_bool = values.dtype is torch.bool
if is_bool and values.is_cuda:
values = values.to(torch.half)
padded_out = torch.ops.aten._jagged_to_padded_dense_forward(
values,
[inp._offsets],
[max_seq_len],
new_kwargs["padding"],
)
if is_bool and padded_out.is_cuda:
padded_out = padded_out.to(torch.bool)
# shape gymnastics part 2
if len(values_shape) > 2:
padded_out = padded_out.unflatten(-1, values_shape[1:])
elif len(values_shape) == 1:
padded_out = padded_out.squeeze(-1)
if inp._ragged_idx > 1:
padded_out = padded_out.transpose(inp._ragged_idx, 1)
return padded_out
@register_jagged_func(
torch.ops.aten._nested_from_padded_tensor.default,
"padded: t, offsets: t, dummy: jt, ragged_idx: any?, min_seqlen: any?, max_seqlen: any?, sum_S: any?",
)
def _nested_from_padded_tensor_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
padded, offsets = new_kwargs["padded"], new_kwargs["offsets"]
ragged_idx = new_kwargs.get("ragged_idx", 1)
    # only 3D padded input with ragged packed dim=0 is supported by the underlying FBGEMM
    # kernel, so do shape gymnastics
if ragged_idx > 1:
padded = padded.transpose(ragged_idx, 1)
padded_ragged_dim1_shape = padded.shape
if padded.dim() > 3:
padded = padded.flatten(start_dim=2)
elif padded.dim() < 3:
padded = padded.unsqueeze(-1)
# NB: The CUDA kernel for padded dense -> jagged conversion does not support
# integer / bool types; work around this by casting to half.
is_bool = padded.dtype is torch.bool
if is_bool and padded.is_cuda:
padded = padded.to(torch.half)
values = torch.ops.aten._padded_dense_to_jagged_forward(
padded, [offsets], new_kwargs["sum_S"]
)
if is_bool and values.is_cuda:
values = values.to(torch.bool)
# shape gymnastics part 2
if len(padded_ragged_dim1_shape) > 3:
values = values.unflatten(-1, padded_ragged_dim1_shape[2:])
elif len(padded_ragged_dim1_shape) < 3:
values = values.squeeze(-1)
if ragged_idx > 1:
values = values.transpose(ragged_idx - 1, 0)
min_seqlen = new_kwargs["min_seqlen"]
max_seqlen = new_kwargs["max_seqlen"]
metadata_cache = {}
if min_seqlen is not None:
metadata_cache["min_seqlen"] = min_seqlen
if max_seqlen is not None:
metadata_cache["max_seqlen"] = max_seqlen
return NestedTensor(
values,
offsets,
_ragged_idx=ragged_idx,
_metadata_cache=metadata_cache,
)
@register_jagged_func(
torch.ops.aten._nested_view_from_jagged.default,
"values: t, offsets: t, dummy: jt_all, lengths: t?, ragged_idx: any?, min_seqlen: t?, max_seqlen: t?",
)
def _nested_view_from_jagged_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
values, offsets, lengths = (
new_kwargs["input"],
new_kwargs["offsets"],
new_kwargs["lengths"],
)
ragged_idx = new_kwargs["ragged_idx"]
min_seqlen = new_kwargs["min_seqlen"]
max_seqlen = new_kwargs["max_seqlen"]
metadata_cache = {}
if min_seqlen is not None:
metadata_cache["min_seqlen"] = min_seqlen
if max_seqlen is not None:
metadata_cache["max_seqlen"] = max_seqlen
return NestedTensor(
values,
offsets,
lengths=lengths,
_ragged_idx=ragged_idx,
_metadata_cache=metadata_cache,
)
@register_jagged_func(torch.ops.aten._nested_get_offsets.default, "self: jt_all")
def _nested_get_offsets(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return inp._offsets
@register_jagged_func(torch.ops.aten._nested_get_lengths.default, "self: jt_all")
def _nested_get_lengths(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return inp._lengths
@register_jagged_func(torch.ops.aten._nested_get_ragged_idx.default, "self: jt_all")
def _nested_get_ragged_idx(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return inp._ragged_idx
@register_jagged_func(torch.ops.aten._nested_get_min_seqlen.default, "self: jt_all")
def _nested_get_min_seqlen(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return inp._metadata_cache.get("min_seqlen", None)
@register_jagged_func(torch.ops.aten._nested_get_max_seqlen.default, "self: jt_all")
def _nested_get_max_seqlen(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return inp._metadata_cache.get("max_seqlen", None)
# If a section of the Nested Tensor is fully masked out, we still retain the section with a length of 0
@register_jagged_func(torch.ops.aten.masked_select.default, "self: jt, mask: any")
def masked_select_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
mask = new_kwargs.pop("mask")
if inp.ndim > 2:
raise RuntimeError("masked_select only support 2-D selections currently")
elif inp.shape != mask.shape:
raise RuntimeError(
f"Mask with shape {mask.shape} is not compatible with input's shape {inp.shape}"
)
res_values = inp._values.masked_select(mask.values())
mask_cumsum = F.pad(mask.values().cumsum(dim=0), (1, 0)) # type: ignore[arg-type]
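    # Illustrative example with hypothetical values: offsets = [0, 2, 5], mask values = [T, F, T, T, F]
    #   -> padded cumsum = [0, 1, 1, 2, 3, 3] -> new offsets = mask_cumsum[[0, 2, 5]] = [0, 1, 3],
    #   i.e. per-sequence lengths [1, 2], matching the number of True entries in each sequence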
args = extract_kwargs(inp)
args["offsets"] = mask_cumsum[inp._offsets]
return NestedTensor(
values=res_values,
**args,
)
@register_jagged_func(
torch.ops.aten._nested_select_backward.default,
"grad_output: t, self: jt_all, dim: any, index: any",
)
def _nested_select_backward_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
grad_output = new_kwargs.pop("grad_output")
grad_input = torch.zeros_like(inp, dtype=grad_output.dtype)
grad_input.select(new_kwargs["dim"], new_kwargs["index"]).copy_(grad_output)
return grad_input
@register_jagged_func(torch.ops.aten.record_stream.default, "self: jt_all, s: any")
def record_stream_default(func, *args, **kwargs):
inp = args[0]
stream = args[1]
# ensure all components live until stream computation completes
func(inp._values, stream)
func(inp._offsets, stream)
if inp._lengths is not None:
func(inp._lengths, stream)
@register_jagged_func(
[
torch.ops.aten.new_empty.default,
torch.ops.aten.new_zeros.default,
torch.ops.aten.new_ones.default,
],
"self: jt_all, size: any, dtype: any?, layout: any?, device: any?, pin_memory: any?",
)
def new_empty_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
if len(new_kwargs["size"]) == 0:
return func(inp._values, **new_kwargs)
raise RuntimeError("new_empty() not supported for NJT with shape != ()")
@register_jagged_func(
[
torch.ops.aten.elu_backward.default,
torch.ops.aten.hardshrink_backward.default,
torch.ops.aten.hardsigmoid_backward.default,
torch.ops.aten.hardtanh_backward.default,
torch.ops.aten.softplus_backward.default,
torch.ops.aten.softshrink_backward.default,
],
"self: jt_all, ...",
)
def activation_backward(func, *args, **kwargs):
# first NJT arg is expected to be grad_output
grad_output = next(arg for arg in args if isinstance(arg, NestedTensor))
return NestedTensor(
func(
*(arg._values if isinstance(arg, NestedTensor) else arg for arg in args),
**kwargs,
),
**extract_kwargs(grad_output),
)
@register_jagged_func(torch.ops.aten.fill.Scalar, "self: jt_all, value: any")
def fill_Scalar(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
@register_jagged_func(torch.ops.aten.fill_.Scalar, "self: jt_all, value: any")
def fill__Scalar(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
func(inp._values, **new_kwargs)
return inp
@register_jagged_func(torch.ops.aten.frexp.Tensor, "self: jt_all")
def frexp_Tensor(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
inp = new_kwargs.pop("input")
output_kwargs = extract_kwargs(inp)
mantissa, exponent = func(inp._values)
return NestedTensor(mantissa, **output_kwargs), NestedTensor(
exponent, **output_kwargs
)
@register_jagged_func(
torch.ops.aten.matmul_backward.default,
"grad: any, self: any, other: any, mask: any",
)
def matmul_backward_default(func, *args, **kwargs):
_, new_kwargs = normalize_function( # type: ignore[misc]
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
grad = new_kwargs.pop("grad")
inp = new_kwargs.pop("input")
other = new_kwargs.pop("other")
grad_input_mask = new_kwargs.pop("mask")
if grad is None:
return (None, None)
grad_self = None
if grad_input_mask[0]:
grad_self = torch.matmul(grad, other.transpose(-1, -2))
grad_other = None
if grad_input_mask[1]:
grad_other = torch.matmul(inp.transpose(-1, -2), grad)
return (grad_self, grad_other)
from torch._higher_order_ops.flex_attention import (
flex_attention as flex_attention_hop,
flex_attention_backward as flex_attention_backward_hop,
)
from torch.fx.graph_module import GraphModule
@flex_attention_hop.py_impl(NestedTensor) # type: ignore[misc]
def flex_njt(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
score_mod: Callable,
block_mask: Tuple,
scale: float,
kernel_options: Dict[str, Any],
score_mod_other_buffers: Tuple = (),
mask_mod_other_buffers: Tuple = (),
) -> Tuple[torch.Tensor, torch.Tensor]:
assert query.dim() == 4 and key.dim() == 4 and value.dim() == 4
# TODO: Support this if needed; determine if NJT buffers need be unwrapped as dense.
if any(
isinstance(buf, torch.Tensor) and buf.is_nested
for buf in score_mod_other_buffers + mask_mod_other_buffers
):
raise RuntimeError(
"flex_attention(): Nested tensor score_mod / mask_mod buffers are not "
"currently supported. Please file an issue if this is important to you."
)
# need to pass dense tensor of shape (B, n_heads, sum(seq_len), D)
output = flex_attention_hop(
query.values().unsqueeze(0),
key.values().unsqueeze(0),
value.values().unsqueeze(0),
score_mod=score_mod,
block_mask=block_mask,
scale=scale,
kernel_options=kernel_options,
score_mod_other_buffers=score_mod_other_buffers,
mask_mod_other_buffers=mask_mod_other_buffers,
)
# wrap outputs as NJT
output_njt = torch.nested.nested_tensor_from_jagged(
output[0].transpose(1, 2).squeeze(0),
query._offsets, # type: ignore[attr-defined]
query._lengths, # type: ignore[attr-defined]
min_seqlen=query._maybe_min_seqlen, # type: ignore[attr-defined]
max_seqlen=query._maybe_max_seqlen, # type: ignore[attr-defined]
).transpose(1, 2)
logsumexp_njt = torch.nested.nested_tensor_from_jagged(
output[1].transpose(1, 2).squeeze(0),
query._offsets, # type: ignore[attr-defined]
query._lengths, # type: ignore[attr-defined]
min_seqlen=query._maybe_min_seqlen, # type: ignore[attr-defined]
max_seqlen=query._maybe_max_seqlen, # type: ignore[attr-defined]
).transpose(1, 2)
return (output_njt, logsumexp_njt)
@flex_attention_backward_hop.py_impl(NestedTensor) # type: ignore[misc]
def flex_njt_backward(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
out: torch.Tensor,
logsumexp: torch.Tensor,
grad_out: torch.Tensor,
grad_logsumexp: torch.Tensor,
fw_graph: Union[Callable, GraphModule],
joint_graph: GraphModule,
block_mask: Tuple,
scale: float,
kernel_options: Dict[str, Any],
score_mod_other_buffers: Tuple = (),
mask_mod_other_buffers: Tuple = (),
) -> Tuple[
torch.Tensor, torch.Tensor, torch.Tensor, Tuple[Optional[torch.Tensor], ...]
]:
output = flex_attention_backward_hop(
query.values().unsqueeze(0),
key.values().unsqueeze(0),
value.values().unsqueeze(0),
out=out.values().unsqueeze(0),
logsumexp=logsumexp.values().unsqueeze(0),
grad_out=grad_out.values().unsqueeze(0),
grad_logsumexp=grad_logsumexp.values().unsqueeze(0),
fw_graph=fw_graph,
joint_graph=joint_graph,
block_mask=block_mask,
scale=scale,
kernel_options=kernel_options,
score_mod_other_buffers=score_mod_other_buffers,
mask_mod_other_buffers=mask_mod_other_buffers,
)
# wrap grads as NJTs
dense_q_grad, dense_k_grad, dense_v_grad, score_mod_other_buffer_grads = output
njt_q_grad = torch.nested.nested_tensor_from_jagged(
dense_q_grad.transpose(1, 2).squeeze(0),
query._offsets, # type: ignore[attr-defined]
query._lengths, # type: ignore[attr-defined]
min_seqlen=query._maybe_min_seqlen, # type: ignore[attr-defined]
max_seqlen=query._maybe_max_seqlen, # type: ignore[attr-defined]
).transpose(1, 2)
njt_k_grad = torch.nested.nested_tensor_from_jagged(
dense_k_grad.transpose(1, 2).squeeze(0),
key._offsets, # type: ignore[attr-defined]
key._lengths, # type: ignore[attr-defined]
min_seqlen=key._maybe_min_seqlen, # type: ignore[attr-defined]
max_seqlen=key._maybe_max_seqlen, # type: ignore[attr-defined]
).transpose(1, 2)
njt_v_grad = torch.nested.nested_tensor_from_jagged(
dense_v_grad.transpose(1, 2).squeeze(0),
value._offsets, # type: ignore[attr-defined]
value._lengths, # type: ignore[attr-defined]
min_seqlen=value._maybe_min_seqlen, # type: ignore[attr-defined]
max_seqlen=value._maybe_max_seqlen, # type: ignore[attr-defined]
).transpose(1, 2)
return (njt_q_grad, njt_k_grad, njt_v_grad, score_mod_other_buffer_grads)
# Make the dummy available on the C++ side.
@register_jagged_func(torch.ops.aten._nested_get_jagged_dummy.default, "self: any")
def _nested_get_jagged_dummy(func, *args, **kwargs):
from torch.nested._internal.nested_tensor import _nt_view_dummy
return _nt_view_dummy()
with torch.library._scoped_library("aten", "IMPL") as aten:
aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "CPU")
aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "CUDA")
aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "Meta")
```
======================================================================================================================
SOURCE CODE FILE: sdpa.py
LINES: 1
SIZE: 34.59 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nested\_internal\sdpa.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import logging
from typing import Optional
import torch
import torch.nn
import torch.nn.functional as F
from torch.backends.cuda import (
can_use_cudnn_attention,
can_use_efficient_attention,
can_use_flash_attention,
cudnn_sdp_enabled,
flash_sdp_enabled,
math_sdp_enabled,
mem_efficient_sdp_enabled,
SDPAParams,
)
from torch.nn.attention import SDPBackend
from .nested_tensor import NestedTensor
log = logging.getLogger(__name__)
def _validate_sdpa_input(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
dropout_p=0.0,
is_causal=False,
scale=None,
):
if (
not isinstance(query, NestedTensor)
or not isinstance(key, NestedTensor)
or not isinstance(value, NestedTensor)
):
raise ValueError(
f"Expected query, key, and value to be nested tensors, "
f"but got query.is_nested: {query.is_nested}, key.is_nested: {key.is_nested}, "
f"and value.is_nested: {value.is_nested} instead."
)
if query.dtype != key.dtype or query.dtype != value.dtype:
raise ValueError(
f"Expected query, key, and value to have the same dtype, "
f"but got query.dtype: {query.dtype}, key.dtype: {key.dtype}, "
f"and value.dtype: {value.dtype} instead."
)
if query.device != key.device or query.device != value.device:
raise ValueError(
f"Expected query, key, and value to have the same device type, "
f"but got query.device: {query.device}, key.device: {key.device}, "
f"and value.device: {value.device} instead."
)
if query.dim() < 3 or key.dim() < 3 or value.dim() < 3:
raise ValueError(
f"Expected query, key, and value to all be at least 3 dimensional, but got query.dim: "
f"{query.dim()}, key.dim: {key.dim()} and value.dim: {value.dim()} instead."
)
if query._ragged_idx != key._ragged_idx or query._ragged_idx != value._ragged_idx:
raise ValueError(
f"Expected query, key, and value to all be ragged on the same dimension, but got ragged "
f"dims {query._ragged_idx}, {key._ragged_idx}, and {value._ragged_idx}, respectively."
)
if attn_mask is not None:
# TODO: Figure out whether masks are actually supported for this layout or not
raise ValueError("Masks are not yet supported!")
if attn_mask.dtype != torch.bool and attn_mask.dtype != query.dtype:
raise ValueError(
f"Expected attn_mask dtype to be bool or to match query dtype, but got attn_mask.dtype: "
f"{attn_mask.dtype}, and query.dtype: {query.dtype} instead."
)
def _check_batch_size_nested(params: SDPAParams, debug=False) -> bool:
# This is expected to be called after check_tensor_shapes ensuring that the
# size() calls won't error since the inputs are all 4 dimensional
q_batch_size = params.query.size(0)
k_batch_size = params.key.size(0)
v_batch_size = params.value.size(0)
# num_heads logic for nested input is checked in
# check_for_seq_len_0_nested_tensor as there is handling there to make sure
# num_heads is not ragged
return q_batch_size == k_batch_size and q_batch_size == v_batch_size
def _check_head_dim_size_flash_nested(params: SDPAParams, debug=False) -> bool:
max_size = 256
query_size_last = params.query.size(-1)
key_size_last = params.key.size(-1)
value_size_last = params.value.size(-1)
same_head_dim_size = (
query_size_last == key_size_last and query_size_last == value_size_last
)
if not (
same_head_dim_size
and (query_size_last % 8 == 0)
and (query_size_last <= max_size)
):
if debug:
log.warning(
"For NestedTensor inputs, Flash attention requires q,k,v to have the same "
"last dimension and to be a multiple of 8 and less than or equal to 256. "
"Got Query.size(-1): %d, Key.size(-1): %d, Value.size(-1): %d instead.",
query_size_last,
key_size_last,
value_size_last,
)
return False
return True
def _check_head_dim_size_cudnn_nested(params: SDPAParams, debug=False) -> bool:
max_size = 128
query_size_last = params.query.size(-1)
key_size_last = params.key.size(-1)
value_size_last = params.value.size(-1)
same_head_dim_size = (
query_size_last == key_size_last and query_size_last == value_size_last
)
if not (
same_head_dim_size
and (query_size_last % 8 == 0)
and (query_size_last <= max_size)
):
if debug:
log.warning(
"For NestedTensor inputs, cuDNN attention requires q,k,v to have the same "
"last dimension and to be a multiple of 8 and less than or equal to 128. "
"Got Query.size(-1): %d, Key.size(-1): %d, Value.size(-1): %d instead.",
query_size_last,
key_size_last,
value_size_last,
)
return False
return True
def _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
param: torch.Tensor, param_name: str, debug=False
) -> bool:
assert isinstance(param, NestedTensor), "param should be a jagged NT"
if param._ragged_idx == 1:
# num_head_dims is ragged
if debug:
log.warning(
"Fused kernels do not support ragged num_head_dims, %s has a ragged num_heads.",
param_name,
)
return False
# This is being called inside sdp with shape [batch, heads, {seq_len}, dim]
if param._get_min_seqlen() == 0:
if debug:
log.warning(
"Fused kernels do not support seq_len == 0, %s has a seq len of 0.",
param_name,
)
return False
return True
def _try_broadcast_param_size(q_size, k_size, v_size, param_name, debug=False) -> bool:
max_size = max(q_size, k_size, v_size)
if (
(q_size != max_size and q_size != 1)
or (k_size != max_size and k_size != 1)
or (v_size != max_size and v_size != 1)
):
if debug:
log.warning(
"Both fused kernels require query, key and value to have broadcastable %s, "
"got Query %s %d, Key %s %d, Value %s %d instead.",
param_name,
param_name,
q_size,
param_name,
k_size,
param_name,
v_size,
)
return False
return True
def _check_for_seq_len_0_nested(params: SDPAParams, debug=False) -> bool:
# When this function is called we are assured that the nt is dim==4
q_is_safe = (
_check_for_seq_len_0_and_consistent_head_dim_nested_helper(
params.query, "query", debug
)
if params.query.is_nested
else True
)
# short circuit if any is unsafe
if not q_is_safe:
return False
k_is_safe = (
_check_for_seq_len_0_and_consistent_head_dim_nested_helper(
params.key, "key", debug
)
if params.key.is_nested
else True
)
# short circuit if any is unsafe
if not k_is_safe:
return False
v_is_safe = (
_check_for_seq_len_0_and_consistent_head_dim_nested_helper(
params.value, "value", debug
)
if params.value.is_nested
else True
)
# short circuit if any is unsafe
if not v_is_safe:
return False
# We now know none of the inputs have ragged num_heads, so we can safely
# access .size(1)
q_num_heads = params.query.size(1)
k_num_heads = params.key.size(1)
v_num_heads = params.value.size(1)
same_num_heads = q_num_heads == k_num_heads and q_num_heads == v_num_heads
if not same_num_heads:
if (
params.query.requires_grad
or params.key.requires_grad
or params.value.requires_grad
):
if debug:
log.warning(
"Both fused kernels do not support training with broadcasted NT inputs."
)
return False
return _try_broadcast_param_size(
q_num_heads, k_num_heads, v_num_heads, "num heads", debug
)
return True
def _can_use_flash_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
constraints = (
_check_batch_size_nested,
_check_head_dim_size_flash_nested,
_check_for_seq_len_0_nested,
)
for constraint in constraints:
if not constraint(params, debug):
return False
return True
def _can_use_efficient_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
constraints = (
_check_batch_size_nested,
_check_for_seq_len_0_nested,
)
for constraint in constraints:
if not constraint(params, debug):
return False
return True
def _can_use_math_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
if (
not params.query.transpose(1, 2).is_contiguous()
or not params.key.transpose(1, 2).is_contiguous()
or not params.value.transpose(1, 2).is_contiguous()
):
if debug:
log.warning(
"If inputs are nested tensors they must be contiguous after transposing."
)
return False
if params.is_causal:
if debug:
log.warning(
"Nested tensors for query / key are not supported when is_causal=True."
)
return False
return True
def _select_sdp_backend(query, key, value, attn_mask, dropout, is_causal, enable_gqa):
if (
not flash_sdp_enabled()
and not mem_efficient_sdp_enabled()
and not math_sdp_enabled()
and not cudnn_sdp_enabled()
):
return SDPBackend.ERROR
ordering = (
SDPBackend.FLASH_ATTENTION,
SDPBackend.EFFICIENT_ATTENTION,
SDPBackend.MATH,
SDPBackend.CUDNN_ATTENTION,
)
params = SDPAParams(query, key, value, attn_mask, dropout, is_causal, enable_gqa)
for backend in ordering:
if backend == SDPBackend.CUDNN_ATTENTION:
if can_use_cudnn_attention(params):
return SDPBackend.CUDNN_ATTENTION
if backend == SDPBackend.FLASH_ATTENTION:
if can_use_flash_attention(params) and _can_use_flash_sdpa_jagged(params):
return SDPBackend.FLASH_ATTENTION
if backend == SDPBackend.EFFICIENT_ATTENTION:
if can_use_efficient_attention(params) and _can_use_efficient_sdpa_jagged(
params
):
return SDPBackend.EFFICIENT_ATTENTION
if backend == SDPBackend.MATH:
if math_sdp_enabled() and _can_use_math_sdpa_jagged(params):
return SDPBackend.MATH
log.warning("Memory efficient kernel not used because:")
can_use_efficient_attention(params, debug=True)
_can_use_efficient_sdpa_jagged(params, debug=True)
log.warning("Flash attention kernel not used because:")
can_use_flash_attention(params, debug=True)
_can_use_flash_sdpa_jagged(params, debug=True)
log.warning("Math attention kernel not used because:")
_can_use_math_sdpa_jagged(params, debug=True)
log.warning("cuDNN attention kernel not used because:")
can_use_cudnn_attention(params, debug=True)
return SDPBackend.ERROR
def _cumulative_and_max_seq_len_nnz(qkv: torch.Tensor) -> tuple[torch.Tensor, int, int]:
    # This function calculates metadata needed by the flash-attention and
    # efficient_attention kernels: the cumulative sequence lengths over a batch
    # of sequences and the maximum sequence length.
    # It returns a tuple of (cumulative sequence lengths, maximum sequence length,
    # total number of elements), where the last entry equals the final value of the
    # cumulative sequence lengths.
if not isinstance(qkv, NestedTensor):
raise ValueError("QKV must be nested for flash cumulative_seq_len calculation.")
if qkv.lengths() is None:
# TODO: Explore performance impact of copying
cumulative_seqlen = qkv.offsets().to(dtype=torch.int32, device=qkv.device)
max_seqlen = qkv._get_max_seqlen()
n_elem = qkv.values().shape[0]
else:
# TODO: Explore performance impact of copying
cumulative_seqlen = (
qkv.lengths().cumsum(0).to(dtype=torch.int32, device=qkv.device)
)
max_seqlen = qkv._get_max_seqlen()
# TODO: Explore performance impact when compiling
n_elem = int(cumulative_seqlen[-1].item())
return cumulative_seqlen, max_seqlen, n_elem
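# Editor's note: a minimal, self-contained sketch (hypothetical helper, not part of the
# original module) illustrating the metadata computed above. For a jagged NT whose
# per-sequence lengths are [2, 3, 1], the offsets are [0, 2, 5, 6], so the cumulative
# sequence lengths, max sequence length, and total token count come out as below.
def _example_cumulative_seqlen_metadata():
    lengths = torch.tensor([2, 3, 1])
    offsets = torch.cat([torch.zeros(1, dtype=torch.int64), lengths.cumsum(0)])
    cumulative_seqlen = offsets.to(dtype=torch.int32)  # tensor([0, 2, 5, 6], dtype=torch.int32)
    max_seqlen = int(lengths.max())  # 3
    n_elem = int(cumulative_seqlen[-1].item())  # 6 packed tokens across the batch
    return cumulative_seqlen, max_seqlen, n_elem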
def _is_safe_to_get_storage_as_tensor(tensor: torch.Tensor):
    # This function checks if a nested tensor is valid for
    # use with the flash-attention and efficient_attention kernels without
    # needing to call contiguous on the nested tensor input.
    # It checks that the storage offsets' adjacent differences are a constant
    # multiple of the previous tensor in the nested tensor and that the strides
    # are monotonically decreasing. This check is done after calling transpose on
    # the nested tensor, resulting in an NT of shape [bsz, {seq_len}, num_heads, dim].
    # Returns True if the storage can be used as-is, False if contiguous() needs to
    # be called on the input first.
assert isinstance(tensor, NestedTensor)
offsets = tensor.offsets()
strides = tensor._strides
n_tensors = offsets.size(0) - 1
if n_tensors <= 1:
return True
# Check initially that the tensor strides are in strictly descending order
prev_stride = strides[1]
for stride in strides[2:]:
if prev_stride <= stride:
# This would mean that the last stride is greater than the seq_len
# stride
return False
prev_stride = stride
# Congrats you made it!
return True
def _view_as_dense(
tensor: torch.Tensor, Nnz: int, num_heads: int, head_dim: int
) -> torch.Tensor:
if tensor.is_nested:
return tensor.values()
return tensor.view(Nnz, num_heads, head_dim)
# TODO: Next iteration should add test cases and check it works
# def _sdpa_nested_preprocessing_with_broadcast(query, key, value):
# # Query (Batch x Num_heads x {Q_seq_len} x Dim_per_head)
# # Key (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
# # Value (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
# q_batch_size = query.size(0)
# k_batch_size = key.size(0)
# v_batch_size = value.size(0)
# output_batch_size = max(q_batch_size, k_batch_size, v_batch_size)
# q_num_heads = query.size(1)
# k_num_heads = key.size(1)
# v_num_heads = value.size(1)
# output_num_heads = max(q_num_heads, k_num_heads, v_num_heads)
# head_dim_qk = query.size(3)
# head_dim_v = value.size(3)
# q_t = query.transpose(1, 2)
# k_t = key.transpose(1, 2)
# v_t = value.transpose(1, 2)
# # Checks in sdp_utils ensure that if {*}_batch_size/{*}_num_heads !=
# # output_batch_size/num_heads then they are 1
# q_batch_size_needs_broadcast = q_batch_size != output_batch_size
# k_batch_size_needs_broadcast = k_batch_size != output_batch_size
# v_batch_size_needs_broadcast = v_batch_size != output_batch_size
# # If {*}_batch_size_needs_broadcast, then
# # (1) max_seqlen_batch_{*} is given by {*}_t.size(1)
# # this is because needs_broadcast indicates that the batch_size is 1
# # and hence there is only 1 value for seq_len
# # (2) The cum_seq_lens are given by [0, {*}_t.size(1), 2 * {*}_t.size(1),
#     #         ..., output_batch_size * {*}_t.size(1)]
# # (3) Nnz_{*} is given by output_batch_size * {*}_t.size(1)
# if q_batch_size_needs_broadcast or not q_t.is_nested:
# max_seqlen_batch_q = q_t.size(1)
# cumulative_sequence_length_q = torch.arange(
# 0,
# (output_batch_size + 1) * max_seqlen_batch_q,
# max_seqlen_batch_q,
# device=q_t.device,
# dtype=torch.int32,
# )
# Nnz_q = output_batch_size * max_seqlen_batch_q
# else:
# (
# cumulative_sequence_length_q,
# max_seqlen_batch_q,
# Nnz_q,
# ) = _cumulative_and_max_seq_len_nnz(q_t)
# if k_batch_size_needs_broadcast and v_batch_size_needs_broadcast:
# assert k_t.size(1) == v_t.size(1)
# max_seqlen_batch_kv = k_t.size(1)
# cumulative_sequence_length_kv = torch.arange(
# 0,
# (output_batch_size + 1) * max_seqlen_batch_kv,
# max_seqlen_batch_kv,
# device=k_t.device,
# dtype=torch.int32,
# )
# Nnz_kv = output_batch_size * max_seqlen_batch_kv
# else:
# cumulative_sequence_length_kv, max_seqlen_batch_kv, Nnz_kv = (
# _cumulative_and_max_seq_len_nnz(v_t)
# if k_batch_size_needs_broadcast
# else _cumulative_and_max_seq_len_nnz(k_t)
# )
# q_num_heads_needs_broadcast = q_num_heads != output_num_heads
# k_num_heads_needs_broadcast = k_num_heads != output_num_heads
# v_num_heads_needs_broadcast = v_num_heads != output_num_heads
# if not q_t.is_nested:
# query_buffer_reshaped = q_t.expand(
# output_batch_size, q_t.size(1), output_num_heads, head_dim_qk
# )
# query_buffer_reshaped = query_buffer_reshaped.reshape(
# Nnz_q, output_num_heads, head_dim_qk
# )
# else:
# if not q_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(q_t):
# q_t = q_t.contiguous()
# # If we are broadcasting then Nnz_q will be the output_batch_size since
# # seq_len is 1
# effective_batch_size_q = (
# output_batch_size if q_batch_size_needs_broadcast else Nnz_q
# )
# query_buffer_reshaped = _view_as_dense(
# q_t, effective_batch_size_q, output_num_heads, head_dim_qk
# )
# # If the physical layout of the NestedTensor's storage
# # is not: batch, {seq_len}, num_heads, head_dim then we need
# # to call contiguous
# if not k_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(k_t):
# k_t = k_t.contiguous()
# if not v_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(v_t):
# v_t = v_t.contiguous()
# effective_batch_size_k = (
# output_batch_size if k_batch_size_needs_broadcast else Nnz_kv
# )
# key_buffer_reshaped = _view_as_dense(
# k_t, effective_batch_size_k, output_num_heads, head_dim_qk
# )
# effective_batch_size_v = (
# output_batch_size if v_batch_size_needs_broadcast else Nnz_kv
# )
# value_buffer_reshaped = _view_as_dense(
# v_t, effective_batch_size_v, output_num_heads, head_dim_v
# )
# if not q_batch_size_needs_broadcast:
# output_shape = q_t._size
# if head_dim_v != head_dim_qk:
# output_shape[-1] = head_dim_v
# if q_num_heads_needs_broadcast:
# output_shape[1] = output_num_heads
# else:
# output_shape = torch.empty(3, dtype=torch.int64, device=torch.device("cpu"))
# output_shape[0] = q_t.size(1)
# output_shape[1] = output_num_heads
# output_shape[2] = head_dim_v
# return (
# query_buffer_reshaped,
# key_buffer_reshaped,
# value_buffer_reshaped,
# cumulative_sequence_length_q,
# cumulative_sequence_length_kv,
# max_seqlen_batch_q,
# max_seqlen_batch_kv,
# output_shape,
# )
def _sdpa_nested_preprocessing(query, key, value):
# Query (Batch x Num_heads x {Q_seq_len} x Dim_per_head)
# Key (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
# Value (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
q_batch_size = query.size(0)
k_batch_size = key.size(0)
v_batch_size = value.size(0)
q_num_heads = query.size(1)
k_num_heads = key.size(1)
v_num_heads = value.size(1)
if not (q_batch_size == k_batch_size and q_batch_size == v_batch_size) or not (
q_num_heads == k_num_heads and k_num_heads == v_num_heads
):
raise RuntimeError(
"This path is currently not implemented for jagged layout NT."
)
# return _sdpa_nested_preprocessing_with_broadcast(query, key, value)
num_heads = query.size(1)
head_dim_qk = query.size(3)
head_dim_v = value.size(3)
q_t = query.transpose(1, 2)
k_t = key.transpose(1, 2)
v_t = value.transpose(1, 2)
(
cumulative_sequence_length_q,
max_seqlen_batch_q,
Nnz_q,
) = _cumulative_and_max_seq_len_nnz(q_t)
(
cumulative_sequence_length_kv,
max_seqlen_batch_kv,
Nnz_kv,
) = _cumulative_and_max_seq_len_nnz(k_t)
    # [TODO] K and V have to have the same Nnz; should probably torch_check.
    # Assume they do so that we don't have to iterate over v.
# If the physical layout of the NestedTensor's storage
# is not: batch, {seq_len}, num_heads, head_dim then we need
# to call contiguous
if not q_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(q_t):
q_t = q_t.contiguous()
if not k_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(k_t):
k_t = k_t.contiguous()
if not v_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(v_t):
v_t = v_t.contiguous()
query_buffer_reshaped = _view_as_dense(q_t, Nnz_q, num_heads, head_dim_qk)
key_buffer_reshaped = _view_as_dense(k_t, Nnz_kv, num_heads, head_dim_qk)
value_buffer_reshaped = _view_as_dense(v_t, Nnz_kv, num_heads, head_dim_v)
output_nt_info = {
"offsets": q_t.offsets(),
"lengths": q_t.lengths(),
"max_seqlen": q_t._get_max_seqlen(),
"min_seqlen": q_t._get_min_seqlen(),
}
return (
query_buffer_reshaped,
key_buffer_reshaped,
value_buffer_reshaped,
cumulative_sequence_length_q,
cumulative_sequence_length_kv,
max_seqlen_batch_q,
max_seqlen_batch_kv,
output_nt_info,
)
def _pad_last_dim(
tensor: torch.Tensor, alignment_size: int, slice: bool
) -> torch.Tensor:
    # FlashAttentionV2 requires the head dimension to be a multiple of 8.
    # This padding was previously done within the kernel; however, that causes
    # the kernel to potentially alias query, key, and value.
    # So instead we pad the head dimensions to be a multiple of 8
    # in the composite region.
last_dim_size = tensor.size(-1)
if last_dim_size % alignment_size == 0:
return tensor
pad_count = alignment_size - (last_dim_size % alignment_size)
tensor = torch.nn.functional.pad(tensor, [0, pad_count])
if slice:
return tensor[..., 0:last_dim_size]
return tensor
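# Editor's note: an illustrative sketch (hypothetical helper, not part of the original
# module) of how _pad_last_dim rounds the head dimension up to a multiple of 8 for
# FlashAttention. With head_dim=7 the tensor is zero-padded to 8; slice=True trims it back.
def _example_pad_last_dim():
    t = torch.randn(2, 4, 3, 7)               # head_dim = 7 (not a multiple of 8)
    padded = _pad_last_dim(t, 8, False)        # shape: (2, 4, 3, 8)
    restored = _pad_last_dim(t, 8, True)       # padded then sliced back to (2, 4, 3, 7)
    assert padded.shape[-1] == 8 and restored.shape[-1] == 7
    return padded, restored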
# TODO: coalesce with torch/nn/utils/attention.py
def _calculate_scale(query, scale):
# TODO: Investigate why math.sqrt() isn't properly handled by Dynamo?
softmax_scale = scale if scale is not None else torch.sym_sqrt(1.0 / query.size(-1))
return softmax_scale
def _post_process_flash_output(out: torch.Tensor, og_size):
if not out.is_nested and out.size(-1) != og_size:
out = out[..., 0:og_size]
return out
def _is_computing_meta_flops(x):
# Note: there's a use case of using meta tensors & the dispatch-based flop counter.
# We can use this function to check for this scenario in order to handle it specially.
if not torch.jit.is_scripting() and x.device.type == "meta":
torch_dispatch_mode_stack = (
torch.utils._python_dispatch._get_current_dispatch_mode_stack()
)
return any(
type(x) == torch.utils.flop_counter._FlopCounterMode
for x in torch_dispatch_mode_stack
)
return False
def _autocast(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_mask: Optional[torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
"""
[Autocasting SDPA for NJT]
Normal autocasting doesn't work for NJT+SDPA right now:
* NJT intercepts the __torch_function__ call for scaled_dot_product_attention, which happens
before we get to any aten ops or dispatcher logic; then the torch_function logic calls into
efficient attention or flash attention. So, autocasting on the scaled_dot_product_attention
op won't work because we never see that aten op.
* If we put autocasting on `_flash_attention_forward`, then we'll get autocasting to run, but
      the kernel selection logic in torch_function handling (i.e. jagged_scaled_dot_product_attention)
won't work correctly: the kernel selection logic will run before autocasting, and choose
a kernel based on the un-autocasted dtypes; but then autocasting will run and the actual
attention computation will happen in a different dtype.
An alternative is to just change the backend selection logic for SDPA+NJT to be autocast-aware
and rely on autocasting to do the actual conversions for flash attention / efficient attention.
However, by manually doing the actual autocast before the backend selection, we ensure that the
autocast handling for backend selection doesn't diverge from the autocast handling for the
actual dtype conversions.
"""
device_type = query.device.type
# meta device is not supported by autocast, so break early for it
if _is_computing_meta_flops(query) or not torch.is_autocast_enabled(device_type):
return query, key, value, attn_mask
def cvt(x):
if x is None:
return x
target_dtype = torch.get_autocast_dtype(device_type)
if (
(not x.dtype.is_floating_point)
or x.dtype == target_dtype
or x.dtype == torch.float64
):
return x
return x.to(target_dtype)
return cvt(query), cvt(key), cvt(value), cvt(attn_mask)
def jagged_scaled_dot_product_attention(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
dropout_p=0.0,
is_causal=False,
scale=None,
enable_gqa=False,
):
query, key, value, attn_mask = _autocast(query, key, value, attn_mask)
_validate_sdpa_input(query, key, value, attn_mask, dropout_p, is_causal, scale)
# for mypy, ugh
assert (
isinstance(query, NestedTensor)
and isinstance(key, NestedTensor)
and isinstance(value, NestedTensor)
)
from torch.nested._internal.nested_tensor import (
nested_view_from_values_offsets_lengths,
)
# Special path for non-ragged sequence length (e.g. for SAM where we have a ragged
# second batch dim instead). For this case, we can just send the dense buffers through
# vanilla SDPA.
if query.dim() > 3 and key.dim() > 3 and value.dim() > 3 and query._ragged_idx == 1:
output = F.scaled_dot_product_attention(
query.values(),
key.values(),
value.values(),
attn_mask=(
attn_mask.values() if isinstance(attn_mask, NestedTensor) else attn_mask
),
dropout_p=dropout_p,
is_causal=is_causal,
scale=scale,
)
return nested_view_from_values_offsets_lengths(
output,
query.offsets(),
query.lengths(),
min_seqlen=query._maybe_min_seqlen, # type: ignore[attr-defined]
max_seqlen=query._maybe_max_seqlen, # type: ignore[attr-defined]
)
compute_logsumexp = query.requires_grad or key.requires_grad or value.requires_grad
backend_choice = _select_sdp_backend(
query, key, value, attn_mask, dropout_p, is_causal, enable_gqa
)
if _is_computing_meta_flops(query):
# Backend choice will probably not be correct if we have a meta device,
# because backend choice is device-aware. In this case, we mostly just
# want to avoid using math backend (which does a .item() call).
# Arbitrarily choose flash attention.
backend_choice = SDPBackend.FLASH_ATTENTION
if backend_choice == SDPBackend.FLASH_ATTENTION:
og_size = query.size(-1)
query_padded = _pad_last_dim(query, 8, False)
key_padded = _pad_last_dim(key, 8, False)
value_padded = _pad_last_dim(value, 8, False)
# We need to calculate the scale based off the OG head dim size
og_scale = _calculate_scale(query, scale)
(
query_buffer_reshaped,
key_buffer_reshaped,
value_buffer_reshaped,
cumulative_sequence_length_q,
cumulative_sequence_length_kv,
max_seqlen_batch_q,
max_seqlen_batch_kv,
output_nt_info,
) = _sdpa_nested_preprocessing(query_padded, key_padded, value_padded)
(
attention,
_logsumexp,
_philox_seed,
_philox_offset,
_debug_attn_mask,
) = torch.ops.aten._flash_attention_forward(
query_buffer_reshaped,
key_buffer_reshaped,
value_buffer_reshaped,
cumulative_sequence_length_q,
cumulative_sequence_length_kv,
max_seqlen_batch_q,
max_seqlen_batch_kv,
dropout_p,
is_causal,
False,
scale=og_scale,
)
# Reshape output to convert nnz to batch_size and seq_len
attention = nested_view_from_values_offsets_lengths(
attention, # output from flash_attn is [total_q, num_heads, head_size_og]
**output_nt_info,
).transpose(1, 2)
return _post_process_flash_output(attention, og_size)
elif backend_choice == SDPBackend.EFFICIENT_ATTENTION:
(
query_reshaped,
key_reshaped,
value_reshaped,
cumulative_sequence_length_q,
cumulative_sequence_length_kv,
max_seqlen_batch_q,
max_seqlen_batch_kv,
output_nt_info,
) = _sdpa_nested_preprocessing(query, key, value)
(
attention,
log_sumexp,
seed,
offset,
max_seqlen_q,
max_seqlen_batch_kv,
) = torch.ops.aten._efficient_attention_forward(
query_reshaped.unsqueeze(0),
key_reshaped.unsqueeze(0),
value_reshaped.unsqueeze(0),
None,
cumulative_sequence_length_q,
cumulative_sequence_length_kv,
max_seqlen_batch_q,
max_seqlen_batch_kv,
dropout_p,
int(is_causal),
compute_logsumexp,
scale=scale,
)
# Reshape output to convert nnz to batch_size and seq_len
return nested_view_from_values_offsets_lengths(
attention.squeeze(0),
**output_nt_info,
).transpose(1, 2)
elif backend_choice == SDPBackend.CUDNN_ATTENTION:
(
query_reshaped,
key_reshaped,
value_reshaped,
cumulative_sequence_length_q,
cumulative_sequence_length_kv,
max_seqlen_batch_q,
max_seqlen_batch_kv,
output_nt_info,
) = _sdpa_nested_preprocessing(query, key, value)
(
attention,
logsumexp,
cum_seqlen_q,
cum_seqlen_kv,
max_seqlen_q,
max_seqlen_kv,
seed,
offset,
_,
) = torch.ops.aten._cudnn_attention_forward(
query_reshaped,
key_reshaped,
value_reshaped,
attn_mask,
cumulative_sequence_length_q,
cumulative_sequence_length_kv,
max_seqlen_batch_q,
max_seqlen_batch_kv,
compute_logsumexp,
dropout_p,
is_causal,
False,
scale=scale,
)
return nested_view_from_values_offsets_lengths(
attention,
**output_nt_info,
).transpose(1, 2)
elif backend_choice == SDPBackend.MATH:
# save the offsets and shape of the inputs, so we can reshape the final output
        # query @ key = attn: [B, D1, j0, D'] @ [B, D1, D', j1] = [B, D1, j0, j1]
# attn @ value = out: [B, D1, j0, j1] @ [B, D1, j1, D2] = [B, D1, j0, D2]
offsets = query.offsets()
q_lengths = query.lengths()
min_seqlen = query._maybe_min_seqlen
max_seqlen = query._maybe_max_seqlen
d1 = query._size[1]
d2 = value._size[-1]
# convert jagged layout Nested Tensor to strided layout Nested Tensor
        # which supports the math implementation of SDPA
def get_strided_layout_nested_tensor(jagged_layout_nt):
lengths = jagged_layout_nt._offsets[1:] - jagged_layout_nt._offsets[:-1]
transpose = torch.transpose(jagged_layout_nt, 1, 2)
tensor_list = transpose.values().split(list(lengths), dim=0)
strided_nt = torch.nested.as_nested_tensor(list(tensor_list))
strided_nt = strided_nt.transpose(1, 2).contiguous()
return strided_nt
query = get_strided_layout_nested_tensor(query)
key = get_strided_layout_nested_tensor(key)
value = get_strided_layout_nested_tensor(value)
attn_out = torch._scaled_dot_product_attention_math(
query, key, value, attn_mask, dropout_p, is_causal, scale=scale
)[0]
# convert strided layout Nested Tensor back to jagged layout Nested Tensor
attn_out = attn_out.transpose(1, 2).contiguous().values()
attn_out = attn_out.view(-1, d1, d2)
attn_out = nested_view_from_values_offsets_lengths(
attn_out,
offsets,
lengths=q_lengths,
min_seqlen=min_seqlen,
max_seqlen=max_seqlen,
).transpose(1, 2)
return attn_out
else:
raise RuntimeError(
"No viable backend for scaled_dot_product_attention was found."
)
```
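Editor's note: a minimal usage sketch (not part of the file above) of the jagged-layout path this module implements: build query/key/value as jagged NestedTensors and call the public SDPA entry point, which dispatches into `jagged_scaled_dot_product_attention`. Sizes are illustrative; without CUDA the math fallback is selected, while on CUDA a fused kernel may be chosen.
```py
import torch
import torch.nn.functional as F

device = "cuda" if torch.cuda.is_available() else "cpu"
num_heads, head_dim = 4, 8

def make_njt(lengths):
    # Components are (seq_len, num_heads, head_dim); the jagged dim is dim 1 of the NT,
    # so transpose(1, 2) yields the (batch, num_heads, ragged seq_len, head_dim) layout SDPA expects.
    comps = [torch.randn(L, num_heads, head_dim, device=device) for L in lengths]
    return torch.nested.nested_tensor(comps, layout=torch.jagged).transpose(1, 2)

q, k, v = make_njt([3, 5]), make_njt([3, 5]), make_njt([3, 5])
out = F.scaled_dot_product_attention(q, k, v)
print(out.is_nested)  # True; the output keeps the jagged layout of the query
```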
|
============================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 2.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\__init__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from torch.nn.parameter import ( # usort: skip
Buffer as Buffer,
Parameter as Parameter,
UninitializedBuffer as UninitializedBuffer,
UninitializedParameter as UninitializedParameter,
)
from torch.nn.modules import * # usort: skip # noqa: F403
from torch.nn import (
attention as attention,
functional as functional,
init as init,
modules as modules,
parallel as parallel,
parameter as parameter,
utils as utils,
)
from torch.nn.parallel import DataParallel as DataParallel
def factory_kwargs(kwargs):
r"""Return a canonicalized dict of factory kwargs.
Given kwargs, returns a canonicalized dict of factory kwargs that can be directly passed
to factory functions like torch.empty, or errors if unrecognized kwargs are present.
This function makes it simple to write code like this::
class MyModule(nn.Module):
def __init__(self, **kwargs):
factory_kwargs = torch.nn.factory_kwargs(kwargs)
self.weight = Parameter(torch.empty(10, **factory_kwargs))
Why should you use this function instead of just passing `kwargs` along directly?
1. This function does error validation, so if there are unexpected kwargs we will
immediately report an error, instead of deferring it to the factory call
2. This function supports a special `factory_kwargs` argument, which can be used to
explicitly specify a kwarg to be used for factory functions, in the event one of the
factory kwargs conflicts with an already existing argument in the signature (e.g.
in the signature ``def f(dtype, **kwargs)``, you can specify ``dtype`` for factory
functions, as distinct from the dtype argument, by saying
``f(dtype1, factory_kwargs={"dtype": dtype2})``)
"""
if kwargs is None:
return {}
simple_keys = {"device", "dtype", "memory_format"}
expected_keys = simple_keys | {"factory_kwargs"}
if not kwargs.keys() <= expected_keys:
raise TypeError(f"unexpected kwargs {kwargs.keys() - expected_keys}")
    # copy so that the caller's factory_kwargs dict is left untouched
r = dict(kwargs.get("factory_kwargs", {}))
for k in simple_keys:
if k in kwargs:
if k in r:
raise TypeError(
f"{k} specified twice, in **kwargs and in factory_kwargs"
)
r[k] = kwargs[k]
return r
```
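Editor's note: a small illustrative sketch of the `factory_kwargs` behavior described in the docstring above; the keys shown are the ones the function actually validates.
```py
import torch

# Plain device/dtype kwargs pass straight through.
torch.nn.factory_kwargs({"device": "cpu", "dtype": torch.float64})
# -> {'device': 'cpu', 'dtype': torch.float64}

# factory_kwargs= lets a caller route a dtype to factory functions even when the
# surrounding signature already has a dtype argument of its own.
torch.nn.factory_kwargs({"factory_kwargs": {"dtype": torch.bfloat16}})
# -> {'dtype': torch.bfloat16}

# Unknown keys fail fast instead of surfacing later inside torch.empty().
try:
    torch.nn.factory_kwargs({"devize": "cpu"})
except TypeError as e:
    print(e)  # unexpected kwargs {'devize'}
```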
|
==============================================================================================================
SOURCE CODE FILE: _reduction.py
LINES: 1
SIZE: 1.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\_reduction.py
ENCODING: utf-8
```py
import warnings
from typing import Optional
# NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h
def get_enum(reduction: str) -> int:
if reduction == "none":
ret = 0
elif reduction == "mean":
ret = 1
elif reduction == "elementwise_mean":
warnings.warn(
"reduction='elementwise_mean' is deprecated. "
"Please use reduction='mean' instead."
)
ret = 1
elif reduction == "sum":
ret = 2
else:
ret = -1 # TODO: remove once JIT exceptions support control flow
raise ValueError(f"{reduction} is not a valid value for reduction")
return ret
# In order to support previous versions, accept boolean size_average and reduce
# and convert them into the new constants for now
# We use these functions in torch/legacy as well, in which case we'll silence the warning
def legacy_get_string(
size_average: Optional[bool],
reduce: Optional[bool],
emit_warning: bool = True,
) -> str:
warning = "size_average and reduce args will be deprecated, please use reduction='{}' instead."
if size_average is None:
size_average = True
if reduce is None:
reduce = True
if size_average and reduce:
ret = "mean"
elif reduce:
ret = "sum"
else:
ret = "none"
if emit_warning:
warnings.warn(warning.format(ret))
return ret
def legacy_get_enum(
size_average: Optional[bool],
reduce: Optional[bool],
emit_warning: bool = True,
) -> int:
return get_enum(legacy_get_string(size_average, reduce, emit_warning))
```
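Editor's note: an illustrative sketch of how the legacy `(size_average, reduce)` pairs map onto the new `reduction` strings and the enum values used by the ATen kernels.
```py
from torch.nn import _reduction as _Reduction

# String -> enum used by the ATen kernels ('none'=0, 'mean'=1, 'sum'=2).
assert _Reduction.get_enum("mean") == 1

# Legacy (size_average, reduce) pairs collapse onto the new reduction strings:
#   reduce=False                     -> 'none'
#   reduce=True, size_average=False  -> 'sum'
#   reduce=True, size_average=True   -> 'mean' (also the default when both are None)
assert _Reduction.legacy_get_string(None, False, emit_warning=False) == "none"
assert _Reduction.legacy_get_string(False, True, emit_warning=False) == "sum"
assert _Reduction.legacy_get_enum(None, None, emit_warning=False) == 1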
|
======================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 5.92 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\attention\__init__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
""" This module contains functions and classes that alter the behavior of torch.nn.functional.scaled_dot_product_attention """
import contextlib
from collections.abc import Iterable
from typing import Union
from warnings import warn
import torch.backends.cuda
from torch._C import _SDPBackend as SDPBackend
from torch.backends.cuda import (
can_use_efficient_attention,
can_use_flash_attention,
SDPAParams,
)
__all__: list[str] = ["SDPBackend", "sdpa_kernel", "WARN_FOR_UNFUSED_KERNELS"]
# Note: [SDPA warnings]
# TODO: Consider using this for sdpa regardless of subclasses
# This only affects users of bias subclasses
# If this is set to True, we will warn the user if they are not using the fused kernels
# As well, it will raise warnings for all the reasons why the fused kernels can't be run.
# To set this to True, run
# torch.nn.attention.WARN_FOR_UNFUSED_KERNELS = True
WARN_FOR_UNFUSED_KERNELS = False
# Hacks for Sphinx documentation:
# https://stackoverflow.com/questions/38765577/overriding-sphinx-autodoc-alias-of-for-import-of-private-class
SDPBackend = SDPBackend
r"""An enum-like class that contains the different backends for scaled dot product attention.
This backend class is designed to be used with the sdpa_kernel context manager.
The following Enums are available:
- ERROR: An error occurred when trying to determine the backend.
- MATH: The math backend for scaled dot product attention.
- FLASH_ATTENTION: The flash attention backend for scaled dot product attention.
- EFFICIENT_ATTENTION: The efficient attention backend for scaled dot product attention.
- CUDNN_ATTENTION: The cuDNN backend for scaled dot product attention.
See :func:`torch.nn.attention.sdpa_kernel` for more details.
.. warning:: This class is in beta and subject to change.
"""
SDPBackend.__module__ = __name__
SDPBackend.__name__ = "SDPBackend"
def _raise_kernel_warnings(params: SDPAParams) -> None:
"""
    If WARN_FOR_UNFUSED_KERNELS is set to True, this will raise warnings
    for all the reasons why the fused kernels can't be run when using the bias subclasses.
"""
if WARN_FOR_UNFUSED_KERNELS:
if not can_use_efficient_attention(params):
warn("Efficient attention can't be used because:")
can_use_efficient_attention(params, True)
if not can_use_flash_attention(params):
warn("Flash attention can't be used because:")
can_use_flash_attention(params, True)
_backend_names = {
"cudnn": "CUDNN_ATTENTION",
"flash": "FLASH_ATTENTION",
"mem_efficient": "EFFICIENT_ATTENTION",
"math": "MATH",
}
def _backend_from_string(name: str):
return getattr(SDPBackend, name)
def _cur_sdpa_kernel_backends():
backends: list[SDPBackend] = []
for name, val in _backend_names.items():
if getattr(torch.backends.cuda, f"{name}_sdp_enabled")():
backends.append(getattr(SDPBackend, val))
return backends
def _sdpa_kernel(backends: Iterable[SDPBackend]):
for name, val in _backend_names.items():
enabled = getattr(SDPBackend, val) in backends
getattr(torch.backends.cuda, f"enable_{name}_sdp")(enabled)
@contextlib.contextmanager
def sdpa_kernel(
backends: Union[list[SDPBackend], SDPBackend], set_priority: bool = False
):
r"""
Context manager to select which backend to use for scaled dot product attention.
.. warning:: This function is beta and subject to change.
Args:
backends (Union[List[SDPBackend], SDPBackend]): A backend or list of backends for scaled dot product attention.
        set_priority (bool=False): Whether the ordering of the backends is interpreted as their priority order.
Example:
.. code-block:: python
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import SDPBackend, sdpa_kernel
# Only enable flash attention backend
with sdpa_kernel(SDPBackend.FLASH_ATTENTION):
scaled_dot_product_attention(...)
# Enable the Math or Efficient attention backends
with sdpa_kernel([SDPBackend.MATH, SDPBackend.EFFICIENT_ATTENTION]):
scaled_dot_product_attention(...)
This context manager can be used to select which backend to use for scaled dot product attention.
    Upon exiting the context manager, the previous state of the flags will be restored.
"""
assert isinstance(
backends, (list, SDPBackend)
), "Backend must be an instance of SDPBackend or a list of SDPBackend instances"
if isinstance(backends, SDPBackend):
backends = [backends]
backends_set = set(backends)
user_priority = None
previous_priority = None
if set_priority:
user_priority = [
int(x) for idx, x in enumerate(backends) if backends.index(x) == idx # type: ignore[call-overload]
]
previous_priority = torch._C._get_sdp_priority_order()
for backend in previous_priority:
if backend not in user_priority:
user_priority.append(int(backend))
previous_backends = _cur_sdpa_kernel_backends()
try:
if set_priority:
torch._C._set_sdp_priority_order(user_priority) # type: ignore[arg-type]
_sdpa_kernel(backends_set)
yield {}
finally:
_sdpa_kernel(previous_backends)
if set_priority:
torch._C._set_sdp_priority_order(previous_priority) # type: ignore[arg-type]
# variadic version of sdpa_kernel for dynamo to use while reconstructing
@contextlib.contextmanager
def _sdpa_kernel_variadic(*backends: SDPBackend):
with sdpa_kernel(list(backends)):
yield
def _get_flash_version() -> str:
"""This returns the closest matching tag for the flash attention backend"""
return "2.5.7"
```
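Editor's note: a minimal sketch of `sdpa_kernel` with `set_priority=True`, assuming a CUDA device with fp16 inputs so that flash attention is eligible; the list order is then treated as the priority order, with the math backend as a fallback.
```py
import torch
from torch.nn.attention import SDPBackend, sdpa_kernel
from torch.nn.functional import scaled_dot_product_attention

q = k = v = torch.randn(2, 4, 16, 8, device="cuda", dtype=torch.float16)

with sdpa_kernel([SDPBackend.FLASH_ATTENTION, SDPBackend.MATH], set_priority=True):
    out = scaled_dot_product_attention(q, k, v, is_causal=True)
```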
|
====================================================================================================================
SOURCE CODE FILE: _utils.py
LINES: 1
SIZE: 2.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\attention\_utils.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Defines utilities for interacting with scaled_dot_product_attention"""
import math
from typing import Optional, Union
import torch
__all__: list[str] = []
def _input_requires_grad(*tensors: torch.Tensor) -> bool:
"""Returns True if any of the tensors requires grad"""
return any(t.requires_grad for t in tensors)
def _postprocess_flash_output(inpt_tensor: torch.Tensor, og_size: int) -> torch.Tensor:
"""Handles the unpad of the last dimension"""
if inpt_tensor.size(-1) != og_size:
return inpt_tensor[..., :og_size]
return inpt_tensor
def _calculate_scale(head_dim_size: int, scale: Optional[float]) -> float:
"""
    For FlashAttention we pad the head dimension to a multiple of 8, so the softmax scale must be
    computed from the original head size and not the padded one.
"""
if scale is not None:
return scale
return 1.0 / math.sqrt(head_dim_size)
_SUPPORTED_HEAD_DIMS = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
def _supported_head_dim(n: Union[int, torch.SymInt]) -> bool:
"""Returns true if the head dim is supported by FlexAttention"""
return n in _SUPPORTED_HEAD_DIMS
def _validate_sdpa_input(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
dropout_p=0.0,
is_causal=False,
scale=None,
):
if query.dtype != key.dtype or query.dtype != value.dtype:
raise ValueError(
f"Expected query, key, and value to have the same dtype, "
f"but got query.dtype: {query.dtype}, key.dtype: {key.dtype}, "
f"and value.dtype: {value.dtype} instead."
)
if query.device != key.device or query.device != value.device:
raise ValueError(
f"Expected query, key, and value to have the same device type, "
f"but got query.device: {query.device}, key.device: {key.device}, "
f"and value.device: {value.device} instead."
)
if query.dim() < 2 or key.dim() < 2 or value.dim() < 2:
raise ValueError(
f"Expected query, key, and value to all be at least 2 dimensional, but got query.dim: "
f"{query.dim()}, key.dim: {key.dim()} and value.dim: {value.dim()} instead."
)
```
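Editor's note: a tiny sketch of the default softmax scale computed by `_calculate_scale` (1/sqrt(head_dim)) versus an explicitly supplied scale.
```py
import math
from torch.nn.attention._utils import _calculate_scale

head_dim = 64
assert _calculate_scale(head_dim, None) == 1.0 / math.sqrt(head_dim)  # default 1/sqrt(E)
assert _calculate_scale(head_dim, 0.25) == 0.25                        # explicit scale wins
```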
|
==================================================================================================================
SOURCE CODE FILE: bias.py
LINES: 1
SIZE: 13.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\attention\bias.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Defines bias subclasses that work with scaled_dot_product_attention"""
from enum import auto, IntEnum
from typing import Optional
from warnings import warn
import torch
import torch.nn.functional as F
from torch.backends.cuda import (
can_use_efficient_attention,
can_use_flash_attention,
is_flash_attention_available,
SDPAParams,
)
from torch.nn.attention import _raise_kernel_warnings
from torch.nn.attention._utils import (
_calculate_scale,
_input_requires_grad,
_postprocess_flash_output,
_validate_sdpa_input,
)
__all__ = ["causal_upper_left", "causal_lower_right", "CausalVariant", "CausalBias"]
torch._dynamo.allow_in_graph(is_flash_attention_available)
torch._dynamo.allow_in_graph(can_use_flash_attention)
torch._dynamo.allow_in_graph(can_use_efficient_attention)
torch._dynamo.allow_in_graph(SDPAParams)
class CausalVariant(IntEnum):
r"""
Enum for causal variants used in attention mechanisms.
Defines two types of causal biases:
`UPPER_LEFT`: Represents upper-left triangular bias for standard causal attention.
The equivalent pytorch code for constructing this bias is:
.. code-block:: python
torch.tril(torch.ones(size, dtype=torch.bool))
For instance, with `shape=(3,4)`, the materialized bias tensor will be:
.. code-block:: text
[[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0]]
    `LOWER_RIGHT`: Represents lower-right triangular bias; the included values are aligned to the lower
    right corner of the matrix.
The equivalent pytorch code for constructing this bias is:
.. code-block:: python
diagonal_offset = size[1] - size[0]
torch.tril(
torch.ones(size, dtype=torch.bool),
diagonal=diagonal_offset,
)
For instance, with `shape=(3,4)`, the materialized bias tensor will be:
.. code-block:: text
[[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 1]]
Note that these variants are equivalent to each other when the sequence lengths of the query and key/value
tensors are equal since the triangular matrix is square.
.. warning:: This enum is a prototype and subject to change.
"""
UPPER_LEFT = auto()
LOWER_RIGHT = auto()
class CausalBias(torch.Tensor):
"""
A bias representing causal attention patterns. For an overview of the bias structure, see the :class:`CausalVariant` enum.
    This class is used for defining causal (triangular) attention biases. For constructing the bias, there exist
two factory functions: :func:`causal_upper_left` and :func:`causal_lower_right`.
Example:
.. code-block:: python
from torch.nn.attention.bias import causal_lower_right
bsz, num_heads, seqlen_q, seqlen_kv, head_dim = 32, 8, 4, 12, 8
# Create a lower-right causal bias
attn_bias = causal_lower_right(seqlen_q, seqlen_kv)
q = torch.randn(bsz, num_heads, seqlen_q, head_dim, device="cuda", dtype=torch.float16)
k = torch.randn(bsz, num_heads, seqlen_kv, head_dim, device="cuda", dtype=torch.float16)
v = torch.randn(bsz, num_heads, seqlen_kv, head_dim, device="cuda", dtype=torch.float16)
out = F.scaled_dot_product_attention(q, k, v, attn_bias)
.. warning:: This class is a prototype and subject to change.
"""
def __init__(self, variant: CausalVariant, seq_len_q: int, seq_len_kv: int):
"""
Initializes the CausalBias instance with a specified variant and sequence lengths.
Args:
variant (CausalVariant): The type of causal bias to use (either UPPER_LEFT or LOWER_RIGHT).
seq_len_q (int): The sequence length of the query tensor.
seq_len_kv (int): The sequence length of the key/value tensor.
Raises a warning if the LOWER_RIGHT variant is used with seq_len_q > seq_len_kv, as it may produce NaNs.
"""
assert isinstance(variant, CausalVariant)
self.variant = variant
self.seq_len_q = seq_len_q
self.seq_len_kv = seq_len_kv
if seq_len_q > seq_len_kv and variant == CausalVariant.LOWER_RIGHT:
warn(
"Lower right causal bias will produce NaNs in the output when seq_len_q > seq_len_kv!"
)
def _upper_left(self, device: torch.device) -> torch.Tensor:
"""Upper left causal bias"""
return torch.tril(
torch.ones(self.seq_len_q, self.seq_len_kv, device=device, dtype=torch.bool)
)
def _lower_right(self, device: torch.device) -> torch.Tensor:
"""Lower right causal bias"""
diagonal_offset = self.seq_len_kv - self.seq_len_q
return torch.tril(
torch.ones(
self.seq_len_q, self.seq_len_kv, device=device, dtype=torch.bool
),
diagonal=diagonal_offset,
)
def _materialize(self, device: Optional[torch.device] = None) -> torch.Tensor:
"""
Materializes the causal bias into a tensor form.
Depending on the variant, this method generates either an upper-left or lower-right
triangular matrix to represent the causal bias.
Args:
device (Optional[torch.device]): The device on which to create the tensor. Defaults to CPU.
Returns:
torch.Tensor: The materialized bias tensor.
"""
if device is None:
device = torch.device("cpu")
if self.variant == CausalVariant.UPPER_LEFT:
return self._upper_left(device)
elif self.variant == CausalVariant.LOWER_RIGHT:
return self._lower_right(device)
@staticmethod
def _dispatch(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_mask: "CausalBias",
dropout_p: float = 0.0,
is_causal: bool = False,
scale: Optional[float] = None,
enable_gqa: bool = False,
) -> torch.Tensor:
r"""
Handles the logic for computing attention with the specified causal bias.
Args:
query (Tensor): Query tensor; shape :math:`(N, ..., L, E)`.
key (Tensor): Key tensor; shape :math:`(N, ..., S, E)`.
value (Tensor): Value tensor; shape :math:`(N, ..., S, Ev)`.
attn_mask (CausalBias): The type of causal attention to apply.
A boolean mask where a value of True indicates that the element *should* take part in attention.
A float mask of the same type as query, key, value that is added to the attention score.
dropout_p (float): Dropout probability; if greater than 0.0, dropout is applied
is_causal (bool): If true, assumes upper left causal attention masking and errors if both attn_mask and is_causal
are set.
scale (optional float): Scaling factor applied prior to softmax. If None, the default value is set
to :math:`\frac{1}{\sqrt{E}}`.
enable_gqa (optional bool): If set to True, Grouped Query Attention (GQA) is enabled, by default it is set to False.
Returns:
output (Tensor): Attention output; shape :math:`(N, ..., L, Ev)`.
Raises:
ValueError: If the causal bias variant is not a CausalVariant type.
"""
if is_causal:
raise ValueError("CausalBias should not be used with causal=True")
if (
attn_mask.seq_len_q == attn_mask.seq_len_kv
or attn_mask.variant == CausalVariant.UPPER_LEFT
):
return F.scaled_dot_product_attention(
query,
key,
value,
attn_mask=None,
dropout_p=dropout_p,
is_causal=True,
scale=scale,
enable_gqa=enable_gqa,
)
elif attn_mask.variant == CausalVariant.LOWER_RIGHT:
_validate_sdpa_input(query, key, value, None, dropout_p, is_causal, scale)
sdpa_params = SDPAParams(
query, key, value, None, dropout_p, is_causal, enable_gqa
)
if can_use_flash_attention(sdpa_params):
needs_padding = query.size(-1) % 8 != 0
og_head_size = query.size(-1)
og_scale = _calculate_scale(og_head_size, scale)
if needs_padding:
query = torch.nn.functional.pad(query, (0, 8 - query.size(-1) % 8))
key = torch.nn.functional.pad(key, (0, 8 - key.size(-1) % 8))
value = torch.nn.functional.pad(value, (0, 8 - value.size(-1) % 8))
out = torch.ops.aten._scaled_dot_product_flash_attention(
query,
key,
value,
dropout_p,
is_causal=True, # TODO: Flash accepts causal = True and for this particular op it means lower right
return_debug_mask=False,
scale=og_scale,
)[0]
return _postprocess_flash_output(out, og_head_size)
if can_use_efficient_attention(sdpa_params):
compute_log_sumexp = False
if _input_requires_grad(query, key, value):
compute_log_sumexp = True
return torch.ops.aten._efficient_attention_forward(
query.transpose(1, 2),
key.transpose(1, 2),
value.transpose(1, 2),
bias=None,
cu_seqlens_q=None,
cu_seqlens_k=None,
max_seqlen_q=None,
max_seqlen_k=None,
dropout_p=dropout_p,
custom_mask_type=int(attn_mask.variant),
compute_log_sumexp=compute_log_sumexp,
scale=scale,
seqlen_k=None,
)[0].transpose(1, 2)
else:
_raise_kernel_warnings(sdpa_params)
                # We can't use efficient attention; the only support for lower-right is via materialization
return F.scaled_dot_product_attention(
query,
key,
value,
attn_mask=attn_mask._materialize(query.device),
dropout_p=dropout_p,
is_causal=False,
scale=scale,
enable_gqa=enable_gqa,
)
else:
raise ValueError(
f"CausalBias.variant must be a CausalVariant type, but found: {attn_mask.variant}"
)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
"""Defines the behavior of torch.nn.functional.scaled_dot_product_attention when the attn_bias is an AttnBias"""
if kwargs is None:
kwargs = {}
if func != torch.nn.functional.scaled_dot_product_attention:
raise NotImplementedError(
"CausalBias only supports scaled_dot_product_attention"
)
return cls._dispatch(*args, **kwargs)
def __repr__(self): # type:ignore[override]
return self._materialize().__repr__()
def causal_upper_left(*size) -> CausalBias:
"""
Creates an upper-left triangular causal bias.
    This function generates an upper-left triangular matrix to represent causal attention bias with a
    diagonal offset set so that the included values are aligned to the upper left corner of the matrix.
    This is equivalent to the `is_causal=True` argument in `scaled_dot_product_attention`.
The equivalent pytorch code for constructing this bias is:
.. code-block:: python
torch.tril(torch.ones(size, dtype=torch.bool))
For instance, with `shape=(3,4)`, the materialized bias tensor will be:
.. code-block:: text
[[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0]]
Args:
size: The size of the bias matrix.
Returns:
CausalBias: The UPPER_LEFT triangular causal bias variant.
"""
assert len(size) == 2, "causal_upper_left only supports 2D tensors"
seq_len_q, seq_len_kv = size
return CausalBias(CausalVariant.UPPER_LEFT, seq_len_q, seq_len_kv)
def causal_lower_right(*size) -> CausalBias:
"""
Creates a lower-right triangular causal bias.
This function generates a lower-right triangular matrix to represent causal attention bias with a
    diagonal offset set so that the included values are aligned to the lower right corner of the matrix.
The equivalent pytorch code for constructing this bias is:
.. code-block:: python
diagonal_offset = size[1] - size[0]
torch.tril(
torch.ones(size, dtype=torch.bool),
diagonal=diagonal_offset,
)
For instance, with `shape=(3,4)`, the materialized bias tensor will be:
.. code-block:: text
[[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 1]]
Args:
size: The size of the bias matrix.
Returns:
CausalBias: The LOWER_RIGHT triangular causal bias variant.
"""
assert len(size) == 2, "causal_lower_right only supports 2D tensors"
seq_len_q, seq_len_kv = size
return CausalBias(CausalVariant.LOWER_RIGHT, seq_len_q, seq_len_kv)
```
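Editor's note: an illustrative comparison of the two causal variants for `seqlen_q=3`, `seqlen_kv=4`; printing a `CausalBias` materializes it on CPU, so the difference between the anchored diagonals is visible directly.
```py
from torch.nn.attention.bias import causal_lower_right, causal_upper_left

# Upper-left anchors the diagonal at (0, 0); lower-right anchors it at the bottom-right corner.
ul = causal_upper_left(3, 4)
lr = causal_lower_right(3, 4)
print(ul)  # first row attends to key 0 only
print(lr)  # first row already attends to keys 0 and 1
```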
|
===================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\attention\experimental\__init__.py
ENCODING: utf-8
```py
# Experimental features are not mature yet and are subject to change.
# We do not provide any BC/FC guarantees
```
|
===========================================================================================================================================
SOURCE CODE FILE: _paged_attention.py
LINES: 1
SIZE: 12.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\attention\experimental\_paged_attention.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""
This module implements Paged Attention on top of flex_attention.
This module is experimental and subject to change.
"""
from typing import Optional, Union
import torch
from torch.nn.attention.flex_attention import (
_identity,
_mask_mod_signature,
_score_mod_signature,
BlockMask,
noop_mask,
)
__all__ = ["PagedAttention"]
def _cdiv(
x: Union[int, float, torch.Tensor], multiple: Union[int, float, torch.Tensor]
):
return (x + multiple - 1) // multiple
class PagedAttention:
"""
PagedAttention supports flex attention inference with a large batch size.
With PagedAttention, a batch of key/value tensors with varying kv length
    is split into tensor blocks of fixed length and cached in a compact way.
Thus we can avoid redundant memory consumption due to varying kv length and
support a larger batch size.
"""
def __init__(
self,
n_pages: int,
page_size: int,
max_batch_size: int,
device: str = "cuda",
):
# number of pages
self.n_pages = n_pages
# number of tokens per page
self.page_size = page_size
# page table: [batch, logical_block_idx] -> physical_page_idx
self.page_table = -torch.ones(
(max_batch_size, self.n_pages), dtype=torch.int64, device=device
)
# capacity: batch_idx -> allocated sequence length
self.capacity = torch.zeros(max_batch_size, dtype=torch.int64, device=device)
        # indices of empty pages that are available for allocation
self.empty_pages = list(range(n_pages - 1, -1, -1))
# mapping from physical page index to logical page index
self.physical_to_logical = -torch.ones(
(max_batch_size, n_pages), dtype=torch.int64, device=device
)
def reserve(self, batch_idx: torch.Tensor, seq_len: torch.Tensor) -> None:
"""
Requests the capacity of a given batch to be at least enough to
hold `seq_len` elements.
Args:
batch_idx (Tensor): batch index to be reserved; shape :math:`(1)`.
seq_len (Tensor): minimum capacity for the given batch; shape :math:`(1)`.
"""
if seq_len <= self.capacity[batch_idx]:
return
num_pages_to_allocate = _cdiv(
seq_len - self.capacity[batch_idx], self.page_size
)
assert len(self.empty_pages) >= num_pages_to_allocate, (
f"requested {num_pages_to_allocate.item()} pages "
f"but there are only {len(self.empty_pages)} empty pages"
)
start_page_idx = self.capacity[batch_idx] // self.page_size
end_page_idx = start_page_idx + num_pages_to_allocate
# find empty physical pages
allocated_pages = torch.tensor(
self.empty_pages[-num_pages_to_allocate:],
device=num_pages_to_allocate.device,
)
self.empty_pages = self.empty_pages[:-num_pages_to_allocate]
# update page table
self.page_table[
batch_idx,
start_page_idx:end_page_idx,
] = allocated_pages
# update metadata
self.physical_to_logical[batch_idx, allocated_pages] = torch.arange(
start_page_idx.item(),
end_page_idx.item(),
device=num_pages_to_allocate.device,
)
self.capacity[batch_idx] += num_pages_to_allocate * self.page_size
def erase(self, batch_idx: torch.Tensor) -> None:
"""
Removes a single batch from paged attention.
Args:
batch_idx (Tensor): batch index to be removed; shape :math:`(1)`.
"""
# find allocated pages
allocated_page_idx = self.page_table[batch_idx] != -1
allocated_pages = self.page_table[batch_idx][allocated_page_idx]
# clean metadata
self.capacity[batch_idx] = 0
self.empty_pages += allocated_pages.tolist()
self.physical_to_logical[batch_idx][:, allocated_pages] = -1
self.page_table[batch_idx] = -1
def assign(
self,
batch_idx: torch.Tensor,
input_pos: torch.Tensor,
k_val: torch.Tensor,
v_val: torch.Tensor,
k_cache: torch.Tensor,
v_cache: torch.Tensor,
) -> None:
"""
Assigns new contents `val` to the storage `cache` at the location
`batch_idx` and `input_pos`.
Args:
batch_idx (Tensor): batch index; shape :math:`(B)`.
input_pos (Tensor): input positions to be assigned for the given batch; shape :math:`(B, S)`.
val (Tensor): value to be assigned; shape :math:`(B, H, S, D)`
cache (Tensor): the cache to store the values; shape:`(1, H, MAX_S, D)`
"""
if k_val.requires_grad:
raise RuntimeError("val must not require gradient")
B, H, S, K_D = k_val.shape
V_D = v_val.shape[3]
if B != batch_idx.shape[0]:
raise RuntimeError(
f"Expect val and batch_idx have the same batch size "
f"but got B={B} and B={batch_idx.shape[0]}."
)
if H != k_cache.shape[1]:
raise RuntimeError(
f"Expect val and cache has the same number of heads "
f"but got H={H} and H={k_cache.shape[1]}."
)
if S != input_pos.shape[1]:
raise RuntimeError(
f"Expect val and input_pos has the same length "
f"but got S={S} and S={input_pos.shape[0]}."
)
if K_D != k_cache.shape[3]:
raise RuntimeError(
f"Expect k_val and k_cache has the same hidden dim "
f"but got D={K_D} and D={k_cache.shape[3]}."
)
if V_D != v_cache.shape[3]:
raise RuntimeError(
f"Expect v_val and v_cache has the same hidden dim "
f"but got D={V_D} and D={v_cache.shape[3]}."
)
# find address
logical_block_idx = input_pos // self.page_size # [B, S]
logical_block_offset = input_pos % self.page_size # [B, S]
physical_block_idx = torch.gather(
self.page_table[batch_idx], 1, logical_block_idx.to(torch.int64)
).to(
torch.int32
) # [B, S]
addr = (physical_block_idx * self.page_size + logical_block_offset).view(
-1
) # [B*S]
k_val = k_val.permute(1, 0, 2, 3).contiguous().view(1, H, B * S, K_D)
v_val = v_val.permute(1, 0, 2, 3).contiguous().view(1, H, B * S, V_D)
k_cache[:, :, addr, :] = k_val
v_cache[:, :, addr, :] = v_val
def convert_logical_block_mask(
self,
block_mask: BlockMask,
batch_idx: Optional[torch.Tensor] = None,
) -> BlockMask:
"""
Converts a logical block mask by mapping its logical kv indices to the corresponding
physical kv indices.
Args:
block_mask (BlockMask): logical block mask;
kv_indices shape :math:`(B, H, ROWS, MAX_BLOCKS_IN_COL)`.
batch_idx (Tensor): batch index corresponding to the block_mask
batch dimension. This provides flexibility to convert a
block mask with smaller batch size than the page table;
shape :math:`(B)`.
"""
B, H, ROWS, MAX_BLOCKS_IN_COL = block_mask.kv_indices.shape
if block_mask.BLOCK_SIZE[1] != self.page_size:
raise RuntimeError(
f"Expect block_mask has the same column block size as page_size"
f"but got size={block_mask.BLOCK_SIZE[1]} and size={self.page_size}"
)
# Increase the num columns of converted block mask from logical block mask's
# num columns to n_pages, since a) the converted block mask
        # may have larger index values; and b) `_ordered_to_dense` realizes
# a dense tensor with these converted indices. There would be an IndexError
# if using the logical block mask's num columns.
device = block_mask.kv_num_blocks.device
if batch_idx is None:
batch_idx = torch.arange(B, device=device)
page_table = self.page_table[batch_idx]
new_kv_num_blocks = block_mask.kv_num_blocks.clone()
new_kv_indices = torch.zeros(
(B, H, ROWS, self.n_pages), dtype=torch.int32, device=device
)
new_kv_indices[:, :, :, :MAX_BLOCKS_IN_COL] = (
torch.gather(
page_table, 1, block_mask.kv_indices.view(B, -1).to(torch.int64)
)
.view(block_mask.kv_indices.shape)
.to(torch.int32)
)
new_full_kv_indices, new_full_kv_num_blocks = None, None
if block_mask.full_kv_num_blocks is not None:
assert block_mask.full_kv_indices is not None
new_full_kv_num_blocks = block_mask.full_kv_num_blocks.clone()
new_full_kv_indices = torch.zeros(
(B, H, ROWS, self.n_pages), dtype=torch.int32, device=device
)
new_full_kv_indices[:, :, :, :MAX_BLOCKS_IN_COL] = (
torch.gather(
page_table,
1,
block_mask.full_kv_indices.view(B, -1).to(torch.int64),
)
.view(block_mask.full_kv_indices.shape)
.to(torch.int32)
)
new_mask_mod = self.get_mask_mod(block_mask.mask_mod)
seq_lengths = (block_mask.seq_lengths[0], self.n_pages * self.page_size)
return BlockMask.from_kv_blocks(
new_kv_num_blocks,
new_kv_indices,
new_full_kv_num_blocks,
new_full_kv_indices,
block_mask.BLOCK_SIZE,
new_mask_mod,
seq_lengths=seq_lengths,
)
def get_mask_mod(
self, mask_mod: Optional[_mask_mod_signature]
) -> _mask_mod_signature:
"""
Converts a mask_mod based on mapping from the physical block index to the logical
block index.
Args:
mask_mod (_mask_mod_signature): mask_mod based on the logical block index.
"""
if mask_mod is None:
mask_mod = noop_mask
def new_mask_mod(
b: torch.Tensor,
h: torch.Tensor,
q_idx: torch.Tensor,
physical_kv_idx: torch.Tensor,
):
physical_kv_block = physical_kv_idx // self.page_size
physical_kv_offset = physical_kv_idx % self.page_size
logical_block_idx = self.physical_to_logical[b, physical_kv_block]
logical_kv_idx = logical_block_idx * self.page_size + physical_kv_offset
return torch.where(
logical_block_idx >= 0, mask_mod(b, h, q_idx, logical_kv_idx), False
)
return new_mask_mod
def get_score_mod(
self, score_mod: Optional[_score_mod_signature]
) -> _score_mod_signature:
"""
Converts a score_mod based on mapping from the physical block index to the logical
block index.
Args:
score_mod (_score_mod_signature): score_mod based on the logical block index.
"""
if score_mod is None:
score_mod = _identity
def new_score_mod(
score: torch.Tensor,
b: torch.Tensor,
h: torch.Tensor,
q_idx: torch.Tensor,
physical_kv_idx: torch.Tensor,
):
physical_kv_block = physical_kv_idx // self.page_size
physical_kv_offset = physical_kv_idx % self.page_size
logical_block_idx = self.physical_to_logical[b, physical_kv_block]
logical_kv_idx = logical_block_idx * self.page_size + physical_kv_offset
return torch.where(
logical_block_idx >= 0,
score_mod(score, b, h, q_idx, logical_kv_idx),
float("-inf"),
)
return new_score_mod
```
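The physical-to-logical remapping performed by ``get_mask_mod``/``get_score_mod`` above can be sketched in isolation. The snippet below is a hypothetical standalone example (the ``physical_to_logical`` table and ``page_size`` value are made up, not taken from the module): a causal mask written in logical coordinates is evaluated at a physical KV index, and unmapped physical blocks (entries of ``-1``) are masked out.
```py
import torch

page_size = 4
# Hypothetical mapping for a single batch element: physical block -> logical block (-1 = unmapped).
physical_to_logical = torch.tensor([[2, 0, -1, 1]])  # shape (B=1, n_physical_blocks=4)


def causal_mask(b, h, q_idx, kv_idx):
    return q_idx >= kv_idx


def paged_causal_mask(b, h, q_idx, physical_kv_idx):
    physical_kv_block = physical_kv_idx // page_size
    physical_kv_offset = physical_kv_idx % page_size
    logical_block_idx = physical_to_logical[b, physical_kv_block]
    logical_kv_idx = logical_block_idx * page_size + physical_kv_offset
    # Unmapped blocks (logical_block_idx < 0) are always masked out.
    return torch.where(
        logical_block_idx >= 0, causal_mask(b, h, q_idx, logical_kv_idx), False
    )


b, h = torch.tensor(0), torch.tensor(0)
# Physical index 1 lives in physical block 0, which maps to logical block 2,
# i.e. logical index 9; a query at position 5 therefore cannot attend to it.
print(paged_causal_mask(b, h, torch.tensor(5), torch.tensor(1)))  # tensor(False)
```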
|
============================================================================================================================
SOURCE CODE FILE: flex_attention.py
LINES: 19
SIZE: 55.37 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\attention\flex_attention.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# flake8: noqa C101
"""This module implements the user facing API for flex_attention in PyTorch."""
import functools
import inspect
import itertools
import math
import operator
import warnings
from enum import Enum
from typing import Any, Callable, Optional, Union
import torch
from torch import Tensor
from torch._dynamo._trace_wrapped_higher_order_op import TransformGetItemToIndex
from torch._higher_order_ops.flex_attention import flex_attention as flex_attention_hop
from torch._higher_order_ops.utils import _set_compilation_env
from torch.fx.experimental.proxy_tensor import (
_temp_remove_metadata_torch_function_mode,
_temp_remove_pre_dispatch_torch_function_mode,
)
from torch.nn.attention._utils import _supported_head_dim, _validate_sdpa_input
from torch.utils._pytree import tree_map_only
__all__ = [
"BlockMask",
"flex_attention",
"create_block_mask",
"create_mask",
"create_nested_block_mask",
"or_masks",
"and_masks",
"noop_mask",
]
_score_mod_signature = Callable[[Tensor, Tensor, Tensor, Tensor, Tensor], Tensor]
_mask_mod_signature = Callable[[Tensor, Tensor, Tensor, Tensor], Tensor]
class _ModificationType(Enum):
"""Enum for the type of modification function.
- SCORE_MOD: score_mod function which accepts a score as the first argument
- MASK_MOD: mask_mod function which does not accept a score and is only used for
generating the block mask
"""
SCORE_MOD = 1
MASK_MOD = 2
UNKNOWN = 3
def _get_mod_type(fn: Callable) -> _ModificationType:
"""Get the type of modification function.
This function inspects the number of positional arguments of the function to determine
the type of modification function. If the function has 5 positional arguments, it is
considered as a score_mod function. If the function has 4 positional arguments, it is
considered as a mask function.
"""
num_positional_args = sum(
1
for param in inspect.signature(fn).parameters.values()
if param.default == inspect.Parameter.empty
)
assert num_positional_args == 5 or num_positional_args == 4
if num_positional_args == 5:
return _ModificationType.SCORE_MOD
elif num_positional_args == 4:
return _ModificationType.MASK_MOD
else:
return _ModificationType.UNKNOWN
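# Illustrative sketch (not part of the original module): the arity check above means a
# 5-argument callable is classified as a score_mod and a 4-argument callable as a mask_mod:
#   _get_mod_type(lambda score, b, h, q, kv: score)  -> _ModificationType.SCORE_MOD
#   _get_mod_type(lambda b, h, q, kv: q >= kv)       -> _ModificationType.MASK_MOD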
# Need to define it here so that Dynamo doesn't skip it
def _vmap_for_bhqkv(
fn: Callable,
prefix: tuple[Optional[int], ...],
suffix: tuple[Optional[int], ...] = (),
out_dims: Union[int, list[Optional[int]]] = 0,
group_dim: bool = False,
):
"""Used to vmap both score_mods and mask_mods over 4-dimensional/5-dimension inputs.
Mapping over the [b, hq, q_idx, kv_idx] or [b, hkv, g, q_idx, kv_idx] dimensions.
Args:
fn (callable): The function to vmap.
prefix (tuple): The prefix of the vmap. For score_mod functions,
this should be set to (0,); for mask_mods, to ().
suffix (tuple): We need to add (0,) if gradOut is being mapped over,
and (None,) * len(other_buffers).
out_dims (tuple): For forward cases, keep this as the default 0 since
we are only returning 1 output. For backwards, the joint
graph returns grads for B, H, Q_idx, KV_idx and other_buffers,
so we set this to (0, None, None, None, None) + (None,) * len(other_buffers).
Returns:
callable: The vmapped function.
"""
# We vmap the function 4 times, broadcasting the [b, h, q_idx, kv_idx] dimensions
dimensions: list[tuple[None | int, None | int, None | int, None | int]] = []
dimensions = [
(None, None, None, 0),
(None, None, 0, None),
(None, 0, None, None),
]
if group_dim:
dimensions += [
(None, 0, None, None),
]
dimensions += [
(0, None, None, None),
]
for dims in dimensions:
fn = torch.vmap(fn, in_dims=prefix + dims + suffix, out_dims=out_dims) # type: ignore[arg-type]
return fn
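# Illustrative sketch (not part of the original module): after the four nested vmaps, a
# mask_mod written for scalar (b, h, q_idx, kv_idx) inputs is evaluated over the full
# broadcasted grid, e.g.:
#   mask_fn = _vmap_for_bhqkv(lambda b, h, q, kv: q >= kv, prefix=())
#   mask_fn(torch.arange(2), torch.arange(4), torch.arange(8), torch.arange(8)).shape
#   -> torch.Size([2, 4, 8, 8])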
def _identity(
score: Tensor,
batch: Tensor,
head: Tensor,
token_q: Tensor,
token_kv: Tensor,
) -> Tensor:
return score
def noop_mask(
batch: Tensor,
head: Tensor,
token_q: Tensor,
token_kv: Tensor,
) -> Tensor:
"""Returns a noop mask_mod"""
return batch.new_ones(size=(), dtype=torch.bool, device=batch.device)
_DEFAULT_SPARSE_BLOCK_SIZE = 128
_LARGE_SPARSE_BLOCK_SIZE = 1 << 30
def _ordered_to_dense(num_blocks_in_row: Tensor, col_indices: Tensor):
num_rows = col_indices.shape[-2]
num_cols = col_indices.shape[-1]
batch_dims = num_blocks_in_row.shape[:-1]
device = num_blocks_in_row.device
def create_dense_one(kv_num_blocks, kv_indices):
dense_mask = kv_indices.new_zeros(num_rows, num_cols + 1, dtype=torch.int32)
row_indices = torch.arange(num_rows, dtype=torch.int, device=device).unsqueeze(
-1
)
col_range = torch.arange(num_cols, dtype=torch.int, device=device)
index_mask = col_range < kv_num_blocks.unsqueeze(-1)
# We write to one spot "out of bounds"
valid_indices = torch.where(index_mask, kv_indices, num_cols)
# set the values in dense_mask to 1 where the indices are valid
dense_mask[row_indices, valid_indices] = dense_mask.new_ones(())
return dense_mask[:, :num_cols].contiguous()
create_dense_batched = create_dense_one
for _ in range(len(batch_dims)):
create_dense_batched = torch.vmap(create_dense_batched, in_dims=(0, 0))
out = create_dense_batched(num_blocks_in_row, col_indices)
return out
def _dense_to_ordered(dense_mask) -> tuple[Tensor, Tensor]:
dense_mask = dense_mask.to(dtype=torch.int32)
num_blocks_in_row = dense_mask.sum(dim=-1)
col_indices = torch.argsort(dense_mask, dim=-1, descending=True, stable=True)
return (
num_blocks_in_row.to(torch.int32, memory_format=torch.contiguous_format),
col_indices.to(torch.int32, memory_format=torch.contiguous_format),
)
def _transpose_ordered(num_blocks_in_row: Tensor, col_indices: Tensor):
dense = _ordered_to_dense(num_blocks_in_row, col_indices)
return _dense_to_ordered(dense.transpose(-2, -1))
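# Illustrative sketch (not part of the original module): for the dense 2x2 block mask
#   [[1, 0],
#    [1, 1]]
# _dense_to_ordered yields num_blocks_in_row = [1, 2] and col_indices = [[0, 1], [0, 1]],
# and _ordered_to_dense reconstructs the original dense mask from that pair;
# _transpose_ordered round-trips through the dense form to transpose the sparse layout.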
def _adjust_num_blocks_and_indices(
num_blocks: Tensor,
indices: Tensor,
new_num_rows: int,
new_num_cols: int,
):
indices = indices[:, :, :new_num_rows, :new_num_cols]
num_blocks = num_blocks[:, :, :new_num_rows]
num_blocks = torch.where(num_blocks < new_num_cols, num_blocks, new_num_cols)
num_blocks = torch.sum(indices < num_blocks[:, :, :, None], dim=-1).to(torch.int32)
return num_blocks, indices
class BlockMask:
r"""
BlockMask is our format for representing a block-sparse attention mask.
It is somewhat of a cross between BCSR and a non-sparse format.
Basics
------
A block-sparse mask means that instead of representing the sparsity of
individual elements in the mask, a KV_BLOCK_SIZE x Q_BLOCK_SIZE block is
considered sparse only if every element within that block is sparse.
This aligns well with hardware, which generally expects to perform
contiguous loads and computation.
This format is primarily optimized for 1. simplicity, and 2. kernel
efficiency. Notably, it is *not* optimized for size, as this mask is always
reduced by a factor of KV_BLOCK_SIZE * Q_BLOCK_SIZE. If the size is a
concern, the tensors can be reduced in size by increasing the block size.
The essentials of our format are:
num_blocks_in_row: Tensor[ROWS]:
Describes the number of blocks present in each row.
col_indices: Tensor[ROWS, MAX_BLOCKS_IN_COL]:
`col_indices[i]` is the sequence of block positions for row i. The values of
this row after `col_indices[i][num_blocks_in_row[i]]` are undefined.
For example, to reconstruct the original tensor from this format:
.. code-block:: python
dense_mask = torch.zeros(ROWS, COLS)
for row in range(ROWS):
for block_idx in range(num_blocks_in_row[row]):
dense_mask[row, col_indices[row, block_idx]] = 1
Notably, this format makes it easier to implement a reduction along the
*rows* of the mask.
Details
-------
The basics of our format require only kv_num_blocks and kv_indices. But, we
have up to 8 tensors on this object. This represents 4 pairs:
1. (kv_num_blocks, kv_indices): Used for the forwards pass of attention, as
we reduce along the KV dimension.
2. [OPTIONAL] (full_kv_num_blocks, full_kv_indices): This is optional and
purely an optimization. As it turns out, applying masking to every block
is quite expensive! If we specifically know which blocks are "full" and
don't require masking at all, then we can skip applying mask_mod to these
blocks. This requires the user to split out a separate mask_mod from the
score_mod. For causal masks, this is about a 15% speedup.
3. [GENERATED] (q_num_blocks, q_indices): Required for the backwards pass,
as computing dKV requires iterating over the mask along the Q dimension. These are autogenerated from 1.
4. [GENERATED] (full_q_num_blocks, full_q_indices): Same as above, but for
the backwards pass. These are autogenerated from 2.
"""
seq_lengths: tuple[int, int]
kv_num_blocks: Tensor
kv_indices: Tensor
full_kv_num_blocks: Optional[Tensor]
full_kv_indices: Optional[Tensor]
q_num_blocks: Optional[Tensor]
q_indices: Optional[Tensor]
full_q_num_blocks: Optional[Tensor]
full_q_indices: Optional[Tensor]
BLOCK_SIZE: tuple[int, int]
mask_mod: _mask_mod_signature
def __init__(
self,
seq_lengths: tuple[int, int],
kv_num_blocks: Tensor,
kv_indices: Tensor,
full_kv_num_blocks: Optional[Tensor],
full_kv_indices: Optional[Tensor],
q_num_blocks: Optional[Tensor],
q_indices: Optional[Tensor],
full_q_num_blocks: Optional[Tensor],
full_q_indices: Optional[Tensor],
BLOCK_SIZE: tuple[int, int],
mask_mod: _mask_mod_signature,
):
if kv_indices.dim() < 2:
raise RuntimeError("BlockMask must have at least 2 dimensions")
assert kv_num_blocks is not None, "kv_num_blocks must be provided"
assert kv_indices is not None, "kv_indices must be provided"
assert q_num_blocks is not None, "q_num_blocks must be provided"
assert q_indices is not None, "q_indices must be provided"
assert (full_kv_num_blocks is None) == (
full_kv_indices is None
), "full_kv_num_blocks and full_kv_indices must be both provided or omitted"
assert (full_q_num_blocks is None) == (
full_q_indices is None
), "full_q_num_blocks and full_q_indices must be both provided or omitted"
self.seq_lengths = seq_lengths
self.kv_num_blocks = kv_num_blocks
self.kv_indices = kv_indices
self.full_kv_num_blocks = full_kv_num_blocks
self.full_kv_indices = full_kv_indices
self.q_num_blocks = q_num_blocks
self.q_indices = q_indices
self.full_q_num_blocks = full_q_num_blocks
self.full_q_indices = full_q_indices
self.BLOCK_SIZE = BLOCK_SIZE
self.mask_mod = mask_mod
@classmethod
def from_kv_blocks(
cls,
kv_num_blocks: Tensor,
kv_indices: Tensor,
full_kv_num_blocks: Optional[Tensor] = None,
full_kv_indices: Optional[Tensor] = None,
BLOCK_SIZE: Union[int, tuple[int, int]] = _DEFAULT_SPARSE_BLOCK_SIZE,
mask_mod: Optional[_mask_mod_signature] = None,
seq_lengths: Optional[tuple[int, int]] = None,
):
"""
Creates a BlockMask instance from key-value block information.
Args:
kv_num_blocks (Tensor): Number of kv_blocks in each Q_BLOCK_SIZE row tile.
kv_indices (Tensor): Indices of key-value blocks in each Q_BLOCK_SIZE row tile.
full_kv_num_blocks (Optional[Tensor]): Number of full kv_blocks in each Q_BLOCK_SIZE row tile.
full_kv_indices (Optional[Tensor]): Indices of full key-value blocks in each Q_BLOCK_SIZE row tile.
BLOCK_SIZE (Union[int, tuple[int, int]]): Size of KV_BLOCK_SIZE x Q_BLOCK_SIZE tiles.
mask_mod (Optional[Callable]): Function to modify the mask.
Returns:
BlockMask: Instance with full Q information generated via _transpose_ordered
Raises:
RuntimeError: If kv_indices has < 2 dimensions.
AssertionError: If only one of full_kv_* args is provided.
"""
if kv_indices.dim() < 2:
raise RuntimeError("BlockMask must have at least 2 dimensions")
assert (full_kv_num_blocks is None) == (
full_kv_indices is None
), "full_kv_num_blocks and full_kv_indices must be both provided or omitted"
# Generate q_num_blocks and q_indices
q_num_blocks, q_indices = _transpose_ordered(kv_num_blocks, kv_indices)
if full_kv_num_blocks is not None:
assert full_kv_indices is not None
full_q_num_blocks, full_q_indices = _transpose_ordered(
full_kv_num_blocks, full_kv_indices
)
else:
full_q_num_blocks, full_q_indices = None, None
if isinstance(BLOCK_SIZE, int):
BLOCK_SIZE = (BLOCK_SIZE, BLOCK_SIZE)
mask_mod = mask_mod if mask_mod is not None else noop_mask
if seq_lengths is None:
q_length = kv_indices.shape[-2] * BLOCK_SIZE[0]
kv_length = q_indices.shape[-2] * BLOCK_SIZE[1]
seq_lengths = (q_length, kv_length)
return cls(
seq_lengths=seq_lengths,
kv_num_blocks=kv_num_blocks,
kv_indices=kv_indices,
full_kv_num_blocks=full_kv_num_blocks,
full_kv_indices=full_kv_indices,
q_num_blocks=q_num_blocks,
q_indices=q_indices,
full_q_num_blocks=full_q_num_blocks,
full_q_indices=full_q_indices,
BLOCK_SIZE=BLOCK_SIZE,
mask_mod=mask_mod,
)
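    # Illustrative sketch (not part of the original class): a minimal single-block mask
    # built directly from kv block info:
    #   bm = BlockMask.from_kv_blocks(
    #       kv_num_blocks=torch.ones(1, 1, 1, dtype=torch.int32),
    #       kv_indices=torch.zeros(1, 1, 1, 1, dtype=torch.int32),
    #       BLOCK_SIZE=128,
    #   )
    #   bm.shape  # -> (1, 1, 128, 128)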
def as_tuple(self, flatten: bool = True):
"""
Returns a tuple of the attributes of the BlockMask.
Args:
flatten (bool): If True, it will flatten the tuple of (KV_BLOCK_SIZE, Q_BLOCK_SIZE)
"""
if flatten:
block_size = (self.BLOCK_SIZE[0], self.BLOCK_SIZE[1]) # type: ignore[assignment]
seq_lengths = (self.seq_lengths[0], self.seq_lengths[1]) # type: ignore[assignment]
else:
block_size = (self.BLOCK_SIZE,) # type: ignore[assignment]
seq_lengths = (self.seq_lengths,) # type: ignore[assignment]
return (
*seq_lengths,
self.kv_num_blocks,
self.kv_indices,
self.full_kv_num_blocks,
self.full_kv_indices,
self.q_num_blocks,
self.q_indices,
self.full_q_num_blocks,
self.full_q_indices,
*block_size,
self.mask_mod,
)
@property
def shape(self):
*batch_dims, _, _ = self.kv_indices.shape
return tuple(batch_dims) + self.seq_lengths
def __str__(self):
s = f"BlockMask(shape={self.shape}, sparsity={self.sparsity():.2f}%, \n"
mask_str = self.to_string().strip()
s += mask_str
s += "\n)"
return s
def __getitem__(self, index) -> "BlockMask":
"""
Returns a new BlockMask instance by getting the mask for the given index position.
Args:
index: Index to apply to all attributes.
Example Usage:
.. code-block:: python
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
block_mask = create_block_mask(causal_mask, 4, 2, 512, 512, device="cuda")
assert block_mask.kv_num_blocks.shape == (4,2,4)
assert block_mask.kv_indices.shape == (4,2,4,4)
# Index on batch dimension
new_block_mask = block_mask[0]
assert new_block_mask.kv_num_blocks.shape == (2,4)
assert new_block_mask.kv_indices.shape == (2,4,4)
# Index on batch and head dimension
new_block_mask = block_mask[0, 1]
assert new_block_mask.kv_num_blocks.shape == (4,)
assert new_block_mask.kv_indices.shape == (4,4)
# slicing on batch and head dimension
new_block_mask = block_mask[0:2, 1:2]
assert new_block_mask.kv_num_blocks.shape == (2,1,4)
assert new_block_mask.kv_indices.shape == (2,1,4,4)
# slicing on batch, head, and query dimension
new_block_mask = block_mask[0:2, 1:2, torch.tensor([1], dtype=torch.int32)]
assert new_block_mask.kv_num_blocks.shape == (2,1,1)
assert new_block_mask.kv_indices.shape == (2,1,1,4)
"""
new_kv_num_blocks = self.kv_num_blocks[index]
new_kv_indices = self.kv_indices[index]
if self.full_kv_num_blocks is not None:
assert self.full_kv_indices is not None
new_full_kv_num_blocks = self.full_kv_num_blocks[index]
new_full_kv_indices = self.full_kv_indices[index]
else:
new_full_kv_num_blocks = None
new_full_kv_indices = None
return BlockMask.from_kv_blocks(
new_kv_num_blocks,
new_kv_indices,
new_full_kv_num_blocks,
new_full_kv_indices,
BLOCK_SIZE=self.BLOCK_SIZE,
mask_mod=None,
seq_lengths=self.seq_lengths,
)
def __repr__(self):
def shape_or_none(x: Optional[torch.Tensor]):
return x.shape if x is not None else None
return (
f"BlockMask(\n"
f" kv_num_blocks={self.kv_num_blocks.shape},\n"
f" kv_indices={self.kv_indices.shape},\n"
f" full_kv_num_blocks={shape_or_none(self.full_kv_num_blocks )},\n"
f" full_kv_indices={shape_or_none(self.full_kv_indices)},\n"
f" q_num_blocks={shape_or_none(self.q_num_blocks)},\n"
f" q_indices={shape_or_none(self.q_indices)},\n"
f" full_q_num_blocks={shape_or_none(self.full_q_num_blocks)},\n"
f" full_q_indices={shape_or_none(self.full_q_indices)},\n"
f" BLOCK_SIZE={self.BLOCK_SIZE},\n"
f" shape={self.shape},\n"
f" sparsity={self.sparsity():.2f}%,\n"
f" mask_mod={self.mask_mod.__name__ if hasattr(self.mask_mod, '__name__') else self.mask_mod}\n"
f")"
)
def _adjust(self, new_q_len: int, new_kv_len: int):
new_num_rows = (new_q_len + self.BLOCK_SIZE[0] - 1) // self.BLOCK_SIZE[0]
new_num_cols = (new_kv_len + self.BLOCK_SIZE[1] - 1) // self.BLOCK_SIZE[1]
new_kv_num_blocks, new_kv_indices = _adjust_num_blocks_and_indices(
self.kv_num_blocks, self.kv_indices, new_num_rows, new_num_cols
)
if self.full_kv_num_blocks is not None:
assert self.full_kv_indices is not None
(
new_full_kv_num_blocks,
new_full_kv_indices,
) = _adjust_num_blocks_and_indices(
self.full_kv_num_blocks,
self.full_kv_indices,
new_num_rows,
new_num_cols,
)
else:
new_full_kv_num_blocks = None
new_full_kv_indices = None
return self.from_kv_blocks(
new_kv_num_blocks,
new_kv_indices,
new_full_kv_num_blocks,
new_full_kv_indices,
self.BLOCK_SIZE,
self.mask_mod,
)
def numel(self):
"""Returns the number of elements (not accounting for sparsity) in the mask."""
shape = self.shape
def _prod(xs):
return functools.reduce(operator.mul, xs, 1)
return _prod(shape)
def sparsity(self) -> float:
"""Computes the percentage of blocks that are sparse (i.e. not computed)"""
total_size = self.numel()
computed_blocks = self.kv_num_blocks.sum()
if self.full_kv_num_blocks is not None:
computed_blocks += self.full_kv_num_blocks.sum()
computed_size = computed_blocks.item() * self.BLOCK_SIZE[0] * self.BLOCK_SIZE[1]
dense_ratio = computed_size / total_size
return 100 * (1 - dense_ratio)
def to_dense(self) -> Tensor:
"""Returns a dense block that is equivalent to the block mask."""
partial_dense = _ordered_to_dense(self.kv_num_blocks, self.kv_indices)
if self.full_kv_num_blocks is not None:
assert self.full_kv_indices is not None
return partial_dense | _ordered_to_dense(
self.full_kv_num_blocks, self.full_kv_indices
)
return partial_dense
def to_string(self, grid_size=(20, 20), limit=4):
"""Returns a string representation of the block mask. Quite nifty.
If grid_size is -1, prints out an uncompressed version. Warning, it can be quite big!
"""
dense_mask = self.to_dense()
*batch_dims, num_rows, num_cols = dense_mask.shape
if isinstance(grid_size, int):
max_rows = grid_size
max_cols = grid_size
elif grid_size == -1:
max_rows = num_rows
max_cols = num_cols
else:
max_rows, max_cols = grid_size
def create_block_vis(*batch_idx):
descriptors = []
descriptors.append(f"{batch_idx}")
vis = ", ".join(reversed(descriptors)) + "\n"
def summarize_section(section):
percentage = section.float().mean().item()
if percentage == 1:
return "█"
elif percentage == 0:
return " "
else:
return "░"
def cdiv(a, b):
return (a + (b - 1)) // b
row_step = max(1, cdiv(num_rows, max_rows))
col_step = max(1, cdiv(num_cols, max_cols))
for r in range(0, num_rows, row_step):
for c in range(0, num_cols, col_step):
cur_mask = dense_mask
for idx in batch_idx:
cur_mask = cur_mask[idx]
char = summarize_section(
cur_mask[r : r + row_step, c : c + col_step]
)
vis += char * 2
vis += "\n"
return vis
total_vis = []
for idx, batch_idx in enumerate(
itertools.product(*[range(i) for i in batch_dims])
):
if idx == limit:
total_vis.append("...")
total_vis.append("To print out more, set BlockMask.to_string(limit=N)")
total_vis.append(
"You can also index (BlockMask[batch, head]) to choose a specific batch or head"
)
break
block_vis = create_block_vis(*batch_idx)
total_vis.append(block_vis)
return "\n".join(total_vis)
def to(self, device: Union[torch.device, str]) -> "BlockMask":
"""Moves the BlockMask to the specified device.
Args:
device (torch.device or str): The target device to move the BlockMask to.
Can be a torch.device object or a string (e.g., 'cpu', 'cuda:0').
Returns:
BlockMask: A new BlockMask instance with all tensor components moved
to the specified device.
Note:
This method does not modify the original BlockMask in-place.
Instead, it returns a new BlockMask instance where individual tensor attributes
may or may not be moved to the specified device, depending on their
current device placement.
"""
mapped_attributes = tree_map_only(
torch.Tensor,
lambda x: x.to(device),
self.as_tuple(flatten=False),
)
return BlockMask(*mapped_attributes)
def _broadcast_to_dim(x, dim):
while x.dim() < dim:
x = x.unsqueeze(0)
return x
def _round_up_to_multiple(x, multiple):
return (x + multiple - 1) // multiple * multiple
def _convert_mask_to_block_mask(
mask: Tensor,
Q_BLOCK_SIZE=_DEFAULT_SPARSE_BLOCK_SIZE,
KV_BLOCK_SIZE=_DEFAULT_SPARSE_BLOCK_SIZE,
separate_full_blocks: bool = False,
) -> tuple[Tensor, Optional[Tensor]]:
assert mask.dtype == torch.bool
mask = _broadcast_to_dim(mask, 4)
def padding_needed_for_multiple(x, multiple):
return _round_up_to_multiple(x, multiple) - x
mask = torch.nn.functional.pad(
mask,
(
0,
padding_needed_for_multiple(mask.shape[-1], KV_BLOCK_SIZE),
0,
padding_needed_for_multiple(mask.shape[-2], Q_BLOCK_SIZE),
),
)
B, H, Q, KV = mask.shape
assert Q % Q_BLOCK_SIZE == 0
assert KV % KV_BLOCK_SIZE == 0
mask = mask.view(
B, H, Q // Q_BLOCK_SIZE, Q_BLOCK_SIZE, KV // KV_BLOCK_SIZE, KV_BLOCK_SIZE
) # [B, H, Q//Q_BLOCK_SIZE, Q_BLOCK_SIZE, KV//KV_BLOCK_SIZE, KV_BLOCK_SIZE]
mask = mask.permute(
0, 1, 2, 4, 3, 5
) # [B, H, Q//Q_BLOCK_SIZE, KV//KV_BLOCK_SIZE, Q_BLOCK_SIZE, KV_BLOCK_SIZE]
mask_block_sum = mask.sum(
dim=[-2, -1]
) # [B, H, Q//Q_BLOCK_SIZE, KV//KV_BLOCK_SIZE]
if separate_full_blocks:
full_block_sum = Q_BLOCK_SIZE * KV_BLOCK_SIZE
full_blocks = mask_block_sum == full_block_sum
partial_blocks = (mask_block_sum > 0) & (mask_block_sum < full_block_sum)
partial_blocks = partial_blocks.to(dtype=torch.int8)
full_blocks = full_blocks.to(dtype=torch.int8)
return partial_blocks, full_blocks
else:
partial_blocks = mask_block_sum > 0
partial_blocks = partial_blocks.to(dtype=torch.int8)
return partial_blocks, None
def or_masks(*mask_mods: _mask_mod_signature) -> _mask_mod_signature:
"""Returns a mask_mod that's the union of provided mask_mods"""
if not all(callable(arg) for arg in mask_mods):
raise RuntimeError(f"All inputs should be callable mask_mods: {mask_mods}")
def or_mask(b, h, q_idx, kv_idx):
result = b.new_zeros((), dtype=torch.bool)
for mask in mask_mods:
result = result | mask(b, h, q_idx, kv_idx)
return result
return or_mask
def and_masks(*mask_mods: _mask_mod_signature) -> _mask_mod_signature:
"""Returns a mask_mod that's the intersection of provided mask_mods"""
if not all(callable(arg) for arg in mask_mods):
raise RuntimeError(f"All inputs should be callable mask_mods: {mask_mods}")
def and_mask(b, h, q_idx, kv_idx):
result = b.new_ones((), dtype=torch.bool)
for mask in mask_mods:
result = result & mask(b, h, q_idx, kv_idx)
return result
return and_mask
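# Illustrative sketch (not part of the original module): mask_mods compose, e.g. a causal
# mask intersected with a sliding-window mask (window size 256 chosen arbitrarily):
#   def causal(b, h, q_idx, kv_idx):
#       return q_idx >= kv_idx
#   def sliding_window(b, h, q_idx, kv_idx):
#       return q_idx - kv_idx <= 256
#   causal_windowed = and_masks(causal, sliding_window)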
def _convert_block_mask_to_mask(
block_mask,
KV_BLOCK_SIZE=_DEFAULT_SPARSE_BLOCK_SIZE,
Q_BLOCK_SIZE=_DEFAULT_SPARSE_BLOCK_SIZE,
) -> Tensor:
assert block_mask.dim() == 4
B, H, Q, KV = block_mask.shape
block_mask = block_mask.expand(Q_BLOCK_SIZE, KV_BLOCK_SIZE, *block_mask.shape)
block_mask = block_mask.permute(2, 3, 4, 0, 5, 1).reshape(
B, H, Q * Q_BLOCK_SIZE, KV * KV_BLOCK_SIZE
)
return block_mask
def _create_sparse_block_from_block_mask(
block_mask: tuple[Tensor, Optional[Tensor]],
mask_mod: Optional[Callable],
seq_lengths: tuple[int, int],
Q_BLOCK_SIZE: int = _DEFAULT_SPARSE_BLOCK_SIZE,
KV_BLOCK_SIZE: int = _DEFAULT_SPARSE_BLOCK_SIZE,
) -> BlockMask:
partial_blocks, full_blocks = block_mask
partial_bm = _dense_to_ordered(partial_blocks)
if full_blocks is not None:
full_bm: tuple[Optional[Tensor], Optional[Tensor]] = _dense_to_ordered(
full_blocks
)
else:
full_bm = (None, None)
return BlockMask.from_kv_blocks(
partial_bm[0],
partial_bm[1],
full_bm[0],
full_bm[1],
BLOCK_SIZE=(Q_BLOCK_SIZE, KV_BLOCK_SIZE),
mask_mod=mask_mod,
seq_lengths=seq_lengths,
)
def create_mask(
mod_fn: Union[_score_mod_signature, _mask_mod_signature],
B: Optional[int],
H: Optional[int],
Q_LEN: int,
KV_LEN: int,
device: str = "cuda",
) -> Tensor:
r"""This function creates a mask tensor from a mod_fn function.
Args:
mod_fn (Union[_score_mod_signature, _mask_mod_signature]): Function to modify attention scores.
B (int): Batch size.
H (int): Number of query heads.
Q_LEN (int): Sequence length of query.
KV_LEN (int): Sequence length of key/value.
device (str): Device to run the mask creation on.
Returns:
mask (Tensor): A mask tensor with shape (B, H, M, N).
"""
if B is None:
B = 1
if H is None:
H = 1
b = torch.arange(0, B, device=device)
h = torch.arange(0, H, device=device)
m = torch.arange(0, Q_LEN, device=device)
n = torch.arange(0, KV_LEN, device=device)
mod_type = _get_mod_type(mod_fn)
with TransformGetItemToIndex():
if mod_type == _ModificationType.SCORE_MOD:
score_mod = mod_fn
score_mod = _vmap_for_bhqkv(score_mod, prefix=(0,)) # first input is score
out = score_mod(torch.zeros(B, H, Q_LEN, KV_LEN, device=device), b, h, m, n)
mask = torch.where(torch.isneginf(out), False, True)
return mask
elif mod_type == _ModificationType.MASK_MOD:
mask_mod = mod_fn
mask_mod = _vmap_for_bhqkv(mask_mod, prefix=())
mask = mask_mod(b, h, m, n)
return mask
else:
raise AssertionError
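# Illustrative sketch (not part of the original module): materializing a dense boolean mask
# from a mask_mod, e.g. for debugging or for comparison against a BlockMask:
#   def causal(b, h, q_idx, kv_idx):
#       return q_idx >= kv_idx
#   mask = create_mask(causal, B=1, H=1, Q_LEN=16, KV_LEN=16, device="cpu")
#   mask.shape  # -> torch.Size([1, 1, 16, 16]), lower-triangular True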
def create_block_mask(
mask_mod: _mask_mod_signature,
B: Optional[int],
H: Optional[int],
Q_LEN: int,
KV_LEN: int,
device: str = "cuda",
BLOCK_SIZE: Union[int, tuple[int, int]] = _DEFAULT_SPARSE_BLOCK_SIZE,
_compile=False,
) -> BlockMask:
r"""This function creates a block mask tuple from a mask_mod function.
Args:
mask_mod (Callable): mask_mod function. This is a callable that defines the
masking pattern for the attention mechanism. It takes four arguments:
b (batch size), h (number of heads), q_idx (query index), and kv_idx (key/value index).
It should return a boolean tensor indicating which attention connections are allowed (True)
or masked out (False).
B (int): Batch size.
H (int): Number of query heads.
Q_LEN (int): Sequence length of query.
KV_LEN (int): Sequence length of key/value.
device (str): Device to run the mask creation on.
BLOCK_SIZE (int or tuple[int, int]): Block size for the block mask. If a single int is provided it is used for both query and key/value.
Returns:
BlockMask: A BlockMask object that contains the block mask information.
Example Usage:
.. code-block:: python
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
block_mask = create_block_mask(causal_mask, 1, 1, 8192, 8192, device="cuda")
query = torch.randn(1, 1, 8192, 64, device="cuda", dtype=torch.float16)
key = torch.randn(1, 1, 8192, 64, device="cuda", dtype=torch.float16)
value = torch.randn(1, 1, 8192, 64, device="cuda", dtype=torch.float16)
output = flex_attention(query, key, value, block_mask=block_mask)
"""
mod_type = _get_mod_type(mask_mod)
assert (
mod_type == _ModificationType.MASK_MOD
), f"create-block_mask requires a mask_mod function! Got {mask_mod}"
if B is None:
B = 1
if H is None:
H = 1
if isinstance(BLOCK_SIZE, int):
Q_BLOCK_SIZE = BLOCK_SIZE
KV_BLOCK_SIZE = BLOCK_SIZE
else:
Q_BLOCK_SIZE, KV_BLOCK_SIZE = BLOCK_SIZE
if _compile:
warnings.warn(
"_compile flag on create_block_mask was originally added to work around a torch.compile limitation. That limitation has since been addressed. So, to compile create_block_mask, we suggest doing torch.compile(create_block_mask). This still works for now, but will be removed in the future.",
DeprecationWarning,
)
return torch.compile(create_block_mask)(
mask_mod, B, H, Q_LEN, KV_LEN, device, BLOCK_SIZE
)
mask_tensor = create_mask(mask_mod, B, H, Q_LEN, KV_LEN, device)
partial_block_mask, full_block_mask = _convert_mask_to_block_mask(
mask_tensor,
Q_BLOCK_SIZE=Q_BLOCK_SIZE,
KV_BLOCK_SIZE=KV_BLOCK_SIZE,
separate_full_blocks=True,
)
block_mask = _create_sparse_block_from_block_mask(
(partial_block_mask, full_block_mask),
mask_mod,
(Q_LEN, KV_LEN),
Q_BLOCK_SIZE,
KV_BLOCK_SIZE,
)
return block_mask
def _create_empty_block_mask(query: Tensor, key: Tensor) -> BlockMask:
r"""Default block mask for flex attention.
If users don't specify any block sparse mask info, we create this
empty block sparse mask, which yields a BlockMask with a single block spanning the full length
of the query and key tensors.
"""
device = query.device
return BlockMask.from_kv_blocks(
kv_num_blocks=torch.ones([1, 1, 1], dtype=torch.int32, device=device),
kv_indices=torch.zeros([1, 1, 1, 1], dtype=torch.int32, device=device),
BLOCK_SIZE=_LARGE_SPARSE_BLOCK_SIZE,
seq_lengths=(1, 1),
)
def _nested_mod_func_adapter(
orig_mod_func: Union[_score_mod_signature, _mask_mod_signature],
q_nt: torch.Tensor,
kv_nt: torch.Tensor,
is_score_mod: bool,
) -> Union[_score_mod_signature, _mask_mod_signature]:
r"""Adapter to convert a score_mod / mask_mod to be NJT-compatible. The given mod func
should be written as if operating over a single sequence at a time. This adapter will
handle conversion from indices operating over a "stacked sequence" of length ``sum(S)``
for sequence length ``S`` in the NJT to "sequence relative" indices in range ``[0, S)``.
Args:
orig_mod_func (Callable): Function to modify attention scores. It takes four or five
arguments, depending on whether a mask_mod or score_mod func is passed.
q_nt (torch.Tensor): Jagged layout nested tensor (NJT) that defines the sequence length
structure for query.
kv_nt (torch.Tensor): Jagged layout nested tensor (NJT) that defines the sequence length
structure for key / value.
is_score_mod (bool): Indicates whether the mod function is a score_mod.
Returns:
nt_score_mod: An NJT-compatible version of orig_mod_func
"""
# Used to convert indices within the "stacked" sequence (range [0, sum(*)))
# to "sequence local" indices (range [0, S) for each S).
def _build_seq_idx(offsets, total_length):
range_tensor = torch.arange(
total_length, device=offsets.device, dtype=torch.int32
)
# Use searchsorted to find the index for each position
# NB: This assumes offsets[0] to offsets[-1] spans the packed dim of values.
# If we ever loosen this restriction, this logic will need to be updated.
seq_idx = torch.searchsorted(offsets, range_tensor, right=True) - 1
return seq_idx
q_offsets = q_nt._offsets # type: ignore[attr-defined]
kv_offsets = kv_nt._offsets # type: ignore[attr-defined]
q_seq_idx = _build_seq_idx(q_offsets, q_nt._values.shape[q_nt._ragged_idx - 1]) # type: ignore[attr-defined]
if q_nt is kv_nt:
kv_seq_idx = q_seq_idx
else:
# cross attention case
kv_seq_idx = _build_seq_idx(kv_offsets, kv_nt._values.shape[kv_nt._ragged_idx - 1]) # type: ignore[attr-defined]
# Converts q_idx / kv_idx from [0, total_length) -> [0, S), where S refers
# to the sequence length for each sequence in the NJT, for use in given
# score_mod. This allows the user to write a score_mod as if it were
# operating on a single sequence and the "stacked sequence" is split
# automatically into individual sequences for them.
if is_score_mod:
def nt_score_mod(score, b, h, q_idx, kv_idx):
b_nested = q_seq_idx[q_idx]
q_nested = q_idx - q_offsets[q_seq_idx[q_idx]]
kv_nested = kv_idx - kv_offsets[kv_seq_idx[kv_idx]]
is_same_sequence = q_seq_idx[q_idx] == kv_seq_idx[kv_idx]
return torch.where(
is_same_sequence,
orig_mod_func(score, b_nested, h, q_nested, kv_nested), # type: ignore[call-arg]
# don't allow inter-sequence attention
float("-inf"),
)
return nt_score_mod
else:
def nt_mask_mod(b, h, q_idx, kv_idx):
b_nested = q_seq_idx[q_idx]
q_nested = q_idx - q_offsets[q_seq_idx[q_idx]]
kv_nested = kv_idx - kv_offsets[kv_seq_idx[kv_idx]]
# don't allow inter-sequence attention
is_same_sequence = q_seq_idx[q_idx] == kv_seq_idx[kv_idx]
return orig_mod_func(b_nested, h, q_nested, kv_nested) & is_same_sequence # type: ignore[call-arg]
return nt_mask_mod
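# Illustrative sketch (not part of the original module): with offsets = [0, 3, 7], the
# "stacked" index 5 falls in sequence 1 (searchsorted(..., right=True) gives 2, minus 1)
# and maps to the sequence-relative index 5 - offsets[1] = 2; (q, kv) pairs from different
# sequences are rejected via the is_same_sequence check above.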
def create_nested_block_mask(
mask_mod: _mask_mod_signature,
B: Optional[int],
H: Optional[int],
q_nt: torch.Tensor,
kv_nt: Optional[torch.Tensor] = None,
BLOCK_SIZE: Union[int, tuple[int, int]] = _DEFAULT_SPARSE_BLOCK_SIZE,
_compile=False,
) -> BlockMask:
r"""This function creates a nested tensor compatible block mask tuple from a mask_mod
function. The returned BlockMask will be on the device specified by the input nested tensor.
Args:
mask_mod (Callable): mask_mod function. This is a callable that defines the
masking pattern for the attention mechanism. It takes four arguments:
b (batch size), h (number of heads), q_idx (query index), and kv_idx (key/value index).
It should return a boolean tensor indicating which attention connections are allowed
(True) or masked out (False).
B (int): Batch size.
H (int): Number of query heads.
q_nt (torch.Tensor): Jagged layout nested tensor (NJT) that defines the sequence length
structure for query. The block mask will be constructed to operate on a "stacked
sequence" of length ``sum(S)`` for sequence length ``S`` from the NJT.
kv_nt (torch.Tensor): Jagged layout nested tensor (NJT) that defines the sequence length
structure for key / value, allowing for cross attention. The block mask will be
constructed to operate on a "stacked sequence" of length ``sum(S)`` for sequence
length ``S`` from the NJT. If this is None, ``q_nt`` is used to define the structure
for key / value as well. Default: None
BLOCK_SIZE (int or tuple[int, int]): Block size for the block mask. If a single int is
provided it is used for both query and key/value.
Returns:
BlockMask: A BlockMask object that contains the block mask information.
Example Usage:
.. code-block:: python
# shape (B, num_heads, seq_len*, D) where seq_len* varies across the batch
query = torch.nested.nested_tensor(..., layout=torch.jagged)
key = torch.nested.nested_tensor(..., layout=torch.jagged)
value = torch.nested.nested_tensor(..., layout=torch.jagged)
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
block_mask = create_nested_block_mask(causal_mask, 1, 1, query, _compile=True)
output = flex_attention(query, key, value, block_mask=block_mask)
.. code-block:: python
# shape (B, num_heads, seq_len*, D) where seq_len* varies across the batch
query = torch.nested.nested_tensor(..., layout=torch.jagged)
key = torch.nested.nested_tensor(..., layout=torch.jagged)
value = torch.nested.nested_tensor(..., layout=torch.jagged)
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
# cross attention case: pass both query and key/value NJTs
block_mask = create_nested_block_mask(causal_mask, 1, 1, query, key, _compile=True)
output = flex_attention(query, key, value, block_mask=block_mask)
"""
# use same structure for kv as for q by default
if kv_nt is None:
kv_nt = q_nt
if q_nt.device != kv_nt.device:
raise ValueError(
"create_nested_block_mask(): Expected q_nt and kv_nt to be on the same device"
)
return create_block_mask(
_nested_mod_func_adapter(mask_mod, q_nt, kv_nt, is_score_mod=False), # type: ignore[arg-type]
B,
H,
q_nt._values.shape[q_nt._ragged_idx - 1], # type: ignore[attr-defined]
kv_nt._values.shape[kv_nt._ragged_idx - 1], # type: ignore[attr-defined]
device=q_nt.device, # type: ignore[arg-type]
# compile is important so we don't materialize a mask_tensor of
# shape (1, 1, total_seqlen, total_seqlen)
BLOCK_SIZE=BLOCK_SIZE,
_compile=_compile,
)
def _apply_kernel_options(
query: Tensor, key: Tensor, value: Tensor, return_lse: bool, kernel_options
):
kernel_options = {} if kernel_options is None else dict(kernel_options)
kernel_options.setdefault("PRESCALE_QK", False)
kernel_options.setdefault("ROWS_GUARANTEED_SAFE", False)
kernel_options.setdefault("BLOCKS_ARE_CONTIGUOUS", False)
# This forces all bias grad scatters to be done in the DQ iteration loop of the backwards pass
kernel_options.setdefault("WRITE_DQ", True)
# Whether the forward kernel needs to return logsumexp is decided internally by this rule.
assert "OUTPUT_LOGSUMEXP" not in kernel_options
kernel_options["OUTPUT_LOGSUMEXP"] = True
if not return_lse:
# We used to check if q,k,v required grads but since captured buffers can require grad
# we always write unless in no_grad
output_logsumexp = torch.is_grad_enabled()
kernel_options["OUTPUT_LOGSUMEXP"] = output_logsumexp
any_inputs_on_cpu_device = (
query.device.type == "cpu"
or key.device.type == "cpu"
or value.device.type == "cpu"
)
if any_inputs_on_cpu_device:
# CPU with torch.compile now supports inference, and will not return lse
# TODO: support CPU for training and return lse
kernel_options["OUTPUT_LOGSUMEXP"] = False
return kernel_options
def _validate_embed_dim(query: Tensor, key: Tensor, value: Tensor):
if query.size(-1) != key.size(-1):
raise ValueError(
f"Expect query and key/value to have the same embedding dimension "
f"but got E={query.size(-1)} and E={key.size(-1)}."
)
return
# TODO this config segfaults with Triton without:
# https://github.com/triton-lang/triton/pull/4540
if not (
_supported_head_dim(query.size(-1)) and _supported_head_dim(value.size(-1))
):
raise ValueError(
f"NYI: Currently non power of 2 embedding dimension are not supported. "
f"Got E={query.size(-1)} and Ev={value.size(-1)}."
)
def _validate_device(query: Tensor, key: Tensor, value: Tensor):
"""TODO: Remove once non cuda/cpu devices support is added
We only need to check query since we have already that q,k,v are on the same device
"""
if query.device.type != "cuda" and query.device.type != "cpu":
raise ValueError(
"FlexAttention is only supported on CUDA or CPU devices. "
f"Found input tensors on {query.device.type} device."
)
def _validate_nestedness(query: Tensor, key: Tensor, value: Tensor):
# Currently, inputs can only be all nested or no nested.
if query.is_nested != key.is_nested or key.is_nested != value.is_nested:
raise ValueError(
"FlexAttention does not support mixed nested tensor / non-nested tensor inputs. "
"Please file an issue requesting this if it is important to you."
)
if (
(query.is_nested and query._lengths is not None) # type: ignore[attr-defined]
or (key.is_nested and key._lengths is not None) # type: ignore[attr-defined]
or (value.is_nested and value._lengths is not None) # type: ignore[attr-defined]
):
raise ValueError(
"FlexAttention does not support nested tensors that are non-contiguous with holes. "
"Please file an issue requesting this if it is important to you."
)
def flex_attention(
query: Tensor,
key: Tensor,
value: Tensor,
score_mod: Optional[_score_mod_signature] = None,
block_mask: Optional[BlockMask] = None,
scale: Optional[float] = None,
enable_gqa: bool = False,
return_lse: bool = False,
kernel_options: Optional[dict[str, Any]] = None,
) -> Union[Tensor, tuple[Tensor, Tensor]]:
r"""This function implements scaled dot product attention with an arbitrary attention score modification function.
This function computes the scaled dot product attention between query, key, and value tensors with a user-defined
attention score modification function. The attention score modification function will be applied after the attention
scores have been calculated between the query and key tensors.
The ``score_mod`` function should have the following signature:
.. code-block:: python
def score_mod(
score: Tensor,
batch: Tensor,
head: Tensor,
q_idx: Tensor,
k_idx: Tensor
) -> Tensor:
Where:
- ``score``: A scalar tensor representing the attention score,
with the same data type and device as the query, key, and value tensors.
- ``batch``, ``head``, ``q_idx``, ``k_idx``: Scalar tensors indicating
the batch index, query head index, query index, and key/value index, respectively.
These should have the ``torch.int`` data type and be located on the same device as the score tensor.
Args:
query (Tensor): Query tensor; shape :math:`(B, Hq, L, E)`.
key (Tensor): Key tensor; shape :math:`(B, Hkv, S, E)`.
value (Tensor): Value tensor; shape :math:`(B, Hkv, S, Ev)`.
score_mod (Optional[Callable]): Function to modify attention scores. By default no score_mod is applied.
block_mask (Optional[BlockMask]): BlockMask object that controls the blocksparsity pattern of the attention.
scale (Optional[float]): Scaling factor applied prior to softmax. If none, the default value is set to :math:`\frac{1}{\sqrt{E}}`.
enable_gqa (bool): If set to True, enables Grouped Query Attention (GQA) and broadcasts key/value heads to query heads.
return_lse (bool): Whether to return the logsumexp of the attention scores. Default is False.
kernel_options (Optional[Dict[str, Any]]): Options to pass into the Triton kernels.
Returns:
output (Tensor): Attention output; shape :math:`(B, Hq, L, Ev)`.
Shape legend:
- :math:`B: \text{Batch size} ... : \text{Any number of other batch dimensions (optional)}`
- :math:`S: \text{Source sequence length}`
- :math:`L: \text{Target sequence length}`
- :math:`E: \text{Embedding dimension of the query and key}`
- :math:`Ev: \text{Embedding dimension of the value}`
.. warning::
`torch.nn.attention.flex_attention` is a prototype feature in PyTorch.
Please look forward to a more stable implementation in a future version of PyTorch.
Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype
"""
# Some basic input validation
_validate_sdpa_input(query, key, value)
_validate_embed_dim(query, key, value)
_validate_device(query, key, value)
_validate_nestedness(query, key, value)
if query.dim() != 4 or key.dim() != 4 or value.dim() != 4:
raise NotImplementedError("NYI: query, key, and value must be 4D tensors")
if (not enable_gqa) and query.size(-3) != key.size(-3):
raise ValueError(
f"Expect query and key/value to have the same number of heads "
f"but got Hq={query.size(-3)} and Hkv={key.size(-3)}. "
f"Try setting enable_gqa=True for GQA."
)
if enable_gqa:
Hq = query.size(1)
Hkv = key.size(1)
if Hq % Hkv != 0:
raise ValueError(
f"Expect number of query heads to be a multiple of kv heads for GQA "
f"but got Hq={Hq} and Hkv={Hkv}."
)
if query.size(0) != key.size(0):
if block_mask is None:
raise ValueError(
f"Expect query and key/value to have the same batch size, "
f"or non-none block_mask, "
f"but got block_mask=None, Bq={query.size(0)}, and Bkv={key.size(0)}."
)
if block_mask.kv_num_blocks.size(0) != query.size(0):
raise ValueError(
f"Expect query and key/value to have the same batch size, "
f"or block_mask and query to have the same batch size, "
f"but got Bq={query.size(0)}, Bkv={key.size(0)}, B_block_mask={block_mask.kv_num_blocks.size(0)}."
)
if score_mod is None:
score_mod = _identity
elif query.is_nested:
# use same NJT if the ragged structures for sequence lengths match between q and kv
kv = (
query
if query.size(query._ragged_idx) == key.size(query._ragged_idx) # type: ignore[attr-defined]
else key
)
score_mod = _nested_mod_func_adapter(score_mod, query, kv, is_score_mod=True) # type: ignore[assignment]
if block_mask is None:
block_mask = _create_empty_block_mask(query, key)
if (
block_mask.BLOCK_SIZE[0] == _LARGE_SPARSE_BLOCK_SIZE
and block_mask.BLOCK_SIZE[1] == _LARGE_SPARSE_BLOCK_SIZE
):
# This corresponds to the case where we essentially have a "no-op" block mask.
pass
elif query.is_nested:
if block_mask.shape[-2] != query._values.size(query._ragged_idx - 1): # type: ignore[attr-defined]
raise RuntimeError(
f"block_mask of shape {block_mask.shape} is not compatible with nested tensor input "
f"with total sequence length of {query._values.size(query._ragged_idx - 1)}" # type: ignore[attr-defined]
)
else:
block_mask_q_len = block_mask.shape[-2]
block_mask_kv_len = block_mask.shape[-1]
if query.size(-2) > block_mask_q_len or key.size(-2) > block_mask_kv_len:
raise ValueError(
f"block_mask was created for block_mask.shape={block_mask.shape} but got q_len={query.size(-2)} and kv_len={key.size(-2)}. "
"As the block mask was created for a smaller length than you're using it for, you likely need to create a new block mask."
)
elif (
query.size(-2) < block_mask_q_len and key.size(-2) <= block_mask_kv_len
) or (query.size(-2) <= block_mask_q_len and key.size(-2) < block_mask_kv_len):
raise ValueError(
f"block_mask was created for block_mask.shape={block_mask.shape} but got q_len={query.size(-2)} and kv_len={key.size(-2)}. "
"As the block mask was created for a larger length than you're using it for, you can either 1. create a new block mask with the correct length, or 2. 'adjust' the existing block mask to the correct length by calling block_mask._adjust(q_len, kv_len). This essentially 'crops' the block mask to the upper left corner, which does not work for all mask_mods!"
)
assert query.size(-2) == block_mask_q_len
assert key.size(-2) == block_mask_kv_len
if scale is None:
scale = 1.0 / math.sqrt(query.size(-1))
if query.device != block_mask.kv_num_blocks.device: # type: ignore[union-attr]
raise RuntimeError(
f"Expect q/k/v and block_mask to be on the same device "
f"but got {query.device} and {block_mask.kv_num_blocks.device}." # type: ignore[union-attr]
)
kernel_options = _apply_kernel_options(
query,
key,
value,
return_lse,
kernel_options,
)
if torch.compiler.is_dynamo_compiling():
# mark head_dim and number of heads to be static
for x in [query, key, value]:
torch._dynamo.mark_static(x, -3)
torch._dynamo.mark_static(x, -1)
out, lse = flex_attention_hop(
query, key, value, score_mod, block_mask.as_tuple(), scale, kernel_options # type: ignore[union-attr]
)
if return_lse:
return out, lse * math.log(2)
else:
return out
if not torch._dynamo.is_dynamo_supported():
raise RuntimeError("flex_attention requires dynamo support")
from torch._dynamo.backends.debugging import (
make_eager_backend_with_torch_function_mode,
)
# Dynamo is expecting a callable with "__code__" attribute.
# We cannot directly pass hop to it. So we wrap it in a dummy function.
def _flex_attention_hop_wrapper(*args, **kwargs):
return flex_attention_hop(*args, **kwargs)
with _set_compilation_env():
with torch._dynamo.utils.disable_cache_limit():
with _temp_remove_pre_dispatch_torch_function_mode():
with _temp_remove_metadata_torch_function_mode() as metadata_mode:
if metadata_mode:
backend = make_eager_backend_with_torch_function_mode(
metadata_mode
)
else:
backend = "eager"
out, lse = torch.compile(
_flex_attention_hop_wrapper, backend=backend, fullgraph=True
)(
query,
key,
value,
score_mod,
block_mask.as_tuple(), # type: ignore[union-attr]
scale,
kernel_options,
)
if return_lse:
return out, lse * math.log(2)
else:
return out
```
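A minimal end-to-end sketch of the API above (hypothetical standalone script, not part of the module): small CPU tensors in eager mode, purely to illustrate how ``create_block_mask``, a ``mask_mod``, and a ``score_mod`` fit together; production use is intended to go through ``torch.compile``, typically on CUDA.
```py
import torch
from torch.nn.attention.flex_attention import create_block_mask, flex_attention

B, H, S, D = 1, 2, 128, 16
query = torch.randn(B, H, S, D)
key = torch.randn(B, H, S, D)
value = torch.randn(B, H, S, D)


def causal_mask(b, h, q_idx, kv_idx):
    return q_idx >= kv_idx


def rel_bias(score, b, h, q_idx, kv_idx):
    # Toy additive bias that decays with query/key distance.
    return score - 0.1 * (q_idx - kv_idx).abs()


block_mask = create_block_mask(causal_mask, B, H, S, S, device="cpu")
out = flex_attention(query, key, value, score_mod=rel_bias, block_mask=block_mask)
print(out.shape)  # torch.Size([1, 2, 128, 16])
```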
|
=====================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\backends\__init__.py
ENCODING: utf-8
```py
```
|
=================================================================================================================
SOURCE CODE FILE: thnn.py
LINES: 1
SIZE: 0.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\backends\thnn.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# This is for historical pickle deserialization; it is not used otherwise.
def _get_thnn_function_backend():
pass
```
|
================================================================================================================
SOURCE CODE FILE: common_types.py
LINES: 1
SIZE: 1.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\common_types.py
ENCODING: utf-8
```py
from typing import Optional, TypeVar, Union
from torch import Tensor
# Create some useful type aliases
# Template for arguments which can be supplied as a tuple, or which can be a scalar which PyTorch will internally
# broadcast to a tuple.
# Comes in several variants: A tuple of unknown size, and a fixed-size tuple for 1d, 2d, or 3d operations.
T = TypeVar("T")
_scalar_or_tuple_any_t = Union[T, tuple[T, ...]]
_scalar_or_tuple_1_t = Union[T, tuple[T]]
_scalar_or_tuple_2_t = Union[T, tuple[T, T]]
_scalar_or_tuple_3_t = Union[T, tuple[T, T, T]]
_scalar_or_tuple_4_t = Union[T, tuple[T, T, T, T]]
_scalar_or_tuple_5_t = Union[T, tuple[T, T, T, T, T]]
_scalar_or_tuple_6_t = Union[T, tuple[T, T, T, T, T, T]]
# For arguments which represent size parameters (eg, kernel size, padding)
_size_any_t = _scalar_or_tuple_any_t[int]
_size_1_t = _scalar_or_tuple_1_t[int]
_size_2_t = _scalar_or_tuple_2_t[int]
_size_3_t = _scalar_or_tuple_3_t[int]
_size_4_t = _scalar_or_tuple_4_t[int]
_size_5_t = _scalar_or_tuple_5_t[int]
_size_6_t = _scalar_or_tuple_6_t[int]
# For arguments which represent optional size parameters (eg, adaptive pool parameters)
_size_any_opt_t = _scalar_or_tuple_any_t[Optional[int]]
_size_2_opt_t = _scalar_or_tuple_2_t[Optional[int]]
_size_3_opt_t = _scalar_or_tuple_3_t[Optional[int]]
# For arguments that represent a ratio to adjust each dimension of an input with (eg, upsampling parameters)
_ratio_2_t = _scalar_or_tuple_2_t[float]
_ratio_3_t = _scalar_or_tuple_3_t[float]
_ratio_any_t = _scalar_or_tuple_any_t[float]
_tensor_list_t = _scalar_or_tuple_any_t[Tensor]
# For the return value of max pooling operations that may or may not return indices.
# With the proposed 'Literal' feature to Python typing, it might be possible to
# eventually eliminate this.
_maybe_indices_t = _scalar_or_tuple_2_t[Tensor]
```
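A small usage sketch (hypothetical helper, not part of the module) showing how these aliases are typically paired with ``torch.nn.modules.utils._pair`` to accept either a scalar or a tuple, in the style of ``Conv2d``'s ``kernel_size``:
```py
from torch.nn.common_types import _size_2_t
from torch.nn.modules.utils import _pair


# Hypothetical helper: accepts either a scalar or a 2-tuple, like Conv2d's kernel_size.
def normalize_kernel_size(kernel_size: _size_2_t) -> tuple[int, int]:
    return _pair(kernel_size)


print(normalize_kernel_size(3))       # (3, 3)
print(normalize_kernel_size((3, 5)))  # (3, 5)
```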
|
=======================================================================================================
SOURCE CODE FILE: cpp.py
LINES: 1
SIZE: 3.03 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\cpp.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Functionality for Python <-> C++ frontend inter-op."""
from torch import nn
class OrderedDictWrapper:
"""A wrapper around a C++ OrderedDict.
It dynamically evaluates the OrderedDict getter on a bound C++ module, such
that new changes on the C++ side are picked up. Otherwise accessing e.g.
``cpp_module._parameters`` just once would get a frozen copy of the parameters
at the time of access. ``torch.nn.Module`` accesses ``_parameters`` et al. via ``self.__dict__``
so using properties does not work.
"""
def __init__(self, cpp_module, attr):
self.cpp_module = cpp_module
self.attr = attr
@property
def cpp_dict(self):
return getattr(self.cpp_module, self.attr)
# Magic methods cannot be assigned dynamically and bypass ``getattr``, so we
# must manually override them.
def items(self):
return self.cpp_dict.items()
def keys(self):
return self.cpp_dict.keys()
def values(self):
return self.cpp_dict.values()
def __iter__(self):
return self.cpp_dict.__iter__()
def __len__(self):
return self.cpp_dict.__len__()
def __contains__(self, key):
return self.cpp_dict.__contains__(key)
def __getitem__(self, key):
return self.cpp_dict.__getitem__(key)
class ModuleWrapper(nn.Module):
"""A subclass of ``torch.nn.Module`` that wraps a C++ frontend module and delegates all access."""
def __init__(self, cpp_module):
# Assign before the super class constructor so ``self.training`` can be
# assigned to in the super class constructor.
self.cpp_module = cpp_module
super().__init__()
self._parameters = OrderedDictWrapper(cpp_module, "_parameters") # type: ignore[assignment]
self._buffers: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_buffers") # type: ignore[assignment]
self._modules: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_modules") # type: ignore[assignment]
for attr in dir(cpp_module):
# Skip magic methods and the three attributes above.
if not attr.startswith("_"):
setattr(self, attr, getattr(self.cpp_module, attr))
def _apply(self, fn, recurse=True):
for param in self.parameters():
# Tensors stored in modules are graph leaves, and we don't
# want to create copy nodes, so we have to unpack the data.
param.data = fn(param.data)
if param._grad is not None:
param._grad.data = fn(param._grad.data)
for buf in self.buffers():
buf.data = fn(buf.data)
return self
# nn.Module defines training as a boolean
@property # type: ignore[override]
def training(self):
return self.cpp_module.training
@training.setter
def training(self, mode):
self.cpp_module.train(mode)
def __repr__(self):
return self.cpp_module.__repr__()
```
|
==============================================================================================================
SOURCE CODE FILE: functional.py
LINES: 1
SIZE: 242.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\functional.py
ENCODING: utf-8
```py
"""Functional interface."""
import importlib
import math
import warnings
from typing import Callable, Optional, TYPE_CHECKING, Union
import torch
from torch import _VF, sym_int as _sym_int, Tensor
from torch._C import _add_docstr, _infer_size
from torch._jit_internal import (
_overload,
boolean_dispatch,
BroadcastingList1,
BroadcastingList2,
BroadcastingList3,
)
from torch._torch_docs import reproducibility_notes, sparse_support_notes, tf32_notes
from torch.nn import _reduction as _Reduction, grad # noqa: F401
from torch.nn.modules.utils import _list_with_default, _pair, _single, _triple
from torch.overrides import (
handle_torch_function,
has_torch_function,
has_torch_function_unary,
has_torch_function_variadic,
)
if TYPE_CHECKING:
from torch.types import _dtype as DType
else:
# The JIT doesn't understand Union, nor torch.dtype here
DType = int
try:
import numpy as np
except ModuleNotFoundError:
np = None
conv1d = _add_docstr(
torch.conv1d,
r"""
conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 1D convolution over an input signal composed of several input
planes.
{tf32_note}
See :class:`~torch.nn.Conv1d` for details and output shape.
Note:
{cudnn_reproducibility_note}
Note:
This operator supports complex data types i.e. ``complex32, complex64, complex128``.
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kW)`
bias: optional bias of shape :math:`(\text{out\_channels})`. Default: ``None``
stride: the stride of the convolving kernel. Can be a single number or
a one-element tuple `(sW,)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},
single number or a one-element tuple `(padW,)`. Default: 0
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
.. warning::
For ``padding='same'``, if the ``weight`` is even-length and
``dilation`` is odd in any dimension, a full :func:`pad` operation
may be needed internally, lowering performance.
dilation: the spacing between kernel elements. Can be a single number or
a one-element tuple `(dW,)`. Default: 1
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by
the number of groups. Default: 1
Examples::
>>> inputs = torch.randn(33, 16, 30)
>>> filters = torch.randn(20, 16, 5)
>>> F.conv1d(inputs, filters)
""",
)
conv2d = _add_docstr(
torch.conv2d,
r"""
conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 2D convolution over an input image composed of several input
planes.
{tf32_note}
See :class:`~torch.nn.Conv2d` for details and output shape.
Note:
{cudnn_reproducibility_note}
Note:
This operator supports complex data types i.e. ``complex32, complex64, complex128``.
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)`
bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: ``None``
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sH, sW)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},
single number or a tuple `(padH, padW)`. Default: 0
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
.. warning::
For ``padding='same'``, if the ``weight`` is even-length and
``dilation`` is odd in any dimension, a full :func:`pad` operation
may be needed internally, lowering performance.
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dH, dW)`. Default: 1
groups: split input into groups, both :math:`\text{in\_channels}` and :math:`\text{out\_channels}`
should be divisible by the number of groups. Default: 1
Examples::
>>> # With square kernels and equal stride
>>> filters = torch.randn(8, 4, 3, 3)
>>> inputs = torch.randn(1, 4, 5, 5)
>>> F.conv2d(inputs, filters, padding=1)
""",
) # noqa: E501
conv3d = _add_docstr(
torch.conv3d,
r"""
conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 3D convolution over an input image composed of several input
planes.
{tf32_note}
See :class:`~torch.nn.Conv3d` for details and output shape.
Note:
{cudnn_reproducibility_note}
Note:
This operator supports complex data types i.e. ``complex32, complex64, complex128``.
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kT , kH , kW)`
bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sT, sH, sW)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},
single number or a tuple `(padT, padH, padW)`. Default: 0
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
.. warning::
For ``padding='same'``, if the ``weight`` is even-length and
``dilation`` is odd in any dimension, a full :func:`pad` operation
          may be needed internally, lowering performance.
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dT, dH, dW)`. Default: 1
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by
the number of groups. Default: 1
Examples::
>>> filters = torch.randn(33, 16, 3, 3, 3)
>>> inputs = torch.randn(20, 16, 50, 10, 20)
>>> F.conv3d(inputs, filters)
""",
) # noqa: E501
conv_transpose1d = _add_docstr(
torch.conv_transpose1d,
r"""
conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
Applies a 1D transposed convolution operator over an input signal
composed of several input planes, sometimes also called "deconvolution".
{tf32_note}
See :class:`~torch.nn.ConvTranspose1d` for details and output shape.
Note:
{cudnn_reproducibility_note}
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kW)`
bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple ``(sW,)``. Default: 1
padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
sides of each dimension in the input. Can be a single number or a tuple
``(padW,)``. Default: 0
output_padding: additional size added to one side of each dimension in the
      output shape. Can be a single number or a tuple ``(out_padW,)``. Default: 0
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
number of groups. Default: 1
dilation: the spacing between kernel elements. Can be a single number or
a tuple ``(dW,)``. Default: 1
Examples::
>>> inputs = torch.randn(20, 16, 50)
>>> weights = torch.randn(16, 33, 5)
>>> F.conv_transpose1d(inputs, weights)
""",
)
conv_transpose2d = _add_docstr(
torch.conv_transpose2d,
r"""
conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
Applies a 2D transposed convolution operator over an input image
composed of several input planes, sometimes also called "deconvolution".
{tf32_note}
See :class:`~torch.nn.ConvTranspose2d` for details and output shape.
Note:
{cudnn_reproducibility_note}
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kH , kW)`
bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple ``(sH, sW)``. Default: 1
padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
sides of each dimension in the input. Can be a single number or a tuple
``(padH, padW)``. Default: 0
output_padding: additional size added to one side of each dimension in the
output shape. Can be a single number or a tuple ``(out_padH, out_padW)``.
Default: 0
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
number of groups. Default: 1
dilation: the spacing between kernel elements. Can be a single number or
a tuple ``(dH, dW)``. Default: 1
Examples::
>>> # With square kernels and equal stride
>>> inputs = torch.randn(1, 4, 5, 5)
>>> weights = torch.randn(4, 8, 3, 3)
>>> F.conv_transpose2d(inputs, weights, padding=1)
""",
) # noqa: E501
conv_transpose3d = _add_docstr(
torch.conv_transpose3d,
r"""
conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
Applies a 3D transposed convolution operator over an input image
composed of several input planes, sometimes also called "deconvolution".
{tf32_note}
See :class:`~torch.nn.ConvTranspose3d` for details and output shape.
Note:
{cudnn_reproducibility_note}
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kT , kH , kW)`
bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple ``(sT, sH, sW)``. Default: 1
padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
sides of each dimension in the input. Can be a single number or a tuple
``(padT, padH, padW)``. Default: 0
output_padding: additional size added to one side of each dimension in the
output shape. Can be a single number or a tuple
``(out_padT, out_padH, out_padW)``. Default: 0
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
number of groups. Default: 1
dilation: the spacing between kernel elements. Can be a single number or
      a tuple ``(dT, dH, dW)``. Default: 1
Examples::
>>> inputs = torch.randn(20, 16, 50, 10, 20)
>>> weights = torch.randn(16, 33, 3, 3, 3)
>>> F.conv_transpose3d(inputs, weights)
""",
) # noqa: E501
conv_tbc = _add_docstr(
torch.conv_tbc,
r"""
Applies a 1-dimensional sequence convolution over an input sequence.
Input and output dimensions are (Time, Batch, Channels) - hence TBC.
Args:
input: input tensor of shape :math:`(\text{sequence length} \times batch \times \text{in\_channels})`
weight: filter of shape (:math:`\text{kernel width} \times \text{in\_channels} \times \text{out\_channels}`)
bias: bias of shape (:math:`\text{out\_channels}`)
pad: number of timesteps to pad. Default: 0
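Examples::
    >>> # A minimal sketch; the sizes below (time=10, batch=2, 16 -> 32 channels,
    >>> # kernel width 3) are assumed purely for illustration
    >>> inputs = torch.randn(10, 2, 16)
    >>> weight = torch.randn(3, 16, 32)
    >>> bias = torch.randn(32)
    >>> F.conv_tbc(inputs, weight, bias)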
""",
)
# Pooling
avg_pool1d = _add_docstr(
torch.avg_pool1d,
r"""
avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor
Applies a 1D average pooling over an input signal composed of several
input planes.
See :class:`~torch.nn.AvgPool1d` for details and output shape.
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
kernel_size: the size of the window. Can be a single number or a
tuple `(kW,)`
stride: the stride of the window. Can be a single number or a tuple
`(sW,)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padW,)`. Default: 0
ceil_mode: when True, will use `ceil` instead of `floor` to compute the
output shape. Default: ``False``
count_include_pad: when True, will include the zero-padding in the
averaging calculation. Default: ``True``
Examples::
>>> # pool of square window of size=3, stride=2
>>> input = torch.tensor([[[1, 2, 3, 4, 5, 6, 7]]], dtype=torch.float32)
>>> F.avg_pool1d(input, kernel_size=3, stride=2)
tensor([[[ 2., 4., 6.]]])
""",
)
avg_pool2d = _add_docstr(
torch._C._nn.avg_pool2d,
r"""
avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor
Applies a 2D average-pooling operation in :math:`kH \times kW` regions by step size
:math:`sH \times sW` steps. The number of output features is equal to the number of
input planes.
See :class:`~torch.nn.AvgPool2d` for details and output shape.
Args:
input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sH, sW)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padH, padW)`. Default: 0
ceil_mode: when True, will use `ceil` instead of `floor` in the formula
to compute the output shape. Default: ``False``
count_include_pad: when True, will include the zero-padding in the
averaging calculation. Default: ``True``
divisor_override: if specified, it will be used as divisor, otherwise
size of the pooling region will be used. Default: None
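Examples::
    >>> # A minimal sketch; shapes below are chosen only for demonstration
    >>> input = torch.randn(1, 3, 8, 8)
    >>> F.avg_pool2d(input, kernel_size=2, stride=2).shape
    torch.Size([1, 3, 4, 4])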
""",
)
avg_pool3d = _add_docstr(
torch._C._nn.avg_pool3d,
r"""
avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor
Applies a 3D average-pooling operation in :math:`kT \times kH \times kW` regions by step
size :math:`sT \times sH \times sW` steps. The number of output features is equal to
:math:`\lfloor\frac{\text{input planes}}{sT}\rfloor`.
See :class:`~torch.nn.AvgPool3d` for details and output shape.
Args:
    input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kT, kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sT, sH, sW)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
      single number or a tuple `(padT, padH, padW)`. Default: 0
    ceil_mode: when True, will use `ceil` instead of `floor` in the formula
        to compute the output shape. Default: ``False``
    count_include_pad: when True, will include the zero-padding in the
        averaging calculation. Default: ``True``
divisor_override: if specified, it will be used as divisor, otherwise
size of the pooling region will be used. Default: None
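Examples::
    >>> # A minimal sketch; shapes below are chosen only for demonstration
    >>> input = torch.randn(1, 3, 8, 8, 8)
    >>> F.avg_pool3d(input, kernel_size=2, stride=2).shape
    torch.Size([1, 3, 4, 4, 4])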
""",
)
def fractional_max_pool2d_with_indices(
input: Tensor,
kernel_size: BroadcastingList2[int],
output_size: Optional[BroadcastingList2[int]] = None,
output_ratio: Optional[BroadcastingList2[float]] = None,
return_indices: bool = False,
_random_samples: Optional[Tensor] = None,
) -> tuple[Tensor, Tensor]: # noqa: D400
r"""
fractional_max_pool2d(input, kernel_size, output_size=None, output_ratio=None, return_indices=False, _random_samples=None)
Applies 2D fractional max pooling over an input signal composed of several input planes.
Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic
step size determined by the target output size.
The number of output features is equal to the number of input planes.
Args:
kernel_size: the size of the window to take a max over.
Can be a single number :math:`k` (for a square kernel of :math:`k \times k`)
or a tuple `(kH, kW)`
output_size: the target output size of the image of the form :math:`oH \times oW`.
Can be a tuple `(oH, oW)` or a single number :math:`oH` for a square image :math:`oH \times oH`
output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
This has to be a number or tuple in the range (0, 1)
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to :func:`~torch.nn.functional.max_unpool2d`.
Examples::
>>> input = torch.randn(20, 16, 50, 32)
>>> # pool of square window of size=3, and target output size 13x12
>>> F.fractional_max_pool2d(input, 3, output_size=(13, 12))
>>> # pool of square window and target output size being half of input image size
>>> F.fractional_max_pool2d(input, 3, output_ratio=(0.5, 0.5))
.. _Fractional MaxPooling:
http://arxiv.org/abs/1412.6071
"""
if has_torch_function_variadic(input, _random_samples):
return handle_torch_function(
fractional_max_pool2d_with_indices,
(input, _random_samples),
input,
kernel_size,
output_size=output_size,
output_ratio=output_ratio,
return_indices=return_indices,
_random_samples=_random_samples,
)
if output_size is None and output_ratio is None:
raise ValueError(
"fractional_max_pool2d requires specifying either an output_size or an output_ratio"
)
if output_size is None:
assert output_ratio is not None
if len(output_ratio) > 2:
raise ValueError(
"fractional_max_pool2d requires output_ratio to either be a single Int or tuple of Ints."
)
_output_ratio = _pair(output_ratio)
output_size = [
int(input.size(-2) * _output_ratio[0]),
int(input.size(-1) * _output_ratio[1]),
]
if _random_samples is None:
n_batch = 1 if input.dim() == 3 else input.size(0)
_random_samples = torch.rand(
n_batch, input.size(-3), 2, dtype=input.dtype, device=input.device
)
return torch._C._nn.fractional_max_pool2d(
input, kernel_size, output_size, _random_samples
)
def _fractional_max_pool2d(
input: Tensor,
kernel_size: BroadcastingList2[int],
output_size: Optional[BroadcastingList2[int]] = None,
output_ratio: Optional[BroadcastingList2[float]] = None,
return_indices: bool = False,
_random_samples: Optional[Tensor] = None,
) -> Tensor:
if has_torch_function_variadic(input, _random_samples):
return handle_torch_function(
fractional_max_pool2d,
(input, _random_samples),
input,
kernel_size,
output_size=output_size,
output_ratio=output_ratio,
return_indices=return_indices,
_random_samples=_random_samples,
)
return fractional_max_pool2d_with_indices(
input, kernel_size, output_size, output_ratio, return_indices, _random_samples
)[0]
fractional_max_pool2d = boolean_dispatch(
arg_name="return_indices",
arg_index=4,
default=False,
if_true=fractional_max_pool2d_with_indices,
if_false=_fractional_max_pool2d,
module_name=__name__,
func_name="fractional_max_pool2d",
)
def fractional_max_pool3d_with_indices(
input: Tensor,
kernel_size: BroadcastingList3[int],
output_size: Optional[BroadcastingList3[int]] = None,
output_ratio: Optional[BroadcastingList3[float]] = None,
return_indices: bool = False,
_random_samples: Optional[Tensor] = None,
) -> tuple[Tensor, Tensor]: # noqa: D400
r"""
fractional_max_pool3d(input, kernel_size, output_size=None, output_ratio=None, return_indices=False, _random_samples=None)
Applies 3D fractional max pooling over an input signal composed of several input planes.
Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
step size determined by the target output size.
The number of output features is equal to the number of input planes.
Args:
kernel_size: the size of the window to take a max over.
Can be a single number :math:`k` (for a square kernel of :math:`k \times k \times k`)
or a tuple `(kT, kH, kW)`
output_size: the target output size of the form :math:`oT \times oH \times oW`.
Can be a tuple `(oT, oH, oW)` or a single number :math:`oH` for a cubic output
:math:`oH \times oH \times oH`
output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
This has to be a number or tuple in the range (0, 1)
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to :func:`~torch.nn.functional.max_unpool3d`.
Shape:
- Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where
:math:`(T_{out}, H_{out}, W_{out})=\text{output\_size}` or
:math:`(T_{out}, H_{out}, W_{out})=\text{output\_ratio} \times (T_{in}, H_{in}, W_{in})`
Examples::
>>> input = torch.randn(20, 16, 50, 32, 16)
>>> # pool of cubic window of size=3, and target output size 13x12x11
>>> F.fractional_max_pool3d(input, 3, output_size=(13, 12, 11))
>>> # pool of cubic window and target output size being half of input size
>>> F.fractional_max_pool3d(input, 3, output_ratio=(0.5, 0.5, 0.5))
.. _Fractional MaxPooling:
http://arxiv.org/abs/1412.6071
"""
if has_torch_function_variadic(input, _random_samples):
return handle_torch_function(
fractional_max_pool3d_with_indices,
(input, _random_samples),
input,
kernel_size,
output_size=output_size,
output_ratio=output_ratio,
return_indices=return_indices,
_random_samples=_random_samples,
)
if output_size is None and output_ratio is None:
raise ValueError(
"fractional_max_pool3d requires specifying either an output_size or an output_ratio"
)
if output_size is None:
assert output_ratio is not None
_output_ratio = _triple(output_ratio)
output_size = [
int(input.size(-3) * _output_ratio[0]),
int(input.size(-2) * _output_ratio[1]),
int(input.size(-1) * _output_ratio[2]),
]
if _random_samples is None:
n_batch = 1 if input.dim() == 4 else input.size(0)
_random_samples = torch.rand(
n_batch, input.size(-4), 3, dtype=input.dtype, device=input.device
)
return torch._C._nn.fractional_max_pool3d(
input, kernel_size, output_size, _random_samples
)
def _fractional_max_pool3d(
input: Tensor,
kernel_size: BroadcastingList3[int],
output_size: Optional[BroadcastingList3[int]] = None,
output_ratio: Optional[BroadcastingList3[float]] = None,
return_indices: bool = False,
_random_samples: Optional[Tensor] = None,
) -> Tensor:
if has_torch_function_variadic(input, _random_samples):
return handle_torch_function(
fractional_max_pool3d,
(input, _random_samples),
input,
kernel_size,
output_size=output_size,
output_ratio=output_ratio,
return_indices=return_indices,
_random_samples=_random_samples,
)
return fractional_max_pool3d_with_indices(
input, kernel_size, output_size, output_ratio, return_indices, _random_samples
)[0]
fractional_max_pool3d = boolean_dispatch(
arg_name="return_indices",
arg_index=4,
default=False,
if_true=fractional_max_pool3d_with_indices,
if_false=_fractional_max_pool3d,
module_name=__name__,
func_name="fractional_max_pool3d",
)
def max_pool1d_with_indices(
input: Tensor,
kernel_size: BroadcastingList1[int],
stride: Optional[BroadcastingList1[int]] = None,
padding: BroadcastingList1[int] = 0,
dilation: BroadcastingList1[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False,
) -> tuple[Tensor, Tensor]: # noqa: D400
r"""
max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)
Applies a 1D max pooling over an input signal composed of several input
planes.
.. note::
The order of :attr:`ceil_mode` and :attr:`return_indices` is different from
        what is seen in :class:`~torch.nn.MaxPool1d`, and will change in a future release.
See :class:`~torch.nn.MaxPool1d` for details.
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`, minibatch dim optional.
kernel_size: the size of the window. Can be a single number or a
tuple `(kW,)`
stride: the stride of the window. Can be a single number or a tuple
`(sW,)`. Default: :attr:`kernel_size`
padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
dilation: The stride between elements within a sliding window, must be > 0.
ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
ensures that every element in the input tensor is covered by a sliding window.
return_indices: If ``True``, will return the argmax along with the max values.
Useful for :class:`torch.nn.functional.max_unpool1d` later
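    Examples::
        >>> # A minimal sketch; the input values are chosen only for demonstration
        >>> input = torch.tensor([[[1., 2., 3., 4., 5., 6.]]])
        >>> F.max_pool1d(input, kernel_size=2, stride=2)
        tensor([[[2., 4., 6.]]])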
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_pool1d_with_indices,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(list[int], [])
return torch.max_pool1d_with_indices(
input, kernel_size, stride, padding, dilation, ceil_mode
)
def _max_pool1d(
input: Tensor,
kernel_size: BroadcastingList1[int],
stride: Optional[BroadcastingList1[int]] = None,
padding: BroadcastingList1[int] = 0,
dilation: BroadcastingList1[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False,
) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
max_pool1d,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(list[int], [])
return torch.max_pool1d(input, kernel_size, stride, padding, dilation, ceil_mode)
max_pool1d = boolean_dispatch(
arg_name="return_indices",
arg_index=6,
default=False,
if_true=max_pool1d_with_indices,
if_false=_max_pool1d,
module_name=__name__,
func_name="max_pool1d",
)
def max_pool2d_with_indices(
input: Tensor,
kernel_size: BroadcastingList2[int],
stride: Optional[BroadcastingList2[int]] = None,
padding: BroadcastingList2[int] = 0,
dilation: BroadcastingList2[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False,
) -> tuple[Tensor, Tensor]: # noqa: D400
r"""
max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)
Applies a 2D max pooling over an input signal composed of several input
planes.
.. note::
The order of :attr:`ceil_mode` and :attr:`return_indices` is different from
        what is seen in :class:`~torch.nn.MaxPool2d`, and will change in a future release.
See :class:`~torch.nn.MaxPool2d` for details.
Args:
input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`, minibatch dim optional.
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sH, sW)`. Default: :attr:`kernel_size`
padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
dilation: The stride between elements within a sliding window, must be > 0.
ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
ensures that every element in the input tensor is covered by a sliding window.
return_indices: If ``True``, will return the argmax along with the max values.
Useful for :class:`torch.nn.functional.max_unpool2d` later
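    Examples::
        >>> # A minimal sketch; shapes below are chosen only for demonstration
        >>> input = torch.randn(1, 3, 8, 8)
        >>> F.max_pool2d(input, kernel_size=2, stride=2).shape
        torch.Size([1, 3, 4, 4])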
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_pool2d_with_indices,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(list[int], [])
return torch._C._nn.max_pool2d_with_indices(
input, kernel_size, stride, padding, dilation, ceil_mode
)
def _max_pool2d(
input: Tensor,
kernel_size: BroadcastingList2[int],
stride: Optional[BroadcastingList2[int]] = None,
padding: BroadcastingList2[int] = 0,
dilation: BroadcastingList2[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False,
) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
max_pool2d,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(list[int], [])
return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
max_pool2d = boolean_dispatch(
arg_name="return_indices",
arg_index=6,
default=False,
if_true=max_pool2d_with_indices,
if_false=_max_pool2d,
module_name=__name__,
func_name="max_pool2d",
)
def max_pool3d_with_indices(
input: Tensor,
kernel_size: BroadcastingList3[int],
stride: Optional[BroadcastingList3[int]] = None,
padding: BroadcastingList3[int] = 0,
dilation: BroadcastingList3[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False,
) -> tuple[Tensor, Tensor]: # noqa: D400
r"""
max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)
Applies a 3D max pooling over an input signal composed of several input
planes.
.. note::
The order of :attr:`ceil_mode` and :attr:`return_indices` is different from
        what is seen in :class:`~torch.nn.MaxPool3d`, and will change in a future release.
See :class:`~torch.nn.MaxPool3d` for details.
Args:
input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iD, iH , iW)`, minibatch dim optional.
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kT, kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sT, sH, sW)`. Default: :attr:`kernel_size`
padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
dilation: The stride between elements within a sliding window, must be > 0.
ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
ensures that every element in the input tensor is covered by a sliding window.
return_indices: If ``True``, will return the argmax along with the max values.
Useful for :class:`torch.nn.functional.max_unpool3d` later
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_pool3d_with_indices,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(list[int], [])
return torch._C._nn.max_pool3d_with_indices(
input, kernel_size, stride, padding, dilation, ceil_mode
)
def _max_pool3d(
input: Tensor,
kernel_size: BroadcastingList3[int],
stride: Optional[BroadcastingList3[int]] = None,
padding: BroadcastingList3[int] = 0,
dilation: BroadcastingList3[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False,
) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
max_pool3d,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(list[int], [])
return torch.max_pool3d(input, kernel_size, stride, padding, dilation, ceil_mode)
max_pool3d = boolean_dispatch(
arg_name="return_indices",
arg_index=6,
default=False,
if_true=max_pool3d_with_indices,
if_false=_max_pool3d,
module_name=__name__,
func_name="max_pool3d",
)
def _unpool_output_size(
input: Tensor,
kernel_size: list[int],
stride: list[int],
padding: list[int],
output_size: Optional[list[int]],
) -> list[int]:
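    # The default unpooled size inverts the pooling shape formula:
    #   default[d] = (input[d] - 1) * stride[d] + kernel_size[d] - 2 * padding[d]
    # An explicit ``output_size`` is only accepted if each dimension lies within
    # one stride of this default.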
input_size = input.size()
default_size = torch.jit.annotate(list[int], [])
for d in range(len(kernel_size)):
default_size.append(
(input_size[-len(kernel_size) + d] - 1) * stride[d]
+ kernel_size[d]
- 2 * padding[d]
)
if output_size is None:
ret = default_size
else:
if len(output_size) == len(kernel_size) + 2:
output_size = output_size[2:]
if len(output_size) != len(kernel_size):
raise ValueError(
"output_size should be a sequence containing "
f"{len(kernel_size)} or {len(kernel_size) + 2} elements, but it has a length of '{len(output_size)}'"
)
for d in range(len(kernel_size)):
min_size = default_size[d] - stride[d]
max_size = default_size[d] + stride[d]
if not (min_size < output_size[d] < max_size):
raise ValueError(
f'invalid output_size "{output_size}" (dim {d} must be between {min_size} and {max_size})'
)
ret = output_size
return ret
def max_unpool1d(
input: Tensor,
indices: Tensor,
kernel_size: BroadcastingList1[int],
stride: Optional[BroadcastingList1[int]] = None,
padding: BroadcastingList1[int] = 0,
output_size: Optional[BroadcastingList1[int]] = None,
) -> Tensor:
r"""Compute a partial inverse of :class:`MaxPool1d`.
See :class:`~torch.nn.MaxUnpool1d` for details.
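    Examples::
        >>> # A minimal round-trip sketch; the input values are chosen only for demonstration
        >>> input = torch.tensor([[[1., 2., 3., 4.]]])
        >>> output, indices = F.max_pool1d(input, kernel_size=2, stride=2, return_indices=True)
        >>> F.max_unpool1d(output, indices, kernel_size=2, stride=2)
        tensor([[[0., 2., 0., 4.]]])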
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_unpool1d,
(input,),
input,
indices,
kernel_size,
stride=stride,
padding=padding,
output_size=output_size,
)
kernel_size = _single(kernel_size)
if stride is not None:
_stride = _single(stride)
else:
_stride = kernel_size
padding = _single(padding)
output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
if isinstance(output_size, list):
output_size = output_size + [1]
else:
output_size = output_size + (1,)
return torch._C._nn.max_unpool2d(
input.unsqueeze(-1), indices.unsqueeze(-1), output_size
).squeeze(-1)
def max_unpool2d(
input: Tensor,
indices: Tensor,
kernel_size: BroadcastingList2[int],
stride: Optional[BroadcastingList2[int]] = None,
padding: BroadcastingList2[int] = 0,
output_size: Optional[BroadcastingList2[int]] = None,
) -> Tensor:
r"""Compute a partial inverse of :class:`MaxPool2d`.
See :class:`~torch.nn.MaxUnpool2d` for details.
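    Examples::
        >>> # A minimal round-trip sketch; shapes below are chosen only for demonstration
        >>> input = torch.randn(1, 1, 4, 4)
        >>> output, indices = F.max_pool2d(input, kernel_size=2, stride=2, return_indices=True)
        >>> F.max_unpool2d(output, indices, kernel_size=2, stride=2).shape
        torch.Size([1, 1, 4, 4])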
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_unpool2d,
(input,),
input,
indices,
kernel_size,
stride=stride,
padding=padding,
output_size=output_size,
)
kernel_size = _pair(kernel_size)
if stride is not None:
_stride = _pair(stride)
else:
_stride = kernel_size
padding = _pair(padding)
output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
return torch._C._nn.max_unpool2d(input, indices, output_size)
def max_unpool3d(
input: Tensor,
indices: Tensor,
kernel_size: BroadcastingList3[int],
stride: Optional[BroadcastingList3[int]] = None,
padding: BroadcastingList3[int] = 0,
output_size: Optional[BroadcastingList3[int]] = None,
) -> Tensor:
r"""Compute a partial inverse of :class:`MaxPool3d`.
See :class:`~torch.nn.MaxUnpool3d` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_unpool3d,
(input,),
input,
indices,
kernel_size,
stride=stride,
padding=padding,
output_size=output_size,
)
kernel_size = _triple(kernel_size)
if stride is not None:
_stride = _triple(stride)
else:
_stride = kernel_size
padding = _triple(padding)
output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
return torch._C._nn.max_unpool3d(input, indices, output_size, _stride, padding)
def lp_pool3d(
input: Tensor,
norm_type: Union[int, float],
kernel_size: BroadcastingList3[int],
stride: Optional[BroadcastingList3[int]] = None,
ceil_mode: bool = False,
) -> Tensor:
r"""
Apply a 3D power-average pooling over an input signal composed of several input planes.
If the sum of all inputs to the power of `p` is
zero, the gradient is set to zero as well.
See :class:`~torch.nn.LPPool3d` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
lp_pool3d,
(input,),
input,
norm_type,
kernel_size,
stride=stride,
ceil_mode=ceil_mode,
)
kd, kw, kh = _triple(kernel_size)
if stride is not None:
out = avg_pool3d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
else:
out = avg_pool3d(
input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode
)
return (
(torch.sign(out) * relu(torch.abs(out))).mul(kd * kw * kh).pow(1.0 / norm_type)
)
def lp_pool2d(
input: Tensor,
norm_type: Union[int, float],
kernel_size: BroadcastingList2[int],
stride: Optional[BroadcastingList2[int]] = None,
ceil_mode: bool = False,
) -> Tensor:
r"""
Apply a 2D power-average pooling over an input signal composed of several input planes.
If the sum of all inputs to the power of `p` is
zero, the gradient is set to zero as well.
See :class:`~torch.nn.LPPool2d` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
lp_pool2d,
(input,),
input,
norm_type,
kernel_size,
stride=stride,
ceil_mode=ceil_mode,
)
kw, kh = _pair(kernel_size)
if stride is not None:
out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
else:
out = avg_pool2d(
input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode
)
return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1.0 / norm_type)
def lp_pool1d(
input: Tensor,
norm_type: Union[int, float],
kernel_size: int,
stride: Optional[BroadcastingList1[int]] = None,
ceil_mode: bool = False,
) -> Tensor:
r"""Apply a 1D power-average pooling over an input signal composed of several input planes.
If the sum of all inputs to the power of `p` is
zero, the gradient is set to zero as well.
See :class:`~torch.nn.LPPool1d` for details.
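    Examples::
        >>> # A minimal sketch; with norm_type=2 this is an L2-norm pooling over windows of size 2
        >>> input = torch.tensor([[[1., 2., 3., 4.]]])
        >>> F.lp_pool1d(input, norm_type=2, kernel_size=2, stride=2)
        tensor([[[2.2361, 5.0000]]])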
"""
if has_torch_function_unary(input):
return handle_torch_function(
lp_pool1d,
(input,),
input,
norm_type,
kernel_size,
stride=stride,
ceil_mode=ceil_mode,
)
if stride is not None:
out = avg_pool1d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
else:
out = avg_pool1d(
input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode
)
return (
(torch.sign(out) * relu(torch.abs(out))).mul(kernel_size).pow(1.0 / norm_type)
)
def adaptive_max_pool1d_with_indices(
input: Tensor,
output_size: BroadcastingList1[int],
return_indices: bool = False,
) -> tuple[Tensor, Tensor]: # noqa: D400
r"""
adaptive_max_pool1d(input, output_size, return_indices=False)
Applies a 1D adaptive max pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveMaxPool1d` for details and output shape.
Args:
output_size: the target output size (single integer)
return_indices: whether to return pooling indices. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool1d_with_indices,
(input,),
input,
output_size,
return_indices=return_indices,
)
return torch.adaptive_max_pool1d(input, output_size)
def _adaptive_max_pool1d(
input: Tensor,
output_size: BroadcastingList1[int],
return_indices: bool = False,
) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool1d,
(input,),
input,
output_size,
return_indices=return_indices,
)
return adaptive_max_pool1d_with_indices(input, output_size)[0]
adaptive_max_pool1d = boolean_dispatch(
arg_name="return_indices",
arg_index=2,
default=False,
if_true=adaptive_max_pool1d_with_indices,
if_false=_adaptive_max_pool1d,
module_name=__name__,
func_name="adaptive_max_pool1d",
)
def adaptive_max_pool2d_with_indices(
input: Tensor,
output_size: BroadcastingList2[int],
return_indices: bool = False,
) -> tuple[Tensor, Tensor]: # noqa: D400
r"""adaptive_max_pool2d(input, output_size, return_indices=False)
Applies a 2D adaptive max pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveMaxPool2d` for details and output shape.
Args:
output_size: the target output size (single integer or
double-integer tuple)
return_indices: whether to return pooling indices. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool2d_with_indices,
(input,),
input,
output_size,
return_indices=return_indices,
)
output_size = _list_with_default(output_size, input.size())
return torch._C._nn.adaptive_max_pool2d(input, output_size)
def _adaptive_max_pool2d(
input: Tensor,
output_size: BroadcastingList2[int],
return_indices: bool = False,
) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool2d,
(input,),
input,
output_size,
return_indices=return_indices,
)
return adaptive_max_pool2d_with_indices(input, output_size)[0]
adaptive_max_pool2d = boolean_dispatch(
arg_name="return_indices",
arg_index=2,
default=False,
if_true=adaptive_max_pool2d_with_indices,
if_false=_adaptive_max_pool2d,
module_name=__name__,
func_name="adaptive_max_pool2d",
)
def adaptive_max_pool3d_with_indices(
input: Tensor,
output_size: BroadcastingList3[int],
return_indices: bool = False,
) -> tuple[Tensor, Tensor]: # noqa: D400
r"""
adaptive_max_pool3d(input, output_size, return_indices=False)
Applies a 3D adaptive max pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveMaxPool3d` for details and output shape.
Args:
output_size: the target output size (single integer or
triple-integer tuple)
return_indices: whether to return pooling indices. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool3d_with_indices,
(input,),
input,
output_size,
return_indices=return_indices,
)
output_size = _list_with_default(output_size, input.size())
return torch._C._nn.adaptive_max_pool3d(input, output_size)
def _adaptive_max_pool3d(
input: Tensor,
output_size: BroadcastingList3[int],
return_indices: bool = False,
) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool3d,
(input,),
input,
output_size,
return_indices=return_indices,
)
return adaptive_max_pool3d_with_indices(input, output_size)[0]
adaptive_max_pool3d = boolean_dispatch(
arg_name="return_indices",
arg_index=2,
default=False,
if_true=adaptive_max_pool3d_with_indices,
if_false=_adaptive_max_pool3d,
module_name=__name__,
func_name="adaptive_max_pool3d",
)
adaptive_avg_pool1d = _add_docstr(
torch.adaptive_avg_pool1d,
r"""
adaptive_avg_pool1d(input, output_size) -> Tensor
Applies a 1D adaptive average pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveAvgPool1d` for details and output shape.
Args:
output_size: the target output size (single integer)
""",
)
def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
r"""Apply a 2D adaptive average pooling over an input signal composed of several input planes.
See :class:`~torch.nn.AdaptiveAvgPool2d` for details and output shape.
Args:
output_size: the target output size (single integer or
double-integer tuple)
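    Examples::
        >>> # A minimal sketch; shapes below are chosen only for demonstration
        >>> input = torch.randn(1, 64, 8, 6)
        >>> F.adaptive_avg_pool2d(input, (4, 3)).shape
        torch.Size([1, 64, 4, 3])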
"""
if has_torch_function_unary(input):
return handle_torch_function(adaptive_avg_pool2d, (input,), input, output_size)
_output_size = _list_with_default(output_size, input.size())
return torch._C._nn.adaptive_avg_pool2d(input, _output_size)
def adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList3[int]) -> Tensor:
r"""Apply a 3D adaptive average pooling over an input signal composed of several input planes.
See :class:`~torch.nn.AdaptiveAvgPool3d` for details and output shape.
Args:
output_size: the target output size (single integer or
triple-integer tuple)
"""
if has_torch_function_unary(input):
return handle_torch_function(adaptive_avg_pool3d, (input,), input, output_size)
_output_size = _list_with_default(output_size, input.size())
return torch._C._nn.adaptive_avg_pool3d(input, _output_size)
# Activation functions
def dropout(
input: Tensor,
p: float = 0.5,
training: bool = True,
inplace: bool = False,
) -> Tensor:
r"""During training, randomly zeroes some elements of the input tensor with probability :attr:`p`.
Uses samples from a Bernoulli distribution.
See :class:`~torch.nn.Dropout` for details.
Args:
p: probability of an element to be zeroed. Default: 0.5
        training: apply dropout if ``True``. Default: ``True``
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
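    Examples::
        >>> # A minimal sketch; surviving elements are scaled by 1 / (1 - p) during training
        >>> input = torch.ones(1, 8)
        >>> F.dropout(input, p=0.5, training=True)   # roughly half the entries are zeroed, the rest become 2
        >>> F.dropout(input, p=0.5, training=False)  # identity when training=False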
"""
if has_torch_function_unary(input):
return handle_torch_function(
dropout, (input,), input, p=p, training=training, inplace=inplace
)
if p < 0.0 or p > 1.0:
raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
return (
_VF.dropout_(input, p, training) if inplace else _VF.dropout(input, p, training)
)
def alpha_dropout(
input: Tensor,
p: float = 0.5,
training: bool = False,
inplace: bool = False,
) -> Tensor:
r"""Apply alpha dropout to the input.
See :class:`~torch.nn.AlphaDropout` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
alpha_dropout, (input,), input, p=p, training=training, inplace=inplace
)
if p < 0.0 or p > 1.0:
raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
return (
_VF.alpha_dropout_(input, p, training)
if inplace
else _VF.alpha_dropout(input, p, training)
)
def dropout1d(
input: Tensor,
p: float = 0.5,
training: bool = True,
inplace: bool = False,
) -> Tensor:
r"""Randomly zero out entire channels (a channel is a 1D feature map).
For example, the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 1D tensor :math:`\text{input}[i, j]` of the input tensor.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
See :class:`~torch.nn.Dropout1d` for details.
Args:
p: probability of a channel to be zeroed. Default: 0.5
        training: apply dropout if ``True``. Default: ``True``
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
dropout1d, (input,), input, p=p, training=training, inplace=inplace
)
if p < 0.0 or p > 1.0:
raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
inp_dim = input.dim()
if inp_dim not in (2, 3):
raise RuntimeError(
f"dropout1d: Expected 2D or 3D input, but received a {inp_dim}D input. "
"Note that dropout1d exists to provide channel-wise dropout on inputs with 1 "
"spatial dimension, a channel dimension, and an optional batch dimension "
"(i.e. 2D or 3D inputs)."
)
is_batched = inp_dim == 3
if not is_batched:
input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)
result = (
_VF.feature_dropout_(input, p, training)
if inplace
else _VF.feature_dropout(input, p, training)
)
if not is_batched:
result = result.squeeze_(0) if inplace else result.squeeze(0)
return result
def dropout2d(
input: Tensor,
p: float = 0.5,
training: bool = True,
inplace: bool = False,
) -> Tensor:
r"""Randomly zero out entire channels (a channel is a 2D feature map).
For example, the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 2D tensor :math:`\text{input}[i, j]` of the input tensor.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
See :class:`~torch.nn.Dropout2d` for details.
Args:
p: probability of a channel to be zeroed. Default: 0.5
        training: apply dropout if ``True``. Default: ``True``
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
dropout2d, (input,), input, p=p, training=training, inplace=inplace
)
if p < 0.0 or p > 1.0:
raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
inp_dim = input.dim()
if inp_dim not in (3, 4):
warn_msg = (
f"dropout2d: Received a {inp_dim}-D input to dropout2d, which is deprecated "
"and will result in an error in a future release. To retain the behavior "
"and silence this warning, please use dropout instead. Note that dropout2d "
"exists to provide channel-wise dropout on inputs with 2 spatial dimensions, "
"a channel dimension, and an optional batch dimension (i.e. 3D or 4D inputs)."
)
warnings.warn(warn_msg)
# TODO: Properly support no-batch-dim inputs. For now, these are NOT supported; passing
# a 3D input will perform dropout1d behavior instead. This was done historically and the
# behavior is maintained here for now.
# See https://github.com/pytorch/pytorch/issues/77081
if inp_dim == 3:
warnings.warn(
"dropout2d: Received a 3D input to dropout2d and assuming that channel-wise "
"1D dropout behavior is desired - input is interpreted as shape (N, C, L), where C "
"is the channel dim. This behavior will change in a future release to interpret the "
"input as one without a batch dimension, i.e. shape (C, H, W). To maintain the 1D "
"channel-wise dropout behavior, please switch to using dropout1d instead."
)
result = (
_VF.feature_dropout_(input, p, training)
if inplace
else _VF.feature_dropout(input, p, training)
)
return result
def dropout3d(
input: Tensor,
p: float = 0.5,
training: bool = True,
inplace: bool = False,
) -> Tensor:
r"""Randomly zero out entire channels (a channel is a 3D feature map).
For example, the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 3D tensor :math:`\text{input}[i, j]` of the input tensor.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
See :class:`~torch.nn.Dropout3d` for details.
Args:
p: probability of a channel to be zeroed. Default: 0.5
        training: apply dropout if ``True``. Default: ``True``
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
dropout3d, (input,), input, p=p, training=training, inplace=inplace
)
if p < 0.0 or p > 1.0:
raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
inp_dim = input.dim()
if inp_dim not in (4, 5):
warn_msg = (
f"dropout3d: Received a {inp_dim}-D input to dropout3d, which is deprecated "
"and will result in an error in a future release. To retain the behavior "
"and silence this warning, please use dropout instead. Note that dropout3d "
"exists to provide channel-wise dropout on inputs with 3 spatial dimensions, "
"a channel dimension, and an optional batch dimension (i.e. 4D or 5D inputs)."
)
warnings.warn(warn_msg)
is_batched = inp_dim == 5
if not is_batched:
input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)
result = (
_VF.feature_dropout_(input, p, training)
if inplace
else _VF.feature_dropout(input, p, training)
)
if not is_batched:
result = result.squeeze_(0) if inplace else result.squeeze(0)
return result
def feature_alpha_dropout(
input: Tensor,
p: float = 0.5,
training: bool = False,
inplace: bool = False,
) -> Tensor:
r"""Randomly masks out entire channels (a channel is a feature map).
For example, the :math:`j`-th channel of the :math:`i`-th sample in the batch input
is a tensor :math:`\text{input}[i, j]` of the input tensor. Instead of
setting activations to zero, as in regular Dropout, the activations are set
to the negative saturation value of the SELU activation function.
Each element will be masked independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
The elements to be masked are randomized on every forward call, and scaled
and shifted to maintain zero mean and unit variance.
See :class:`~torch.nn.FeatureAlphaDropout` for details.
Args:
p: dropout probability of a channel to be zeroed. Default: 0.5
        training: apply dropout if ``True``. Default: ``False``
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
feature_alpha_dropout,
(input,),
input,
p=p,
training=training,
inplace=inplace,
)
if p < 0.0 or p > 1.0:
raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
return (
_VF.feature_alpha_dropout_(input, p, training)
if inplace
else _VF.feature_alpha_dropout(input, p, training)
)
def _threshold(
input: Tensor,
threshold: float,
value: float,
inplace: bool = False,
) -> Tensor:
r"""Apply a threshold to each element of the input Tensor.
See :class:`~torch.nn.Threshold` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
_threshold, (input,), input, threshold, value, inplace=inplace
)
if inplace:
result = _VF.threshold_(input, threshold, value)
else:
result = _VF.threshold(input, threshold, value)
return result
# We define this function as _threshold because it takes an argument
# named threshold, which clobbers the recursive reference to the
# function needed for __torch_function__ support
threshold = _threshold
threshold_ = _add_docstr(
_VF.threshold_,
r"""
threshold_(input, threshold, value) -> Tensor
In-place version of :func:`~threshold`.
""",
)
def relu(input: Tensor, inplace: bool = False) -> Tensor: # noqa: D400,D402
r"""relu(input, inplace=False) -> Tensor
Applies the rectified linear unit function element-wise. See
:class:`~torch.nn.ReLU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(relu, (input,), input, inplace=inplace)
if inplace:
result = torch.relu_(input)
else:
result = torch.relu(input)
return result
relu_ = _add_docstr(
torch.relu_,
r"""
relu_(input) -> Tensor
In-place version of :func:`~relu`.
""",
)
def glu(input: Tensor, dim: int = -1) -> Tensor: # noqa: D400,D402
r"""
glu(input, dim=-1) -> Tensor
The gated linear unit. Computes:
.. math ::
\text{GLU}(a, b) = a \otimes \sigma(b)
where `input` is split in half along `dim` to form `a` and `b`, :math:`\sigma`
is the sigmoid function and :math:`\otimes` is the element-wise product between matrices.
See `Language Modeling with Gated Convolutional Networks <https://arxiv.org/abs/1612.08083>`_.
Args:
input (Tensor): input tensor
dim (int): dimension on which to split the input. Default: -1
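    Examples::
        >>> # A minimal sketch; the last dimension (size 4) is split into halves a and b
        >>> input = torch.randn(2, 4)
        >>> F.glu(input, dim=-1).shape
        torch.Size([2, 2])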
"""
if has_torch_function_unary(input):
return handle_torch_function(glu, (input,), input, dim=dim)
if input.dim() == 0:
raise RuntimeError(
"glu does not support scalars because halving size must be even"
)
return torch._C._nn.glu(input, dim)
def hardtanh(
input: Tensor,
min_val: float = -1.0,
max_val: float = 1.0,
inplace: bool = False,
) -> Tensor: # noqa: D400,D402
r"""
hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor
Applies the HardTanh function element-wise. See :class:`~torch.nn.Hardtanh` for more
details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
hardtanh, (input,), input, min_val=min_val, max_val=max_val, inplace=inplace
)
if min_val > max_val:
raise ValueError("min_val cannot be greater than max_val")
if inplace:
result = torch._C._nn.hardtanh_(input, min_val, max_val)
else:
result = torch._C._nn.hardtanh(input, min_val, max_val)
return result
hardtanh_ = _add_docstr(
torch._C._nn.hardtanh_,
r"""
hardtanh_(input, min_val=-1., max_val=1.) -> Tensor
In-place version of :func:`~hardtanh`.
""",
)
def relu6(input: Tensor, inplace: bool = False) -> Tensor: # noqa: D400,D402
r"""relu6(input, inplace=False) -> Tensor
Applies the element-wise function :math:`\text{ReLU6}(x) = \min(\max(0,x), 6)`.
See :class:`~torch.nn.ReLU6` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(relu6, (input,), input, inplace=inplace)
if inplace:
result = torch._C._nn.relu6_(input)
else:
result = torch._C._nn.relu6(input)
return result
def elu(input: Tensor, alpha: float = 1.0, inplace: bool = False) -> Tensor:
r"""Apply the Exponential Linear Unit (ELU) function element-wise.
See :class:`~torch.nn.ELU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(elu, (input,), input, alpha=alpha, inplace=inplace)
if inplace:
result = torch._C._nn.elu_(input, alpha)
else:
result = torch._C._nn.elu(input, alpha)
return result
elu_ = _add_docstr(
torch._C._nn.elu_,
r"""
elu_(input, alpha=1.) -> Tensor
In-place version of :func:`~elu`.
""",
)
def selu(input: Tensor, inplace: bool = False) -> Tensor: # noqa: D400,D402
r"""selu(input, inplace=False) -> Tensor
Applies element-wise,
:math:`\text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))`,
with :math:`\alpha=1.6732632423543772848170429916717` and
:math:`scale=1.0507009873554804934193349852946`.
See :class:`~torch.nn.SELU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(selu, (input,), input, inplace=inplace)
if inplace:
result = torch.selu_(input)
else:
result = torch.selu(input)
return result
selu_ = _add_docstr(
torch.selu_,
r"""
selu_(input) -> Tensor
In-place version of :func:`~selu`.
""",
)
def celu(
input: Tensor,
alpha: float = 1.0,
inplace: bool = False,
) -> Tensor: # noqa: D400,D402
r"""celu(input, alpha=1., inplace=False) -> Tensor
Applies element-wise,
:math:`\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))`.
See :class:`~torch.nn.CELU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
celu, (input,), input, alpha=alpha, inplace=inplace
)
if inplace:
result = torch.celu_(input, alpha)
else:
result = torch.celu(input, alpha)
return result
celu_ = _add_docstr(
torch.celu_,
r"""
celu_(input, alpha=1.) -> Tensor
In-place version of :func:`~celu`.
""",
)
def leaky_relu(
input: Tensor,
negative_slope: float = 0.01,
inplace: bool = False,
) -> Tensor: # noqa: D400,D402
r"""
leaky_relu(input, negative_slope=0.01, inplace=False) -> Tensor
Applies element-wise,
:math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`
See :class:`~torch.nn.LeakyReLU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
leaky_relu, (input,), input, negative_slope=negative_slope, inplace=inplace
)
if inplace:
result = torch._C._nn.leaky_relu_(input, negative_slope)
else:
result = torch._C._nn.leaky_relu(input, negative_slope)
return result
leaky_relu_ = _add_docstr(
torch._C._nn.leaky_relu_,
r"""
leaky_relu_(input, negative_slope=0.01) -> Tensor
In-place version of :func:`~leaky_relu`.
""",
)
prelu = _add_docstr(
torch.prelu,
r"""prelu(input, weight) -> Tensor
Applies element-wise the function
:math:`\text{PReLU}(x) = \max(0,x) + \text{weight} * \min(0,x)` where weight is a
learnable parameter.
.. note::
`weight` is expected to be a scalar or 1-D tensor. If `weight` is 1-D,
its size must match the number of input channels, determined by
`input.size(1)` when `input.dim() >= 2`, otherwise 1.
In the 1-D case, note that when `input` has dim > 2, `weight` can be expanded
to the shape of `input` in a way that is not possible using normal
:ref:`broadcasting semantics<broadcasting-semantics>`.
See :class:`~torch.nn.PReLU` for more details.
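Examples::
    >>> # A minimal sketch; a single learnable slope shared across all channels
    >>> input = torch.randn(2, 3, 4)
    >>> weight = torch.tensor([0.25])
    >>> F.prelu(input, weight).shape
    torch.Size([2, 3, 4])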
""",
)
def rrelu(
input: Tensor,
lower: float = 1.0 / 8,
upper: float = 1.0 / 3,
training: bool = False,
inplace: bool = False,
) -> Tensor: # noqa: D400,D402
r"""rrelu(input, lower=1./8, upper=1./3, training=False, inplace=False) -> Tensor
Randomized leaky ReLU.
See :class:`~torch.nn.RReLU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
rrelu,
(input,),
input,
lower=lower,
upper=upper,
training=training,
inplace=inplace,
)
if inplace:
result = torch.rrelu_(input, lower, upper, training)
else:
result = torch.rrelu(input, lower, upper, training)
return result
rrelu_ = _add_docstr(
torch.rrelu_,
r"""
rrelu_(input, lower=1./8, upper=1./3, training=False) -> Tensor
In-place version of :func:`~rrelu`.
""",
)
logsigmoid = _add_docstr(
torch._C._nn.log_sigmoid,
r"""
logsigmoid(input) -> Tensor
Applies element-wise :math:`\text{LogSigmoid}(x_i) = \log \left(\frac{1}{1 + \exp(-x_i)}\right)`
See :class:`~torch.nn.LogSigmoid` for more details.
""",
)
gelu = _add_docstr(
torch._C._nn.gelu,
r"""
gelu(input, approximate='none') -> Tensor
When the approximate argument is 'none', it applies element-wise the function
:math:`\text{GELU}(x) = x * \Phi(x)`
where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
When the approximate argument is 'tanh', GELU is estimated with
.. math::
\text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))
See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
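Examples::
    >>> # A minimal sketch comparing the exact and tanh-approximated forms
    >>> x = torch.randn(3)
    >>> F.gelu(x)
    >>> F.gelu(x, approximate='tanh')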
""",
)
hardshrink = _add_docstr(
torch.hardshrink,
r"""
hardshrink(input, lambd=0.5) -> Tensor
Applies the hard shrinkage function element-wise
See :class:`~torch.nn.Hardshrink` for more details.
""",
)
def tanhshrink(input): # noqa: D400,D402
r"""tanhshrink(input) -> Tensor
Applies element-wise, :math:`\text{Tanhshrink}(x) = x - \text{Tanh}(x)`
See :class:`~torch.nn.Tanhshrink` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(tanhshrink, (input,), input)
return input - input.tanh()
def softsign(input): # noqa: D400,D402
r"""softsign(input) -> Tensor
Applies element-wise, the function :math:`\text{SoftSign}(x) = \frac{x}{1 + |x|}`
See :class:`~torch.nn.Softsign` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(softsign, (input,), input)
return input / (input.abs() + 1)
softplus = _add_docstr(
torch._C._nn.softplus,
r"""
softplus(input, beta=1, threshold=20) -> Tensor
Applies element-wise, the function :math:`\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))`.
For numerical stability the implementation reverts to the linear function
when :math:`input \times \beta > threshold`.
See :class:`~torch.nn.Softplus` for more details.
""",
)
def _get_softmax_dim(name: str, ndim: int, stacklevel: int) -> int:
warnings.warn(
f"Implicit dimension choice for {name} has been deprecated. "
"Change the call to include dim=X as an argument.",
stacklevel=stacklevel,
)
if ndim == 0 or ndim == 1 or ndim == 3:
ret = 0
else:
ret = 1
return ret
def softmin(
input: Tensor,
dim: Optional[int] = None,
_stacklevel: int = 3,
dtype: Optional[DType] = None,
) -> Tensor:
r"""Apply a softmin function.
Note that :math:`\text{Softmin}(x) = \text{Softmax}(-x)`. See softmax definition for mathematical formula.
See :class:`~torch.nn.Softmin` for more details.
Args:
input (Tensor): input
dim (int): A dimension along which softmin will be computed (so every slice
along dim will sum to 1).
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is casted to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
"""
if has_torch_function_unary(input):
return handle_torch_function(
softmin, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype
)
if dim is None:
dim = _get_softmax_dim("softmin", input.dim(), _stacklevel)
if dtype is None:
ret = (-input).softmax(dim)
else:
ret = (-input).softmax(dim, dtype=dtype)
return ret
def softmax(
input: Tensor,
dim: Optional[int] = None,
_stacklevel: int = 3,
dtype: Optional[DType] = None,
) -> Tensor:
r"""Apply a softmax function.
Softmax is defined as:
:math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}`
It is applied to all slices along dim, and will re-scale them so that the elements
lie in the range `[0, 1]` and sum to 1.
See :class:`~torch.nn.Softmax` for more details.
Args:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
.. note::
This function doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use log_softmax instead (it's faster and has better numerical properties).
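    Example (illustrative; every slice along ``dim`` sums to one)::
        >>> x = torch.randn(2, 3)
        >>> out = F.softmax(x, dim=1)
        >>> torch.allclose(out.sum(dim=1), torch.ones(2))
        True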
"""
if has_torch_function_unary(input):
return handle_torch_function(
softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype
)
if dim is None:
dim = _get_softmax_dim("softmax", input.dim(), _stacklevel)
if dtype is None:
ret = input.softmax(dim)
else:
ret = input.softmax(dim, dtype=dtype)
return ret
def gumbel_softmax(
logits: Tensor,
tau: float = 1,
hard: bool = False,
eps: float = 1e-10,
dim: int = -1,
) -> Tensor:
r"""
Sample from the Gumbel-Softmax distribution (`Link 1`_ `Link 2`_) and optionally discretize.
Args:
logits: `[..., num_features]` unnormalized log probabilities
tau: non-negative scalar temperature
hard: if ``True``, the returned samples will be discretized as one-hot vectors,
but will be differentiated as if it is the soft sample in autograd
dim (int): A dimension along which softmax will be computed. Default: -1.
Returns:
Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution.
If ``hard=True``, the returned samples will be one-hot, otherwise they will
be probability distributions that sum to 1 across `dim`.
.. note::
        This function is here for legacy reasons and may be removed from nn.functional in the future.
.. note::
        The main trick for `hard` is to do `y_hard - y_soft.detach() + y_soft`.
        It achieves two things:
        - makes the output value exactly one-hot
          (since we add then subtract the y_soft value)
        - makes the gradient equal to the y_soft gradient
          (since we strip all other gradients)
Examples::
>>> logits = torch.randn(20, 32)
>>> # Sample soft categorical using reparametrization trick:
>>> F.gumbel_softmax(logits, tau=1, hard=False)
>>> # Sample hard categorical using "Straight-through" trick:
>>> F.gumbel_softmax(logits, tau=1, hard=True)
.. _Link 1:
https://arxiv.org/abs/1611.00712
.. _Link 2:
https://arxiv.org/abs/1611.01144
"""
if has_torch_function_unary(logits):
return handle_torch_function(
gumbel_softmax, (logits,), logits, tau=tau, hard=hard, eps=eps, dim=dim
)
if eps != 1e-10:
warnings.warn("`eps` parameter is deprecated and has no effect.")
gumbels = (
-torch.empty_like(logits, memory_format=torch.legacy_contiguous_format)
.exponential_()
.log()
) # ~Gumbel(0,1)
gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)
y_soft = gumbels.softmax(dim)
if hard:
# Straight through.
index = y_soft.max(dim, keepdim=True)[1]
y_hard = torch.zeros_like(
logits, memory_format=torch.legacy_contiguous_format
).scatter_(dim, index, 1.0)
ret = y_hard - y_soft.detach() + y_soft
else:
# Reparametrization trick.
ret = y_soft
return ret
def log_softmax(
input: Tensor,
dim: Optional[int] = None,
_stacklevel: int = 3,
dtype: Optional[DType] = None,
) -> Tensor:
r"""Apply a softmax followed by a logarithm.
While mathematically equivalent to log(softmax(x)), doing these two
operations separately is slower and numerically unstable. This function
uses an alternative formulation to compute the output and gradient correctly.
See :class:`~torch.nn.LogSoftmax` for more details.
Args:
input (Tensor): input
dim (int): A dimension along which log_softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
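    Example (illustrative; agrees with ``softmax().log()`` up to floating-point error)::
        >>> x = torch.randn(2, 3)
        >>> torch.allclose(F.log_softmax(x, dim=1), F.softmax(x, dim=1).log(), atol=1e-6)
        True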
"""
if has_torch_function_unary(input):
return handle_torch_function(
log_softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype
)
if dim is None:
dim = _get_softmax_dim("log_softmax", input.dim(), _stacklevel)
if dtype is None:
ret = input.log_softmax(dim)
else:
ret = input.log_softmax(dim, dtype=dtype)
return ret
softshrink = _add_docstr(
torch._C._nn.softshrink,
r"""
softshrink(input, lambd=0.5) -> Tensor
Applies the soft shrinkage function elementwise
See :class:`~torch.nn.Softshrink` for more details.
""",
)
def tanh(input): # noqa: D400,D402
r"""tanh(input) -> Tensor
Applies element-wise,
:math:`\text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)}{\exp(x) + \exp(-x)}`
See :class:`~torch.nn.Tanh` for more details.
"""
return input.tanh()
def sigmoid(input): # noqa: D400,D402
r"""sigmoid(input) -> Tensor
Applies the element-wise function :math:`\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}`
See :class:`~torch.nn.Sigmoid` for more details.
"""
return input.sigmoid()
def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor:
r"""Apply the Hardsigmoid function element-wise.
.. math::
\text{Hardsigmoid}(x) = \begin{cases}
0 & \text{if~} x \le -3, \\
1 & \text{if~} x \ge +3, \\
x / 6 + 1 / 2 & \text{otherwise}
\end{cases}
Args:
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
See :class:`~torch.nn.Hardsigmoid` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(hardsigmoid, (input,), input, inplace=inplace)
if inplace:
return torch._C._nn.hardsigmoid_(input)
return torch._C._nn.hardsigmoid(input)
linear = _add_docstr(
torch._C._nn.linear,
r"""
linear(input, weight, bias=None) -> Tensor
Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
This operation supports 2-D :attr:`weight` with :ref:`sparse layout<sparse-docs>`
{sparse_beta_warning}
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
Shape:
- Input: :math:`(*, in\_features)` where `*` means any number of
additional dimensions, including none
- Weight: :math:`(out\_features, in\_features)` or :math:`(in\_features)`
- Bias: :math:`(out\_features)` or :math:`()`
- Output: :math:`(*, out\_features)` or :math:`(*)`, based on the shape of the weight
""".format(
**sparse_support_notes
),
)
bilinear = _add_docstr(
torch.bilinear,
r"""
bilinear(input1, input2, weight, bias=None) -> Tensor
Applies a bilinear transformation to the incoming data:
:math:`y = x_1^T A x_2 + b`
Shape:
- input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\text{in1\_features}`
and :math:`*` means any number of additional dimensions.
All but the last dimension of the inputs should be the same.
- input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\text{in2\_features}`
- weight: :math:`(\text{out\_features}, \text{in1\_features},
\text{in2\_features})`
- bias: :math:`(\text{out\_features})`
- output: :math:`(N, *, H_{out})` where :math:`H_{out}=\text{out\_features}`
and all but the last dimension are the same shape as the input.
""",
)
def silu(input: Tensor, inplace: bool = False) -> Tensor:
r"""Apply the Sigmoid Linear Unit (SiLU) function, element-wise.
The SiLU function is also known as the swish function.
.. math::
\text{silu}(x) = x * \sigma(x), \text{where } \sigma(x) \text{ is the logistic sigmoid.}
.. note::
See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_
where the SiLU (Sigmoid Linear Unit) was originally coined, and see
`Sigmoid-Weighted Linear Units for Neural Network Function Approximation
in Reinforcement Learning <https://arxiv.org/abs/1702.03118>`_ and `Swish:
a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941v1>`_
where the SiLU was experimented with later.
See :class:`~torch.nn.SiLU` for more details.
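    Example (an illustrative check against the definition)::
        >>> x = torch.randn(4)
        >>> torch.allclose(F.silu(x), x * torch.sigmoid(x))
        True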
"""
if has_torch_function_unary(input):
return handle_torch_function(silu, (input,), input, inplace=inplace)
if inplace:
return torch._C._nn.silu_(input)
return torch._C._nn.silu(input)
def mish(input: Tensor, inplace: bool = False) -> Tensor:
r"""Apply the Mish function, element-wise.
Mish: A Self Regularized Non-Monotonic Neural Activation Function.
.. math::
\text{Mish}(x) = x * \text{Tanh}(\text{Softplus}(x))
.. note::
See `Mish: A Self Regularized Non-Monotonic Neural Activation Function <https://arxiv.org/abs/1908.08681>`_
See :class:`~torch.nn.Mish` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(mish, (input,), input, inplace=inplace)
if inplace:
return torch._C._nn.mish_(input)
return torch._C._nn.mish(input)
def hardswish(input: Tensor, inplace: bool = False) -> Tensor:
r"""Apply hardswish function, element-wise.
    Follows the implementation described in the paper:
`Searching for MobileNetV3`_.
.. math::
\text{Hardswish}(x) = \begin{cases}
0 & \text{if~} x \le -3, \\
x & \text{if~} x \ge +3, \\
x \cdot (x + 3) /6 & \text{otherwise}
\end{cases}
See :class:`~torch.nn.Hardswish` for more details.
.. _`Searching for MobileNetV3`:
https://arxiv.org/abs/1905.02244
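    Example (illustrative; saturates to 0 below -3 and to the identity above +3)::
        >>> x = torch.tensor([-4.0, 0.0, 4.0])
        >>> y = F.hardswish(x)  # yields 0, 0 and 4 respectively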
"""
if has_torch_function_unary(input):
return handle_torch_function(hardswish, (input,), input, inplace=inplace)
if inplace:
return torch._C._nn.hardswish_(input)
return torch._C._nn.hardswish(input)
def _no_grad_embedding_renorm_(
weight: Tensor,
input: Tensor,
max_norm: float,
norm_type: float,
) -> tuple[Tensor, Tensor]:
torch.embedding_renorm_(weight.detach(), input, max_norm, norm_type)
def embedding(
input: Tensor,
weight: Tensor,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
) -> Tensor:
r"""Generate a simple lookup table that looks up embeddings in a fixed dictionary and size.
This module is often used to retrieve word embeddings using indices.
The input to the module is a list of indices, and the embedding matrix,
and the output is the corresponding word embeddings.
See :class:`torch.nn.Embedding` for more details.
.. note::
Note that the analytical gradients of this function with respect to
entries in :attr:`weight` at the row specified by :attr:`padding_idx`
are expected to differ from the numerical ones.
.. note::
        Note that :class:`torch.nn.Embedding` differs from this function in
that it initializes the row of :attr:`weight` specified by
:attr:`padding_idx` to all zeros on construction.
Args:
input (LongTensor): Tensor containing indices into the embedding matrix
weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1,
and number of columns equal to the embedding size
padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
i.e. it remains as a fixed "pad".
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
Note: this will modify :attr:`weight` in-place.
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` will be a sparse tensor. See Notes under
:class:`torch.nn.Embedding` for more details regarding sparse gradients.
Shape:
- Input: LongTensor of arbitrary shape containing the indices to extract
- Weight: Embedding matrix of floating point type with shape `(V, embedding_dim)`,
where V = maximum index + 1 and embedding_dim = the embedding size
- Output: `(*, embedding_dim)`, where `*` is the input shape
Examples::
>>> # a batch of 2 samples of 4 indices each
>>> input = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
>>> # an embedding matrix containing 10 tensors of size 3
>>> embedding_matrix = torch.rand(10, 3)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> F.embedding(input, embedding_matrix)
tensor([[[ 0.8490, 0.9625, 0.6753],
[ 0.9666, 0.7761, 0.6108],
[ 0.6246, 0.9751, 0.3618],
[ 0.4161, 0.2419, 0.7383]],
[[ 0.6246, 0.9751, 0.3618],
[ 0.0237, 0.7794, 0.0528],
[ 0.9666, 0.7761, 0.6108],
[ 0.3385, 0.8612, 0.1867]]])
>>> # example with padding_idx
>>> weights = torch.rand(10, 3)
>>> weights[0, :].zero_()
>>> embedding_matrix = weights
>>> input = torch.tensor([[0, 2, 0, 5]])
>>> F.embedding(input, embedding_matrix, padding_idx=0)
tensor([[[ 0.0000, 0.0000, 0.0000],
[ 0.5609, 0.5384, 0.8720],
[ 0.0000, 0.0000, 0.0000],
[ 0.6262, 0.2438, 0.7471]]])
"""
if has_torch_function_variadic(input, weight):
return handle_torch_function(
embedding,
(input, weight),
input,
weight,
padding_idx=padding_idx,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse,
)
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < weight.size(
0
), "Padding_idx must be within num_embeddings"
elif padding_idx < 0:
assert padding_idx >= -weight.size(
0
), "Padding_idx must be within num_embeddings"
padding_idx = weight.size(0) + padding_idx
else:
padding_idx = -1
if max_norm is not None:
# Note [embedding_renorm contiguous]
# `embedding_renorm_` will call .contiguous() on input anyways, so we
# call it here and take advantage of the improved locality in the
# `embedding` call below too.
input = input.contiguous()
# Note [embedding_renorm set_grad_enabled]
# XXX: equivalent to
# with torch.no_grad():
# torch.embedding_renorm_
# remove once script supports set_grad_enabled
_no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
def embedding_bag(
input: Tensor,
weight: Tensor,
offsets: Optional[Tensor] = None,
max_norm: Optional[float] = None,
norm_type: float = 2,
scale_grad_by_freq: bool = False,
mode: str = "mean",
sparse: bool = False,
per_sample_weights: Optional[Tensor] = None,
include_last_offset: bool = False,
padding_idx: Optional[int] = None,
) -> Tensor:
r"""Compute sums, means or maxes of `bags` of embeddings.
Calculation is done without instantiating the intermediate embeddings.
See :class:`torch.nn.EmbeddingBag` for more details.
Note:
{backward_reproducibility_note}
Args:
input (LongTensor): Tensor containing bags of indices into the embedding matrix
weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1,
and number of columns equal to the embedding size
offsets (LongTensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines
the starting index position of each bag (sequence) in :attr:`input`.
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
Note: this will modify :attr:`weight` in-place.
norm_type (float, optional): The ``p`` in the ``p``-norm to compute for the :attr:`max_norm` option.
Default ``2``.
scale_grad_by_freq (bool, optional): if given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
Note: this option is not supported when ``mode="max"``.
mode (str, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
Default: ``"mean"``
sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` will be a sparse tensor. See Notes under
:class:`torch.nn.Embedding` for more details regarding sparse gradients.
Note: this option is not supported when ``mode="max"``.
per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
to indicate all weights should be taken to be 1. If specified, :attr:`per_sample_weights`
must have exactly the same shape as input and is treated as having the same
:attr:`offsets`, if those are not None.
include_last_offset (bool, optional): if ``True``, the size of offsets is equal to the number of bags + 1.
The last element is the size of the input, or the ending index position of the last bag (sequence).
padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the
gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated
during training, i.e. it remains as a fixed "pad". Note that the embedding
vector at :attr:`padding_idx` is excluded from the reduction.
Shape:
- :attr:`input` (LongTensor) and :attr:`offsets` (LongTensor, optional)
- If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences)
each of fixed length ``N``, and this will return ``B`` values aggregated in a way
depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None`` in this case.
- If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of
multiple bags (sequences). :attr:`offsets` is required to be a 1D tensor containing
the starting index positions of each bag in :attr:`input`. Therefore, for :attr:`offsets`
of shape `(B)`, :attr:`input` will be viewed as having ``B`` bags.
Empty bags (i.e., having 0-length) will have returned vectors filled by zeros.
- :attr:`weight` (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)`
- :attr:`per_sample_weights` (Tensor, optional). Has the same shape as :attr:`input`.
- :attr:`output`: aggregated embedding values of shape `(B, embedding_dim)`
Examples::
>>> # an Embedding module containing 10 tensors of size 3
>>> embedding_matrix = torch.rand(10, 3)
>>> # a batch of 2 samples of 4 indices each
>>> input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
>>> offsets = torch.tensor([0, 4])
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> F.embedding_bag(input, embedding_matrix, offsets)
tensor([[ 0.3397, 0.3552, 0.5545],
[ 0.5893, 0.4386, 0.5882]])
>>> # example with padding_idx
>>> embedding_matrix = torch.rand(10, 3)
>>> input = torch.tensor([2, 2, 2, 2, 4, 3, 2, 9])
>>> offsets = torch.tensor([0, 4])
>>> F.embedding_bag(input, embedding_matrix, offsets, padding_idx=2, mode='sum')
tensor([[ 0.0000, 0.0000, 0.0000],
[-0.7082, 3.2145, -2.6251]])
"""
if has_torch_function_variadic(input, weight, offsets, per_sample_weights):
return handle_torch_function(
embedding_bag,
(input, weight, offsets, per_sample_weights),
input,
weight,
offsets=offsets,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
mode=mode,
sparse=sparse,
per_sample_weights=per_sample_weights,
include_last_offset=include_last_offset,
padding_idx=padding_idx,
)
# Check for backward compatibility.
# Used to be embedding_bag(weight, input, ...)
# Now is embedding_bag(input, weight, ...)
if weight.dtype == torch.long and input.is_floating_point():
warnings.warn(
"Argument order of nn.functional.embedding_bag was changed. "
"Usage `embedding_bag(weight, input, ...)` is deprecated, "
"and should now be `embedding_bag(input, weight, ...)`."
)
weight, input = input, weight
if per_sample_weights is not None and input.size() != per_sample_weights.size():
raise ValueError(
f"embedding_bag: If per_sample_weights ({per_sample_weights.shape}) is not None, "
f"then it must have the same shape as the input ({input.shape})"
)
if not weight.dim() == 2:
raise ValueError(
f"weight has to be a 2D Tensor, but got Tensor of dimension {weight.dim()}"
)
if not torch.jit.is_scripting() and input.dim() == 2 and input.is_nested:
include_last_offset = True
offsets = input.offsets()
input = input.values().reshape(-1)
if per_sample_weights is not None:
if not per_sample_weights.is_nested:
raise ValueError(
"If input is nested, then per_sample_weights must be nested if specified"
)
per_sample_weights = per_sample_weights.values().reshape(-1)
elif input.dim() == 2:
if offsets is not None:
type_str = "<unknown>"
# TODO: Remove this once script supports type() calls
if not torch.jit.is_scripting():
type_str = str(type(offsets))
raise ValueError(
"if input is 2D, then offsets has to be None"
", as input is treated is a mini-batch of"
" fixed length sequences. However, found "
f"offsets of type {type_str}"
)
offsets = torch.arange(
0, input.numel(), input.size(1), dtype=input.dtype, device=input.device
)
input = input.reshape(-1)
if per_sample_weights is not None:
per_sample_weights = per_sample_weights.reshape(-1)
elif input.dim() == 1:
if offsets is None:
raise ValueError("offsets has to be a 1D Tensor but got None")
if offsets.dim() != 1:
raise ValueError("offsets has to be a 1D Tensor")
else:
raise ValueError(
f"input has to be 1D or 2D Tensor, but got Tensor of dimension {input.dim()}"
)
if mode == "sum":
mode_enum = 0
elif mode == "mean":
mode_enum = 1
elif mode == "max":
mode_enum = 2
if scale_grad_by_freq:
raise ValueError(
"max mode does not support scaling the gradient by the frequency"
)
if sparse:
raise ValueError("max mode does not support sparse weights")
else:
raise ValueError("mode has to be one of sum, mean or max")
if max_norm is not None:
# XXX: equivalent to
# with torch.no_grad():
        #   torch.embedding_renorm_
# remove once script supports set_grad_enabled
_no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
if per_sample_weights is not None and mode != "sum":
raise NotImplementedError(
"embedding_bag: per_sample_weights was not None. "
"per_sample_weights is only supported for mode='sum' "
f"(got mode='{mode}'). Please open a feature request on GitHub."
)
ret, _, _, _ = torch.embedding_bag(
weight,
input,
offsets,
scale_grad_by_freq,
mode_enum,
sparse,
per_sample_weights,
include_last_offset,
padding_idx,
)
return ret
if embedding_bag.__doc__:
embedding_bag.__doc__ = embedding_bag.__doc__.format(**reproducibility_notes)
def _verify_batch_size(size: list[int]) -> None:
# XXX: JIT script does not support the reduce from functools, and mul op is a
# builtin, which cannot be used as a value to a func yet, so rewrite this size
# check to a simple equivalent for loop
#
# TODO: make use of reduce like below when JIT is ready with the missing features:
# from operator import mul
# from functools import reduce
#
# if reduce(mul, size[2:], size[0]) == 1
size_prods = size[0]
for i in range(len(size) - 2):
size_prods *= size[i + 2]
if size_prods == 1:
raise ValueError(
f"Expected more than 1 value per channel when training, got input size {size}"
)
def batch_norm(
input: Tensor,
running_mean: Optional[Tensor],
running_var: Optional[Tensor],
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
training: bool = False,
momentum: float = 0.1,
eps: float = 1e-5,
) -> Tensor:
r"""Apply Batch Normalization for each channel across a batch of data.
See :class:`~torch.nn.BatchNorm1d`, :class:`~torch.nn.BatchNorm2d`,
:class:`~torch.nn.BatchNorm3d` for details.
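    Example (a minimal illustrative call using batch statistics)::
        >>> x = torch.randn(8, 4)  # (N, C)
        >>> running_mean = torch.zeros(4)
        >>> running_var = torch.ones(4)
        >>> out = F.batch_norm(x, running_mean, running_var, training=True)
        >>> out.shape
        torch.Size([8, 4])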
"""
if has_torch_function_variadic(input, running_mean, running_var, weight, bias):
return handle_torch_function(
batch_norm,
(input, running_mean, running_var, weight, bias),
input,
running_mean,
running_var,
weight=weight,
bias=bias,
training=training,
momentum=momentum,
eps=eps,
)
if training:
_verify_batch_size(input.size())
return torch.batch_norm(
input,
weight,
bias,
running_mean,
running_var,
training,
momentum,
eps,
torch.backends.cudnn.enabled,
)
def _verify_spatial_size(size: list[int]) -> None:
# Verify that there is > 1 spatial element for instance norm calculation.
size_prods = 1
for i in range(2, len(size)):
size_prods *= size[i]
if size_prods == 1:
raise ValueError(
f"Expected more than 1 spatial element when training, got input size {size}"
)
def instance_norm(
input: Tensor,
running_mean: Optional[Tensor] = None,
running_var: Optional[Tensor] = None,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
use_input_stats: bool = True,
momentum: float = 0.1,
eps: float = 1e-5,
) -> Tensor:
r"""Apply Instance Normalization independently for each channel in every data sample within a batch.
See :class:`~torch.nn.InstanceNorm1d`, :class:`~torch.nn.InstanceNorm2d`,
:class:`~torch.nn.InstanceNorm3d` for details.
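    Example (illustrative; statistics are computed per sample and per channel)::
        >>> x = torch.randn(2, 3, 8, 8)
        >>> out = F.instance_norm(x)
        >>> out.shape
        torch.Size([2, 3, 8, 8])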
"""
if has_torch_function_variadic(input, running_mean, running_var, weight, bias):
return handle_torch_function(
instance_norm,
(input, running_mean, running_var, weight, bias),
input,
running_mean=running_mean,
running_var=running_var,
weight=weight,
bias=bias,
use_input_stats=use_input_stats,
momentum=momentum,
eps=eps,
)
if use_input_stats:
_verify_spatial_size(input.size())
return torch.instance_norm(
input,
weight,
bias,
running_mean,
running_var,
use_input_stats,
momentum,
eps,
torch.backends.cudnn.enabled,
)
def layer_norm(
input: Tensor,
normalized_shape: list[int],
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
eps: float = 1e-5,
) -> Tensor:
r"""Apply Layer Normalization for last certain number of dimensions.
See :class:`~torch.nn.LayerNorm` for details.
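    Example (illustrative; normalizes over the last dimension here)::
        >>> x = torch.randn(2, 3, 4)
        >>> out = F.layer_norm(x, normalized_shape=[4])
        >>> out.shape
        torch.Size([2, 3, 4])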
"""
if has_torch_function_variadic(input, weight, bias):
return handle_torch_function(
layer_norm,
(input, weight, bias),
input,
normalized_shape,
weight=weight,
bias=bias,
eps=eps,
)
return torch.layer_norm(
input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled
)
def rms_norm(
input: Tensor,
normalized_shape: list[int],
weight: Optional[Tensor] = None,
eps: Optional[float] = None,
) -> Tensor:
r"""Apply Root Mean Square Layer Normalization.
See :class:`~torch.nn.RMSNorm` for details.
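    Example (illustrative)::
        >>> x = torch.randn(2, 3, 4)
        >>> out = F.rms_norm(x, normalized_shape=[4])
        >>> out.shape
        torch.Size([2, 3, 4])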
"""
if has_torch_function_variadic(input, weight):
return handle_torch_function(
rms_norm, (input, weight), input, normalized_shape, weight=weight, eps=eps
)
return torch.rms_norm(input, normalized_shape, weight, eps)
def group_norm(
input: Tensor,
num_groups: int,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
eps: float = 1e-5,
) -> Tensor:
r"""Apply Group Normalization for last certain number of dimensions.
See :class:`~torch.nn.GroupNorm` for details.
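    Example (illustrative; 6 channels split into 3 groups)::
        >>> x = torch.randn(2, 6, 4, 4)
        >>> out = F.group_norm(x, num_groups=3)
        >>> out.shape
        torch.Size([2, 6, 4, 4])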
"""
if has_torch_function_variadic(input, weight, bias):
return handle_torch_function(
group_norm,
(
input,
weight,
bias,
),
input,
num_groups,
weight=weight,
bias=bias,
eps=eps,
)
if input.dim() < 2:
raise RuntimeError(
f"Expected at least 2 dimensions for input tensor but received {input.dim()}"
)
_verify_batch_size(
[input.size(0) * input.size(1) // num_groups, num_groups]
+ list(input.size()[2:])
)
return torch.group_norm(
input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled
)
def local_response_norm(
input: Tensor,
size: int,
alpha: float = 1e-4,
beta: float = 0.75,
k: float = 1.0,
) -> Tensor:
r"""Apply local response normalization over an input signal.
The input signal is composed of several input planes, where channels occupy the second dimension.
Normalization is applied across channels.
See :class:`~torch.nn.LocalResponseNorm` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
local_response_norm, (input,), input, size, alpha=alpha, beta=beta, k=k
)
dim = input.dim()
if dim < 3:
raise ValueError(
f"Expected 3D or higher dimensionality input (got {dim} dimensions)"
)
if input.numel() == 0:
return input
div = input.mul(input)
if dim == 3:
div = div.unsqueeze(1)
div = pad(div, (0, 0, size // 2, (size - 1) // 2))
div = avg_pool2d(div, (size, 1), stride=1).squeeze(1)
else:
sizes = input.size()
div = div.view(sizes[0], 1, sizes[1], sizes[2], -1)
div = pad(div, (0, 0, 0, 0, size // 2, (size - 1) // 2))
div = avg_pool3d(div, (size, 1, 1), stride=1).squeeze(1)
div = div.view(sizes)
div = div.mul(alpha).add(k).pow(beta)
return input / div
# loss
def ctc_loss(
log_probs: Tensor,
targets: Tensor,
input_lengths: Tensor,
target_lengths: Tensor,
blank: int = 0,
reduction: str = "mean",
zero_infinity: bool = False,
) -> Tensor:
r"""Apply the Connectionist Temporal Classification loss.
See :class:`~torch.nn.CTCLoss` for details.
Note:
{cudnn_reproducibility_note}
Note:
{backward_reproducibility_note}
Args:
log_probs: :math:`(T, N, C)` or :math:`(T, C)` where `C = number of characters in alphabet including blank`,
`T = input length`, and `N = batch size`.
The logarithmized probabilities of the outputs
(e.g. obtained with :func:`torch.nn.functional.log_softmax`).
targets: :math:`(N, S)` or `(sum(target_lengths))`.
Targets cannot be blank. In the second form, the targets are assumed to be concatenated.
input_lengths: :math:`(N)` or :math:`()`.
Lengths of the inputs (must each be :math:`\leq T`)
target_lengths: :math:`(N)` or :math:`()`.
Lengths of the targets
blank (int, optional):
Blank label. Default :math:`0`.
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the output losses will be divided by the target lengths and
then the mean over the batch is taken, ``'sum'``: the output will be
summed. Default: ``'mean'``
zero_infinity (bool, optional):
Whether to zero infinite losses and the associated gradients.
Default: ``False``
Infinite losses mainly occur when the inputs are too short
to be aligned to the targets.
Example::
>>> log_probs = torch.randn(50, 16, 20).log_softmax(2).detach().requires_grad_()
>>> targets = torch.randint(1, 20, (16, 30), dtype=torch.long)
>>> input_lengths = torch.full((16,), 50, dtype=torch.long)
>>> target_lengths = torch.randint(10, 30, (16,), dtype=torch.long)
>>> loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)
>>> loss.backward()
"""
if has_torch_function_variadic(log_probs, targets, input_lengths, target_lengths):
return handle_torch_function(
ctc_loss,
(log_probs, targets, input_lengths, target_lengths),
log_probs,
targets,
input_lengths,
target_lengths,
blank=blank,
reduction=reduction,
zero_infinity=zero_infinity,
)
return torch.ctc_loss(
log_probs,
targets,
input_lengths,
target_lengths,
blank,
_Reduction.get_enum(reduction),
zero_infinity,
)
if ctc_loss.__doc__:
ctc_loss.__doc__ = ctc_loss.__doc__.format(**reproducibility_notes)
def nll_loss(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
ignore_index: int = -100,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""Compute the negative log likelihood loss.
See :class:`~torch.nn.NLLLoss` for details.
Args:
input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`
in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K \geq 1`
in the case of K-dimensional loss. `input` is expected to be log-probabilities.
target: :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`,
or :math:`(N, d_1, d_2, ..., d_K)` where :math:`K \geq 1` for
K-dimensional loss.
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When :attr:`size_average` is
``True``, the loss is averaged over non-ignored targets. Default: -100
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Example::
>>> # input is of size N x C = 3 x 5
>>> input = torch.randn(3, 5, requires_grad=True)
>>> # each element in target has to have 0 <= value < C
>>> target = torch.tensor([1, 0, 4])
>>> output = F.nll_loss(F.log_softmax(input, dim=1), target)
>>> output.backward()
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
nll_loss,
(input, target, weight),
input,
target,
weight=weight,
size_average=size_average,
ignore_index=ignore_index,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
return torch._C._nn.nll_loss_nd(
input, target, weight, _Reduction.get_enum(reduction), ignore_index
)
def poisson_nll_loss(
input: Tensor,
target: Tensor,
log_input: bool = True,
full: bool = False,
size_average: Optional[bool] = None,
eps: float = 1e-8,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""Poisson negative log likelihood loss.
See :class:`~torch.nn.PoissonNLLLoss` for details.
Args:
input: expectation of underlying Poisson distribution.
target: random sample :math:`target \sim \text{Poisson}(input)`.
log_input: if ``True`` the loss is computed as
:math:`\exp(\text{input}) - \text{target} * \text{input}`, if ``False`` then loss is
:math:`\text{input} - \text{target} * \log(\text{input}+\text{eps})`. Default: ``True``
        full: whether to compute the full loss, i.e. to add the Stirling
approximation term. Default: ``False``
:math:`\text{target} * \log(\text{target}) - \text{target} + 0.5 * \log(2 * \pi * \text{target})`.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
eps (float, optional): Small value to avoid evaluation of :math:`\log(0)` when
:attr:`log_input`\ =\ ``False``. Default: 1e-8
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
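    Example (an illustrative call; the target is drawn here with :func:`torch.poisson`)::
        >>> input = torch.randn(5, 2, requires_grad=True)
        >>> target = torch.poisson(torch.rand(5, 2) * 5)
        >>> loss = F.poisson_nll_loss(input, target)
        >>> loss.backward()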
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
poisson_nll_loss,
(input, target),
input,
target,
log_input=log_input,
full=full,
size_average=size_average,
eps=eps,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
if reduction != "none" and reduction != "mean" and reduction != "sum":
        ret = input  # dead assignment so `ret` is bound on every path; presumably kept for TorchScript
raise ValueError(reduction + " is not a valid value for reduction")
ret = torch.poisson_nll_loss(
input, target, log_input, full, eps, _Reduction.get_enum(reduction)
)
return ret
def gaussian_nll_loss(
input: Tensor,
target: Tensor,
var: Union[Tensor, float],
full: bool = False,
eps: float = 1e-6,
reduction: str = "mean",
) -> Tensor:
r"""Gaussian negative log likelihood loss.
See :class:`~torch.nn.GaussianNLLLoss` for details.
Args:
input: expectation of the Gaussian distribution.
target: sample from the Gaussian distribution.
var: tensor of positive variance(s), one for each of the expectations
in the input (heteroscedastic), or a single one (homoscedastic),
or a positive scalar value to be used for all expectations.
full (bool, optional): include the constant term in the loss calculation. Default: ``False``.
eps (float, optional): value added to var, for stability. Default: 1e-6.
reduction (str, optional): specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the output is the average of all batch member losses,
``'sum'``: the output is the sum of all batch member losses.
Default: ``'mean'``.
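    Example (an illustrative heteroscedastic call)::
        >>> input = torch.randn(5, 2, requires_grad=True)
        >>> target = torch.randn(5, 2)
        >>> var = torch.ones(5, 2, requires_grad=True)
        >>> loss = F.gaussian_nll_loss(input, target, var)
        >>> loss.backward()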
"""
if has_torch_function_variadic(input, target, var):
return handle_torch_function(
gaussian_nll_loss,
(input, target, var),
input,
target,
var,
full=full,
eps=eps,
reduction=reduction,
)
# Entries of var must be non-negative
if isinstance(var, float):
if var < 0:
raise ValueError("var has negative entry/entries")
var = var * torch.ones_like(input)
elif torch.any(var < 0):
raise ValueError("var has negative entry/entries")
# Check var size
# If var.size == input.size, the case is heteroscedastic and no further checks are needed.
# Otherwise:
if var.size() != input.size():
# If var is one dimension short of input, but the sizes match otherwise, then this is a homoscedastic case.
# e.g. input.size = (10, 2, 3), var.size = (10, 2)
# -> unsqueeze var so that var.shape = (10, 2, 1)
# this is done so that broadcasting can happen in the loss calculation
if input.size()[:-1] == var.size():
var = torch.unsqueeze(var, -1)
# This checks if the sizes match up to the final dimension, and the final dimension of var is of size 1.
# This is also a homoscedastic case.
# e.g. input.size = (10, 2, 3), var.size = (10, 2, 1)
elif (
input.size()[:-1] == var.size()[:-1] and var.size(-1) == 1
        ):  # homoscedastic as well: variance shared across the final dimension
pass
# If none of the above pass, then the size of var is incorrect.
else:
raise ValueError("var is of incorrect size")
# Check validity of reduction mode
if reduction != "none" and reduction != "mean" and reduction != "sum":
raise ValueError(reduction + " is not valid")
# Clamp for stability
var = var.clone()
with torch.no_grad():
var.clamp_(min=eps)
# Calculate the loss
loss = 0.5 * (torch.log(var) + (input - target) ** 2 / var)
if full:
loss += 0.5 * math.log(2 * math.pi)
if reduction == "mean":
return loss.mean()
elif reduction == "sum":
return loss.sum()
else:
return loss
def kl_div(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
log_target: bool = False,
) -> Tensor:
r"""Compute the KL Divergence loss.
    Refer to the `Kullback-Leibler divergence
    <https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`__ article for background.
See :class:`~torch.nn.KLDivLoss` for details.
Args:
input: Tensor of arbitrary shape in log-probabilities.
target: Tensor of the same shape as input. See :attr:`log_target` for
the target's interpretation.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``.
``'none'``: no reduction will be applied
``'batchmean'``: the sum of the output will be divided by the batchsize
``'sum'``: the output will be summed
``'mean'``: the output will be divided by the number of elements in the output
Default: ``'mean'``
log_target (bool): A flag indicating whether ``target`` is passed in the log space.
It is recommended to pass certain distributions (like ``softmax``)
in the log space to avoid numerical issues caused by explicit ``log``.
Default: ``False``
.. note::
:attr:`size_average` and :attr:`reduce` are in the process of being deprecated,
and in the meantime, specifying either of those two args will override :attr:`reduction`.
.. warning::
:attr:`reduction` = ``'mean'`` doesn't return the true kl divergence value, please use
:attr:`reduction` = ``'batchmean'`` which aligns with KL math definition.
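    Example (an illustrative call; ``input`` is given in log-space)::
        >>> input = F.log_softmax(torch.randn(3, 5, requires_grad=True), dim=1)
        >>> target = F.softmax(torch.randn(3, 5), dim=1)
        >>> loss = F.kl_div(input, target, reduction='batchmean')
        >>> loss.backward()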
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
kl_div,
(input, target),
input,
target,
size_average=size_average,
reduce=reduce,
reduction=reduction,
log_target=log_target,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
if reduction == "mean":
warnings.warn(
"reduction: 'mean' divides the total loss by both the batch size and the support size."
"'batchmean' divides only by the batch size, and aligns with the KL div math definition."
"'mean' will be changed to behave the same as 'batchmean' in the next major release."
)
# special case for batchmean
if reduction == "batchmean":
reduction_enum = _Reduction.get_enum("sum")
else:
reduction_enum = _Reduction.get_enum(reduction)
reduced = torch.kl_div(input, target, reduction_enum, log_target=log_target)
if reduction == "batchmean" and input.dim() != 0:
reduced = reduced / input.size()[0]
return reduced
def cross_entropy(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
ignore_index: int = -100,
reduce: Optional[bool] = None,
reduction: str = "mean",
label_smoothing: float = 0.0,
) -> Tensor:
r"""Compute the cross entropy loss between input logits and target.
See :class:`~torch.nn.CrossEntropyLoss` for details.
Args:
input (Tensor) : Predicted unnormalized logits;
see Shape section below for supported shapes.
target (Tensor) : Ground truth class indices or class probabilities;
see Shape section below for supported shapes.
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When :attr:`size_average` is
``True``, the loss is averaged over non-ignored targets. Note that
:attr:`ignore_index` is only applicable when the target contains class indices.
Default: -100
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
label_smoothing (float, optional): A float in [0.0, 1.0]. Specifies the amount
of smoothing when computing the loss, where 0.0 means no smoothing. The targets
become a mixture of the original ground truth and a uniform distribution as described in
`Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.
Shape:
- Input: Shape :math:`(C)`, :math:`(N, C)` or :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
in the case of `K`-dimensional loss.
- Target: If containing class indices, shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with
:math:`K \geq 1` in the case of K-dimensional loss where each value should be between :math:`[0, C)`.
If containing class probabilities, same shape as the input and each value should be between :math:`[0, 1]`.
where:
.. math::
\begin{aligned}
C ={} & \text{number of classes} \\
N ={} & \text{batch size} \\
\end{aligned}
Examples::
>>> # Example of target with class indices
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randint(5, (3,), dtype=torch.int64)
>>> loss = F.cross_entropy(input, target)
>>> loss.backward()
>>>
>>> # Example of target with class probabilities
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5).softmax(dim=1)
>>> loss = F.cross_entropy(input, target)
>>> loss.backward()
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
cross_entropy,
(input, target, weight),
input,
target,
weight=weight,
size_average=size_average,
ignore_index=ignore_index,
reduce=reduce,
reduction=reduction,
label_smoothing=label_smoothing,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
return torch._C._nn.cross_entropy_loss(
input,
target,
weight,
_Reduction.get_enum(reduction),
ignore_index,
label_smoothing,
)
def binary_cross_entropy(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""Measure Binary Cross Entropy between the target and input probabilities.
See :class:`~torch.nn.BCELoss` for details.
Args:
input: Tensor of arbitrary shape as probabilities.
target: Tensor of the same shape as input with values between 0 and 1.
weight (Tensor, optional): a manual rescaling weight
            if provided, it is repeated to match the input tensor shape
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Examples::
>>> input = torch.randn(3, 2, requires_grad=True)
>>> target = torch.rand(3, 2, requires_grad=False)
>>> loss = F.binary_cross_entropy(torch.sigmoid(input), target)
>>> loss.backward()
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
binary_cross_entropy,
(input, target, weight),
input,
target,
weight=weight,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
if target.size() != input.size():
raise ValueError(
f"Using a target size ({target.size()}) that is different to the input size ({input.size()}) is deprecated. "
"Please ensure they have the same size."
)
if weight is not None:
new_size = _infer_size(target.size(), weight.size())
weight = weight.expand(new_size)
return torch._C._nn.binary_cross_entropy(input, target, weight, reduction_enum)
def binary_cross_entropy_with_logits(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
pos_weight: Optional[Tensor] = None,
) -> Tensor:
r"""Calculate Binary Cross Entropy between target and input logits.
See :class:`~torch.nn.BCEWithLogitsLoss` for details.
Args:
input: Tensor of arbitrary shape as unnormalized scores (often referred to as logits).
target: Tensor of the same shape as input with values between 0 and 1
weight (Tensor, optional): a manual rescaling weight
            if provided, it is repeated to match the input tensor shape
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
pos_weight (Tensor, optional): a weight of positive examples to be broadcasted with target.
Must be a tensor with equal size along the class dimension to the number of classes.
Pay close attention to PyTorch's broadcasting semantics in order to achieve the desired
operations. For a target of size [B, C, H, W] (where B is batch size) pos_weight of
size [B, C, H, W] will apply different pos_weights to each element of the batch or
[C, H, W] the same pos_weights across the batch. To apply the same positive weight
along all spatial dimensions for a 2D multi-class target [C, H, W] use: [C, 1, 1].
Default: ``None``
Examples::
>>> input = torch.randn(3, requires_grad=True)
>>> target = torch.empty(3).random_(2)
>>> loss = F.binary_cross_entropy_with_logits(input, target)
>>> loss.backward()
"""
if has_torch_function_variadic(input, target, weight, pos_weight):
return handle_torch_function(
binary_cross_entropy_with_logits,
(input, target, weight, pos_weight),
input,
target,
weight=weight,
size_average=size_average,
reduce=reduce,
reduction=reduction,
pos_weight=pos_weight,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
if not (target.size() == input.size()):
raise ValueError(
f"Target size ({target.size()}) must be the same as input size ({input.size()})"
)
return torch.binary_cross_entropy_with_logits(
input, target, weight, pos_weight, reduction_enum
)
def smooth_l1_loss(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
beta: float = 1.0,
) -> Tensor:
r"""Compute the Smooth L1 loss.
Function uses a squared term if the absolute
element-wise error falls below beta and an L1 term otherwise.
See :class:`~torch.nn.SmoothL1Loss` for details.
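    Example (an illustrative call)::
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5)
        >>> loss = F.smooth_l1_loss(input, target)
        >>> loss.backward()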
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
smooth_l1_loss,
(input, target),
input,
target,
size_average=size_average,
reduce=reduce,
reduction=reduction,
beta=beta,
)
if not (target.size() == input.size()):
warnings.warn(
f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.",
stacklevel=2,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
if beta == 0.0:
return torch._C._nn.l1_loss(
expanded_input, expanded_target, _Reduction.get_enum(reduction)
)
else:
return torch._C._nn.smooth_l1_loss(
expanded_input, expanded_target, _Reduction.get_enum(reduction), beta
)
def huber_loss(
input: Tensor,
target: Tensor,
reduction: str = "mean",
delta: float = 1.0,
weight: Optional[Tensor] = None,
) -> Tensor:
r"""huber_loss(input, target, reduction='mean', delta=1.0, weight=None) -> Tensor
Computes the Huber loss, with optional weighting.
Function uses a squared term if the absolute
element-wise error falls below delta and a delta-scaled L1 term otherwise.
When delta equals 1, this loss is equivalent to SmoothL1Loss.
In general, Huber loss differs from SmoothL1Loss by a factor of delta (AKA beta in Smooth L1).
Args:
input (Tensor): Predicted values.
target (Tensor): Ground truth values.
reduction (str, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken.
'sum': the output will be summed. 'none': no reduction will be applied.
Default: 'mean'.
delta (float, optional): The threshold at which to change between delta-scaled L1 and L2 loss. Default: 1.0.
weight (Tensor, optional): Weights for each sample. Default: None.
Returns:
Tensor: Huber loss (optionally weighted).
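    Example (an illustrative call; ``weight`` must match the input shape)::
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5)
        >>> weight = torch.rand(3, 5)
        >>> loss = F.huber_loss(input, target, delta=0.5, weight=weight)
        >>> loss.backward()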
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
huber_loss,
(input, target, weight),
input,
target,
reduction=reduction,
delta=delta,
weight=weight,
)
if not (target.size() == input.size()):
warnings.warn(
f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.",
stacklevel=2,
)
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
if weight is None:
# Use the optimized C++ backend for standard Huber loss
return torch._C._nn.huber_loss(
expanded_input, expanded_target, _Reduction.get_enum(reduction), delta
)
else:
if weight.size() != input.size():
raise ValueError("Weights and input must have the same size.")
# Calculate the unweighted loss first
unweighted_loss = torch._C._nn.huber_loss(
expanded_input, expanded_target, _Reduction.get_enum("none"), delta
)
# Apply weight to the unweighted loss
weighted_loss = unweighted_loss * weight
if reduction == "none":
return weighted_loss
elif reduction == "sum":
return torch.sum(weighted_loss)
elif reduction == "mean":
return weighted_loss.mean()
else:
raise ValueError(
f"Invalid reduction mode: {reduction}. Expected one of 'none', 'mean', 'sum'."
)
def l1_loss(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
weight: Optional[Tensor] = None,
) -> Tensor: # noqa: D400,D402
r"""l1_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
Function that takes the mean element-wise absolute value difference.
See :class:`~torch.nn.L1Loss` for details.
"""
    if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
l1_loss,
(input, target, weight),
input,
target,
size_average=size_average,
reduce=reduce,
            reduction=reduction,
            weight=weight,
)
if not (target.size() == input.size()):
warnings.warn(
f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.",
stacklevel=2,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
if weight is not None:
if weight.size() != input.size():
raise ValueError("Weights and input must have the same size.")
absolute_errors = torch.abs(expanded_input - expanded_target)
weighted_absolute_errors = absolute_errors * weight
if reduction == "none":
return weighted_absolute_errors
elif reduction == "sum":
return torch.sum(weighted_absolute_errors)
elif reduction == "mean":
return torch.sum(weighted_absolute_errors) / torch.sum(weight)
else:
raise ValueError(
f"Invalid reduction mode: {reduction}. Expected one of 'none', 'mean', 'sum'."
)
else:
return torch._C._nn.l1_loss(
expanded_input, expanded_target, _Reduction.get_enum(reduction)
)
def mse_loss(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
weight: Optional[Tensor] = None,
) -> Tensor:
r"""mse_loss(input, target, size_average=None, reduce=None, reduction='mean', weight=None) -> Tensor
Measures the element-wise mean squared error, with optional weighting.
Args:
input (Tensor): Predicted values.
target (Tensor): Ground truth values.
size_average (bool, optional): Deprecated (use reduction).
reduce (bool, optional): Deprecated (use reduction).
reduction (str, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken.
'sum': the output will be summed. 'none': no reduction will be applied.
Default: 'mean'.
weight (Tensor, optional): Optional per-element weights with the same shape as ``input``. Default: None.
Returns:
Tensor: Mean Squared Error loss (optionally weighted).
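Example (illustrative sketch; values are arbitrary)::
>>> input = torch.randn(3, 4)
>>> target = torch.randn(3, 4)
>>> weight = torch.rand(3, 4)  # per-element weights, same shape as input
>>> F.mse_loss(input, target)                 # unweighted
>>> F.mse_loss(input, target, weight=weight)  # weighted mean: sum(w * err**2) / sum(w)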
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
mse_loss,
(input, target, weight),
input,
target,
size_average=size_average,
reduce=reduce,
reduction=reduction,
weight=weight,
)
if not (target.size() == input.size()):
warnings.warn(
f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.",
stacklevel=2,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
if weight is not None:
if weight.size() != input.size():
raise ValueError("Weights and input must have the same size.")
# Perform weighted MSE loss manually
squared_errors = torch.pow(expanded_input - expanded_target, 2)
weighted_squared_errors = squared_errors * weight
if reduction == "none":
return weighted_squared_errors
elif reduction == "sum":
return torch.sum(weighted_squared_errors)
elif reduction == "mean":
return torch.sum(weighted_squared_errors) / torch.sum(weight)
else:
raise ValueError(
f"Invalid reduction mode: {reduction}. Expected one of 'none', 'mean', 'sum'."
)
else:
return torch._C._nn.mse_loss(
expanded_input, expanded_target, _Reduction.get_enum(reduction)
)
def margin_ranking_loss(
input1: Tensor,
input2: Tensor,
target: Tensor,
margin: float = 0,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor: # noqa: D400,D402
r"""margin_ranking_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.MarginRankingLoss` for details.
"""
if has_torch_function_variadic(input1, input2, target):
return handle_torch_function(
margin_ranking_loss,
(input1, input2, target),
input1,
input2,
target,
margin=margin,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
if input1.dim() != input2.dim() or input1.dim() != target.dim():
raise RuntimeError(
f"margin_ranking_loss : All input tensors should have same dimension but got sizes: "
f"input1: {input1.size()}, input2: {input2.size()}, target: {target.size()} "
)
return torch.margin_ranking_loss(input1, input2, target, margin, reduction_enum)
def hinge_embedding_loss(
input: Tensor,
target: Tensor,
margin: float = 1.0,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor: # noqa: D400,D402
r"""hinge_embedding_loss(input, target, margin=1.0, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.HingeEmbeddingLoss` for details.
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
hinge_embedding_loss,
(input, target),
input,
target,
margin=margin,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
return torch.hinge_embedding_loss(input, target, margin, reduction_enum)
def multilabel_margin_loss(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor: # noqa: D400,D402
r"""multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.MultiLabelMarginLoss` for details.
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
multilabel_margin_loss,
(input, target),
input,
target,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
return torch._C._nn.multilabel_margin_loss(input, target, reduction_enum)
def soft_margin_loss(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor: # noqa: D400,D402
r"""
soft_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.SoftMarginLoss` for details.
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
soft_margin_loss,
(input, target),
input,
target,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
return torch._C._nn.soft_margin_loss(input, target, reduction_enum)
def multilabel_soft_margin_loss(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor: # noqa: D400,D402
r"""multilabel_soft_margin_loss(input, target, weight=None, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.MultiLabelSoftMarginLoss` for details.
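Example (illustrative sketch; the multi-hot targets are arbitrary)::
>>> input = torch.randn(3, 5)                     # (N, C) logits
>>> target = torch.randint(0, 2, (3, 5)).float()  # (N, C) multi-hot labels
>>> F.multilabel_soft_margin_loss(input, target)                    # scalar ('mean' reduction)
>>> F.multilabel_soft_margin_loss(input, target, reduction="none")  # one loss per sample, shape (3,)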
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
multilabel_soft_margin_loss,
(input, target, weight),
input,
target,
weight=weight,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
loss = -(target * logsigmoid(input) + (1 - target) * logsigmoid(-input))
if weight is not None:
loss = loss * weight
class_dim = input.dim() - 1
C = input.size(class_dim)
loss = loss.sum(dim=class_dim) / C # only return N loss values
if reduction == "none":
ret = loss
elif reduction == "mean":
ret = loss.mean()
elif reduction == "sum":
ret = loss.sum()
else:
ret = input
raise ValueError(reduction + " is not valid")
return ret
def cosine_embedding_loss(
input1: Tensor,
input2: Tensor,
target: Tensor,
margin: float = 0,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor: # noqa: D400,D402
r"""cosine_embedding_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.CosineEmbeddingLoss` for details.
"""
if has_torch_function_variadic(input1, input2, target):
return handle_torch_function(
cosine_embedding_loss,
(input1, input2, target),
input1,
input2,
target,
margin=margin,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
return torch.cosine_embedding_loss(input1, input2, target, margin, reduction_enum)
def multi_margin_loss(
input: Tensor,
target: Tensor,
p: int = 1,
margin: float = 1.0,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor: # noqa: D400,D402
r"""multi_margin_loss(input, target, p=1, margin=1, weight=None, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.MultiMarginLoss` for details.
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
multi_margin_loss,
(input, target, weight),
input,
target,
p=p,
margin=margin,
weight=weight,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
if p != 1 and p != 2:
raise ValueError("only p == 1 and p == 2 supported")
if weight is not None:
if weight.dim() != 1:
raise ValueError("weight must be one-dimensional")
return torch._C._nn.multi_margin_loss(
input, target, p, margin, weight, reduction_enum
)
pixel_shuffle = _add_docstr(
torch.pixel_shuffle,
r"""
pixel_shuffle(input, upscale_factor) -> Tensor
Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)` to a
tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is the :attr:`upscale_factor`.
See :class:`~torch.nn.PixelShuffle` for details.
Args:
input (Tensor): the input tensor
upscale_factor (int): factor to increase spatial resolution by
Examples::
>>> input = torch.randn(1, 9, 4, 4)
>>> output = torch.nn.functional.pixel_shuffle(input, 3)
>>> print(output.size())
torch.Size([1, 1, 12, 12])
""",
)
pixel_unshuffle = _add_docstr(
torch.pixel_unshuffle,
r"""
pixel_unshuffle(input, downscale_factor) -> Tensor
Reverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements in a
tensor of shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape
:math:`(*, C \times r^2, H, W)`, where r is the :attr:`downscale_factor`.
See :class:`~torch.nn.PixelUnshuffle` for details.
Args:
input (Tensor): the input tensor
downscale_factor (int): factor to decrease spatial resolution by
Examples::
>>> input = torch.randn(1, 1, 12, 12)
>>> output = torch.nn.functional.pixel_unshuffle(input, 3)
>>> print(output.size())
torch.Size([1, 9, 4, 4])
""",
)
channel_shuffle = _add_docstr(
torch.channel_shuffle,
r"""
channel_shuffle(input, groups) -> Tensor
Divide the channels in a tensor of shape :math:`(*, C , H, W)`
into g groups and rearrange them as :math:`(*, \frac{C}{g}, g, H, W)`,
while keeping the original tensor shape.
See :class:`~torch.nn.ChannelShuffle` for details.
Args:
input (Tensor): the input tensor
groups (int): number of groups to divide the channels into and rearrange.
Examples::
>>> input = torch.arange(1, 17).view(1, 4, 2, 2)
>>> print(input)
[[[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]],
[[9, 10],
[11, 12]],
[[13, 14],
[15, 16]],
]]
>>> output = torch.nn.functional.channel_shuffle(input, 2)
>>> print(output)
[[[[1, 2],
[3, 4]],
[[9, 10],
[11, 12]],
[[5, 6],
[7, 8]],
[[13, 14],
[15, 16]],
]]
""",
)
native_channel_shuffle = _add_docstr(
torch.native_channel_shuffle,
r"""
native_channel_shuffle(input, groups) -> Tensor
Native kernel-level implementation of ``channel_shuffle``.
This function might become private in future releases; use with caution.
Divide the channels in a tensor of shape :math:`(*, C , H, W)`
into g groups and rearrange them as :math:`(*, \frac{C}{g}, g, H, W)`,
while keeping the original tensor shape.
See :class:`~torch.nn.ChannelShuffle` for details.
Args:
input (Tensor): the input tensor
groups (int): number of groups to divide the channels into and rearrange.
Examples::
>>> input = torch.arange(1, 17).view(1, 4, 2, 2)
>>> print(input)
[[[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]],
[[9, 10],
[11, 12]],
[[13, 14],
[15, 16]],
]]
>>> output = torch.nn.functional.native_channel_shuffle(input, 2)
>>> print(output)
[[[[1, 2],
[3, 4]],
[[9, 10],
[11, 12]],
[[5, 6],
[7, 8]],
[[13, 14],
[15, 16]],
]]
""",
)
@_overload
def upsample( # noqa: F811
input: Tensor,
size: Optional[int] = None,
scale_factor: Optional[float] = None,
mode: str = "nearest",
align_corners: Optional[bool] = None,
) -> Tensor: # noqa: B950
pass
@_overload
def upsample( # noqa: F811
input: Tensor,
size: Optional[list[int]] = None,
scale_factor: Optional[float] = None,
mode: str = "nearest",
align_corners: Optional[bool] = None,
) -> Tensor: # noqa: B950
pass
def upsample( # noqa: F811
input,
size=None,
scale_factor=None,
mode="nearest",
align_corners=None,
):
r"""Upsample input.
Provided tensor is upsampled to either the given :attr:`size` or the given
:attr:`scale_factor`
.. warning::
This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.
This is equivalent to ``nn.functional.interpolate(...)``.
Note:
{backward_reproducibility_note}
The algorithm used for upsampling is determined by :attr:`mode`.
Currently temporal, spatial and volumetric upsampling are supported, i.e.
expected inputs are 3-D, 4-D or 5-D in shape.
The input dimensions are interpreted in the form:
`mini-batch x channels x [optional depth] x [optional height] x width`.
The modes available for upsampling are: `nearest`, `linear` (3D-only),
`bilinear`, `bicubic` (4D-only), `trilinear` (5D-only)
Args:
input (Tensor): the input tensor
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
output spatial size.
scale_factor (float or Tuple[float]): multiplier for spatial size. If it is a tuple, its length has to match the number of spatial dimensions.
mode (str): algorithm used for upsampling:
``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
``'trilinear'``. Default: ``'nearest'``
align_corners (bool, optional): Geometrically, we consider the pixels of the
input and output as squares rather than points.
If set to ``True``, the input and output tensors are aligned by the
center points of their corner pixels, preserving the values at the corner pixels.
If set to ``False``, the input and output tensors are aligned by the corner
points of their corner pixels, and the interpolation uses edge value padding
for out-of-boundary values, making this operation *independent* of input size
when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``.
Default: ``False``
.. note::
With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce
negative values or values greater than 255 for images.
Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot
when displaying the image.
.. warning::
With ``align_corners = True``, the linearly interpolating modes
(`linear`, `bilinear`, and `trilinear`) don't proportionally align the
output and input pixels, and thus the output values can depend on the
input size. This was the default behavior for these modes up to version
0.3.1. Since then, the default behavior is ``align_corners = False``.
See :class:`~torch.nn.Upsample` for concrete examples on how this
affects the outputs.
"""
warnings.warn(
"`nn.functional.upsample` is deprecated. "
"Use `nn.functional.interpolate` instead.",
stacklevel=2,
)
return interpolate(input, size, scale_factor, mode, align_corners)
if upsample.__doc__:
upsample.__doc__ = upsample.__doc__.format(**reproducibility_notes)
def _is_integer(x) -> bool:
r"""Type check the input number is an integer.
Will return True for int, SymInt, Numpy integers and Tensors with integer elements.
"""
if isinstance(x, (int, torch.SymInt)):
return True
if np is not None and isinstance(x, np.integer):
return True
return isinstance(x, Tensor) and not x.is_floating_point()
@_overload
def interpolate( # noqa: F811
input: Tensor,
size: Optional[int] = None,
scale_factor: Optional[list[float]] = None,
mode: str = "nearest",
align_corners: Optional[bool] = None,
recompute_scale_factor: Optional[bool] = None,
antialias: bool = False,
) -> Tensor: # noqa: B950
pass
@_overload
def interpolate( # noqa: F811
input: Tensor,
size: Optional[list[int]] = None,
scale_factor: Optional[list[float]] = None,
mode: str = "nearest",
align_corners: Optional[bool] = None,
recompute_scale_factor: Optional[bool] = None,
antialias: bool = False,
) -> Tensor: # noqa: B950
pass
@_overload
def interpolate( # noqa: F811
input: Tensor,
size: Optional[int] = None,
scale_factor: Optional[float] = None,
mode: str = "nearest",
align_corners: Optional[bool] = None,
recompute_scale_factor: Optional[bool] = None,
antialias: bool = False,
) -> Tensor: # noqa: B950
pass
@_overload
def interpolate( # noqa: F811
input: Tensor,
size: Optional[list[int]] = None,
scale_factor: Optional[float] = None,
mode: str = "nearest",
align_corners: Optional[bool] = None,
recompute_scale_factor: Optional[bool] = None,
antialias: bool = False,
) -> Tensor:
pass
def interpolate( # noqa: F811
input: Tensor,
size: Optional[int] = None,
scale_factor: Optional[list[float]] = None,
mode: str = "nearest",
align_corners: Optional[bool] = None,
recompute_scale_factor: Optional[bool] = None,
antialias: bool = False,
) -> Tensor: # noqa: B950
r"""Down/up samples the input.
Tensor interpolated to either the given :attr:`size` or the given
:attr:`scale_factor`
The algorithm used for interpolation is determined by :attr:`mode`.
Currently temporal, spatial and volumetric sampling are supported, i.e.
expected inputs are 3-D, 4-D or 5-D in shape.
The input dimensions are interpreted in the form:
`mini-batch x channels x [optional depth] x [optional height] x width`.
The modes available for resizing are: `nearest`, `linear` (3D-only),
`bilinear`, `bicubic` (4D-only), `trilinear` (5D-only), `area`, `nearest-exact`
Args:
input (Tensor): the input tensor
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
output spatial size.
scale_factor (float or Tuple[float]): multiplier for spatial size. If `scale_factor` is a tuple,
its length has to match the number of spatial dimensions; `input.dim() - 2`.
mode (str): algorithm used for upsampling:
``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
``'trilinear'`` | ``'area'`` | ``'nearest-exact'``. Default: ``'nearest'``
align_corners (bool, optional): Geometrically, we consider the pixels of the
input and output as squares rather than points.
If set to ``True``, the input and output tensors are aligned by the
center points of their corner pixels, preserving the values at the corner pixels.
If set to ``False``, the input and output tensors are aligned by the corner
points of their corner pixels, and the interpolation uses edge value padding
for out-of-boundary values, making this operation *independent* of input size
when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``.
Default: ``False``
recompute_scale_factor (bool, optional): recompute the scale_factor for use in the
interpolation calculation. If `recompute_scale_factor` is ``True``, then
`scale_factor` must be passed in and `scale_factor` is used to compute the
output `size`. The computed output `size` will be used to infer new scales for
the interpolation. Note that when `scale_factor` is floating-point, it may differ
from the recomputed `scale_factor` due to rounding and precision issues.
If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will
be used directly for interpolation. Default: ``None``.
antialias (bool, optional): flag to apply anti-aliasing. Default: ``False``. When the anti-alias
option is used together with ``align_corners=False``, the interpolation result matches Pillow's
result for the downsampling operation. Supported modes: ``'bilinear'``, ``'bicubic'``.
.. note::
With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce
negative values or values greater than 255 for images.
Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot
when displaying the image.
.. note::
Mode ``mode='nearest-exact'`` matches Scikit-Image and PIL nearest neighbours interpolation
algorithms and fixes known issues with ``mode='nearest'``. This mode is introduced to keep
backward compatibility.
Mode ``mode='nearest'`` matches buggy OpenCV's ``INTER_NEAREST`` interpolation algorithm.
.. note::
The gradients for the dtype ``float16`` on CUDA may be inaccurate in the upsample operation
when using modes ``['linear', 'bilinear', 'bicubic', 'trilinear', 'area']``.
For more details, please refer to the discussion in
`issue#104157 <https://github.com/pytorch/pytorch/issues/104157>`_.
Note:
{backward_reproducibility_note}
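Example (illustrative sketch; sizes are arbitrary)::
>>> x = torch.randn(1, 3, 8, 8)
>>> F.interpolate(x, scale_factor=2, mode="nearest").shape
torch.Size([1, 3, 16, 16])
>>> F.interpolate(x, size=(4, 6), mode="bilinear", align_corners=False).shape
torch.Size([1, 3, 4, 6])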
"""
if has_torch_function_unary(input):
return handle_torch_function(
interpolate,
(input,),
input,
size=size,
scale_factor=scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor,
antialias=antialias,
)
if mode in ("nearest", "area", "nearest-exact"):
if align_corners is not None:
raise ValueError(
"align_corners option can only be set with the "
"interpolating modes: linear | bilinear | bicubic | trilinear"
)
else:
if align_corners is None:
align_corners = False
dim = input.dim() - 2 # Number of spatial dimensions.
# Process size and scale_factor. Validate that exactly one is set.
# Validate its length if it is a list, or expand it if it is a scalar.
# After this block, exactly one of output_size and scale_factors will
# be non-None, and it will be a list (or tuple).
if size is not None and scale_factor is not None:
raise ValueError("only one of size or scale_factor should be defined")
elif size is not None:
assert scale_factor is None
scale_factors = None
if isinstance(size, (list, tuple)):
if len(size) != dim:
raise ValueError(
"Input and output must have the same number of spatial dimensions, but got "
f"input with spatial dimensions of {list(input.shape[2:])} and output size of {size}. "
"Please provide input tensor in (N, C, d1, d2, ...,dK) format and "
"output size in (o1, o2, ...,oK) format."
)
if not torch.jit.is_scripting():
if not all(_is_integer(x) for x in size):
raise TypeError(
"expected size to be one of int or Tuple[int] or Tuple[int, int] or "
f"Tuple[int, int, int], but got size with types {[type(x) for x in size]}"
)
output_size = size
else:
output_size = [size for _ in range(dim)]
elif scale_factor is not None:
assert size is None
output_size = None
if isinstance(scale_factor, (list, tuple)):
if len(scale_factor) != dim:
raise ValueError(
"Input and scale_factor must have the same number of spatial dimensions, but "
f"got input with spatial dimensions of {list(input.shape[2:])} and "
f"scale_factor of shape {scale_factor}. "
"Please provide input tensor in (N, C, d1, d2, ...,dK) format and "
"scale_factor in (s1, s2, ...,sK) format."
)
scale_factors = scale_factor
else:
scale_factors = [scale_factor for _ in range(dim)]
else:
raise ValueError("either size or scale_factor should be defined")
if (
recompute_scale_factor is not None
and recompute_scale_factor
and size is not None
):
raise ValueError(
"recompute_scale_factor is not meaningful with an explicit size."
)
# "area" mode always requires an explicit size rather than scale factor.
# Re-use the recompute_scale_factor code path.
if mode == "area" and output_size is None:
recompute_scale_factor = True
if recompute_scale_factor is not None and recompute_scale_factor:
# We compute output_size here, then un-set scale_factors.
# The C++ code will recompute it based on the (integer) output size.
assert scale_factors is not None
if not torch.jit.is_scripting() and torch._C._get_tracing_state():
# make scale_factor a tensor in tracing so constant doesn't get baked in
output_size = [
(
torch.floor(
(
input.size(i + 2).float()
* torch.tensor(scale_factors[i], dtype=torch.float32)
).float()
)
)
for i in range(dim)
]
elif torch.jit.is_scripting():
output_size = [
int(math.floor(float(input.size(i + 2)) * scale_factors[i]))
for i in range(dim)
]
else:
output_size = [
_sym_int(input.size(i + 2) * scale_factors[i]) for i in range(dim)
]
scale_factors = None
if antialias and not (mode in ("bilinear", "bicubic") and input.ndim == 4):
raise ValueError(
"Anti-alias option is restricted to bilinear and bicubic modes and requires a 4-D tensor as input"
)
if input.dim() == 3 and mode == "nearest":
return torch._C._nn.upsample_nearest1d(input, output_size, scale_factors)
if input.dim() == 4 and mode == "nearest":
return torch._C._nn.upsample_nearest2d(input, output_size, scale_factors)
if input.dim() == 5 and mode == "nearest":
return torch._C._nn.upsample_nearest3d(input, output_size, scale_factors)
if input.dim() == 3 and mode == "nearest-exact":
return torch._C._nn._upsample_nearest_exact1d(input, output_size, scale_factors)
if input.dim() == 4 and mode == "nearest-exact":
return torch._C._nn._upsample_nearest_exact2d(input, output_size, scale_factors)
if input.dim() == 5 and mode == "nearest-exact":
return torch._C._nn._upsample_nearest_exact3d(input, output_size, scale_factors)
if input.dim() == 3 and mode == "area":
assert output_size is not None
return adaptive_avg_pool1d(input, output_size)
if input.dim() == 4 and mode == "area":
assert output_size is not None
return adaptive_avg_pool2d(input, output_size)
if input.dim() == 5 and mode == "area":
assert output_size is not None
return adaptive_avg_pool3d(input, output_size)
if input.dim() == 3 and mode == "linear":
assert align_corners is not None
return torch._C._nn.upsample_linear1d(
input, output_size, align_corners, scale_factors
)
if input.dim() == 4 and mode == "bilinear":
assert align_corners is not None
if antialias:
return torch._C._nn._upsample_bilinear2d_aa(
input, output_size, align_corners, scale_factors
)
# Two levels are necessary to prevent TorchScript from touching
# are_deterministic_algorithms_enabled.
if not torch.jit.is_scripting():
if torch.are_deterministic_algorithms_enabled() and (
input.is_cuda or input.is_xpu
):
# Use slow decomp whose backward will be in terms of index_put
# importlib is required because the import cannot be top level
# (cycle) and cannot be nested (TS doesn't support)
return importlib.import_module(
"torch._decomp.decompositions"
)._upsample_linear_vec(input, output_size, align_corners, scale_factors)
return torch._C._nn.upsample_bilinear2d(
input, output_size, align_corners, scale_factors
)
if input.dim() == 5 and mode == "trilinear":
assert align_corners is not None
return torch._C._nn.upsample_trilinear3d(
input, output_size, align_corners, scale_factors
)
if input.dim() == 4 and mode == "bicubic":
assert align_corners is not None
if antialias:
return torch._C._nn._upsample_bicubic2d_aa(
input, output_size, align_corners, scale_factors
)
return torch._C._nn.upsample_bicubic2d(
input, output_size, align_corners, scale_factors
)
if input.dim() == 3 and mode == "bilinear":
raise NotImplementedError("Got 3D input, but bilinear mode needs 4D input")
if input.dim() == 3 and mode == "trilinear":
raise NotImplementedError("Got 3D input, but trilinear mode needs 5D input")
if input.dim() == 4 and mode == "linear":
raise NotImplementedError("Got 4D input, but linear mode needs 3D input")
if input.dim() == 4 and mode == "trilinear":
raise NotImplementedError("Got 4D input, but trilinear mode needs 5D input")
if input.dim() == 5 and mode == "linear":
raise NotImplementedError("Got 5D input, but linear mode needs 3D input")
if input.dim() == 5 and mode == "bilinear":
raise NotImplementedError("Got 5D input, but bilinear mode needs 4D input")
raise NotImplementedError(
"Input Error: Only 3D, 4D and 5D input Tensors supported"
f" (got {input.dim()}D) for the modes: nearest | linear | bilinear | bicubic | trilinear | area | nearest-exact"
f" (got {mode})"
)
if interpolate.__doc__:
interpolate.__doc__ = interpolate.__doc__.format(**reproducibility_notes)
@_overload
def upsample_nearest( # noqa: F811
input: Tensor,
size: Optional[int] = None,
scale_factor: Optional[float] = None,
) -> Tensor:
pass
@_overload
def upsample_nearest( # noqa: F811
input: Tensor,
size: Optional[list[int]] = None,
scale_factor: Optional[float] = None,
) -> Tensor:
pass
def upsample_nearest(input, size=None, scale_factor=None): # noqa: F811
r"""Upsamples the input, using nearest neighbours' pixel values.
.. warning::
This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.
This is equivalent to ``nn.functional.interpolate(..., mode='nearest')``.
Currently spatial and volumetric upsampling are supported (i.e. expected
inputs are 4 or 5 dimensional).
Args:
input (Tensor): input
size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial
size.
scale_factor (int): multiplier for spatial size. Has to be an integer.
Note:
{backward_reproducibility_note}
"""
# DeprecationWarning is ignored by default
warnings.warn(
"`nn.functional.upsample_nearest` is deprecated. "
"Use `nn.functional.interpolate` instead.",
stacklevel=2,
)
return interpolate(input, size, scale_factor, mode="nearest")
if upsample_nearest.__doc__:
upsample_nearest.__doc__ = upsample_nearest.__doc__.format(**reproducibility_notes)
@_overload
def upsample_bilinear( # noqa: F811
input: Tensor,
size: Optional[int] = None,
scale_factor: Optional[float] = None,
) -> Tensor:
pass
@_overload
def upsample_bilinear( # noqa: F811
input: Tensor,
size: Optional[list[int]] = None,
scale_factor: Optional[float] = None,
) -> Tensor:
pass
@_overload
def upsample_bilinear( # noqa: F811
input: Tensor,
size: Optional[int] = None,
scale_factor: Optional[list[float]] = None,
) -> Tensor:
pass
@_overload
def upsample_bilinear( # noqa: F811
input: Tensor,
size: Optional[list[int]] = None,
scale_factor: Optional[list[float]] = None,
) -> Tensor:
pass
def upsample_bilinear(input, size=None, scale_factor=None): # noqa: F811
r"""Upsamples the input, using bilinear upsampling.
.. warning::
This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.
This is equivalent to
``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.
Expected inputs are spatial (4 dimensional). Use `upsample_trilinear` for
volumetric (5 dimensional) inputs.
Args:
input (Tensor): input
size (int or Tuple[int, int]): output spatial size.
scale_factor (int or Tuple[int, int]): multiplier for spatial size
Note:
{backward_reproducibility_note}
"""
# DeprecationWarning is ignored by default
warnings.warn(
"`nn.functional.upsample_bilinear` is deprecated. "
"Use `nn.functional.interpolate` instead.",
stacklevel=2,
)
return interpolate(input, size, scale_factor, mode="bilinear", align_corners=True)
if upsample_bilinear.__doc__:
upsample_bilinear.__doc__ = upsample_bilinear.__doc__.format(
**reproducibility_notes
)
GRID_SAMPLE_INTERPOLATION_MODES = {
"bilinear": 0,
"nearest": 1,
"bicubic": 2,
}
GRID_SAMPLE_PADDING_MODES = {
"zeros": 0,
"border": 1,
"reflection": 2,
}
def grid_sample(
input: Tensor,
grid: Tensor,
mode: str = "bilinear",
padding_mode: str = "zeros",
align_corners: Optional[bool] = None,
) -> Tensor:
r"""Compute grid sample.
Given an :attr:`input` and a flow-field :attr:`grid`, computes the
``output`` using :attr:`input` values and pixel locations from :attr:`grid`.
Currently, only spatial (4-D) and volumetric (5-D) :attr:`input` are
supported.
In the spatial (4-D) case, for :attr:`input` with shape
:math:`(N, C, H_\text{in}, W_\text{in})` and :attr:`grid` with shape
:math:`(N, H_\text{out}, W_\text{out}, 2)`, the output will have shape
:math:`(N, C, H_\text{out}, W_\text{out})`.
For each output location ``output[n, :, h, w]``, the size-2 vector
``grid[n, h, w]`` specifies :attr:`input` pixel locations ``x`` and ``y``,
which are used to interpolate the output value ``output[n, :, h, w]``.
In the case of 5D inputs, ``grid[n, d, h, w]`` specifies the
``x``, ``y``, ``z`` pixel locations for interpolating
``output[n, :, d, h, w]``. :attr:`mode` argument specifies ``nearest`` or
``bilinear`` interpolation method to sample the input pixels.
:attr:`grid` specifies the sampling pixel locations normalized by the
:attr:`input` spatial dimensions. Therefore, it should have most values in
the range of ``[-1, 1]``. For example, ``x = -1, y = -1`` corresponds to the
top-left pixel of :attr:`input`, and ``x = 1, y = 1`` to the
bottom-right pixel of :attr:`input`.
If :attr:`grid` has values outside the range of ``[-1, 1]``, the corresponding
outputs are handled as defined by :attr:`padding_mode`. Options are
* ``padding_mode="zeros"``: use ``0`` for out-of-bound grid locations,
* ``padding_mode="border"``: use border values for out-of-bound grid locations,
* ``padding_mode="reflection"``: use values at locations reflected by
the border for out-of-bound grid locations. For a location far away
from the border, it keeps being reflected until it falls in bounds,
e.g., (normalized) pixel location ``x = -3.5`` reflects by border ``-1``
and becomes ``x' = 1.5``, then reflects by border ``1`` and becomes
``x'' = 0.5``.
Note:
This function is often used in conjunction with :func:`affine_grid`
to build `Spatial Transformer Networks`_ .
Note:
When using the CUDA backend, this operation may induce nondeterministic
behaviour in its backward pass that is not easily switched off.
Please see the notes on :doc:`/notes/randomness` for background.
Note:
NaN values in :attr:`grid` would be interpreted as ``-1``.
Args:
input (Tensor): input of shape :math:`(N, C, H_\text{in}, W_\text{in})` (4-D case)
or :math:`(N, C, D_\text{in}, H_\text{in}, W_\text{in})` (5-D case)
grid (Tensor): flow-field of shape :math:`(N, H_\text{out}, W_\text{out}, 2)` (4-D case)
or :math:`(N, D_\text{out}, H_\text{out}, W_\text{out}, 3)` (5-D case)
mode (str): interpolation mode to calculate output values
``'bilinear'`` | ``'nearest'`` | ``'bicubic'``. Default: ``'bilinear'``
Note: ``mode='bicubic'`` supports only 4-D input.
When ``mode='bilinear'`` and the input is 5-D, the interpolation mode
used internally will actually be trilinear. However, when the input is 4-D,
the interpolation mode will legitimately be bilinear.
padding_mode (str): padding mode for outside grid values
``'zeros'`` | ``'border'`` | ``'reflection'``. Default: ``'zeros'``
align_corners (bool, optional): Geometrically, we consider the pixels of the
input as squares rather than points.
If set to ``True``, the extrema (``-1`` and ``1``) are considered as referring
to the center points of the input's corner pixels. If set to ``False``, they
are instead considered as referring to the corner points of the input's corner
pixels, making the sampling more resolution agnostic.
This option parallels the ``align_corners`` option in
:func:`interpolate`, and so whichever option is used here
should also be used there to resize the input image before grid sampling.
Default: ``False``
Returns:
output (Tensor): output Tensor
.. _`Spatial Transformer Networks`:
https://arxiv.org/abs/1506.02025
.. warning::
When ``align_corners = True``, the grid positions depend on the pixel
size relative to the input image size, and so the locations sampled by
:func:`grid_sample` will differ for the same input given at different
resolutions (that is, after being upsampled or downsampled).
The default behavior up to version 1.2.0 was ``align_corners = True``.
Since then, the default behavior has been changed to ``align_corners = False``,
in order to bring it in line with the default for :func:`interpolate`.
.. note::
``mode='bicubic'`` is implemented using the `cubic convolution algorithm`_ with :math:`\alpha=-0.75`.
The constant :math:`\alpha` might be different from packages to packages.
For example, `PIL`_ and `OpenCV`_ use -0.5 and -0.75 respectively.
This algorithm may "overshoot" the range of values it's interpolating.
For example, it may produce negative values or values greater than 255 when interpolating input in [0, 255].
Clamp the results with :func:`torch.clamp` to ensure they are within the valid range.
.. _`cubic convolution algorithm`: https://en.wikipedia.org/wiki/Bicubic_interpolation
.. _`PIL`: https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/src/libImaging/Resample.c#L51
.. _`OpenCV`: https://github.com/opencv/opencv/blob/f345ed564a06178670750bad59526cfa4033be55/modules/imgproc/src/resize.cpp#L908
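Example (illustrative sketch; an identity warp built with :func:`affine_grid`)::
>>> inp = torch.arange(16.).view(1, 1, 4, 4)
>>> theta = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])  # identity affine, shape (1, 2, 3)
>>> grid = F.affine_grid(theta, inp.size(), align_corners=False)
>>> out = F.grid_sample(inp, grid, mode="bilinear", align_corners=False)
>>> out.shape  # same spatial size as the input; values match ``inp`` up to interpolation error
torch.Size([1, 1, 4, 4])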
"""
if has_torch_function_variadic(input, grid):
return handle_torch_function(
grid_sample,
(input, grid),
input,
grid,
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
)
if mode != "bilinear" and mode != "nearest" and mode != "bicubic":
raise ValueError(
f"nn.functional.grid_sample(): expected mode to be 'bilinear', 'nearest' or 'bicubic', but got: '{mode}'"
)
if (
padding_mode != "zeros"
and padding_mode != "border"
and padding_mode != "reflection"
):
raise ValueError(
"nn.functional.grid_sample(): expected padding_mode "
"to be 'zeros', 'border', or 'reflection', "
f"but got: '{padding_mode}'"
)
if mode == "bilinear":
mode_enum = 0
elif mode == "nearest":
mode_enum = 1
else: # mode == 'bicubic'
mode_enum = 2
if padding_mode == "zeros":
padding_mode_enum = 0
elif padding_mode == "border":
padding_mode_enum = 1
else: # padding_mode == 'reflection'
padding_mode_enum = 2
if align_corners is None:
warnings.warn(
"Default grid_sample and affine_grid behavior has changed "
"to align_corners=False since 1.3.0. Please specify "
"align_corners=True if the old behavior is desired. "
"See the documentation of grid_sample for details."
)
align_corners = False
return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum, align_corners)
def affine_grid(
theta: Tensor,
size: list[int],
align_corners: Optional[bool] = None,
) -> Tensor:
r"""Generate 2D or 3D flow field (sampling grid), given a batch of affine matrices :attr:`theta`.
.. note::
This function is often used in conjunction with :func:`grid_sample`
to build `Spatial Transformer Networks`_ .
Args:
theta (Tensor): input batch of affine matrices with shape
(:math:`N \times 2 \times 3`) for 2D or
(:math:`N \times 3 \times 4`) for 3D
size (torch.Size): the target output image size.
(:math:`N \times C \times H \times W` for 2D or
:math:`N \times C \times D \times H \times W` for 3D)
Example: torch.Size((32, 3, 24, 24))
align_corners (bool, optional): if ``True``, consider ``-1`` and ``1``
to refer to the centers of the corner pixels rather than the image corners.
Refer to :func:`grid_sample` for a more complete description.
A grid generated by :func:`affine_grid` should be passed to :func:`grid_sample`
with the same setting for this option.
Default: ``False``
Returns:
output (Tensor): output Tensor of size (:math:`N \times H \times W \times 2`) for 2D or
(:math:`N \times D \times H \times W \times 3`) for 3D
.. _`Spatial Transformer Networks`:
https://arxiv.org/abs/1506.02025
.. warning::
When ``align_corners = True``, the grid positions depend on the pixel
size relative to the input image size, and so the locations sampled by
:func:`grid_sample` will differ for the same input given at different
resolutions (that is, after being upsampled or downsampled).
The default behavior up to version 1.2.0 was ``align_corners = True``.
Since then, the default behavior has been changed to ``align_corners = False``,
in order to bring it in line with the default for :func:`interpolate`.
.. warning::
When ``align_corners = True``, 2D affine transforms on 1D data and
3D affine transforms on 2D data (that is, when one of the spatial
dimensions has unit size) are ill-defined, and not an intended use case.
This is not a problem when ``align_corners = False``.
Up to version 1.2.0, all grid points along a unit dimension were
considered arbitrarily to be at ``-1``.
From version 1.3.0, under ``align_corners = True`` all grid points
along a unit dimension are considered to be at ``0``
(the center of the input image).
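Example (illustrative sketch)::
>>> theta = torch.eye(2, 3).unsqueeze(0)  # identity 2D affine matrix, shape (1, 2, 3)
>>> grid = F.affine_grid(theta, torch.Size((1, 3, 5, 5)), align_corners=False)
>>> grid.shape
torch.Size([1, 5, 5, 2])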
"""
if has_torch_function_unary(theta):
return handle_torch_function(
affine_grid, (theta,), theta, size, align_corners=align_corners
)
if align_corners is None:
warnings.warn(
"Default grid_sample and affine_grid behavior has changed "
"to align_corners=False since 1.3.0. Please specify "
"align_corners=True if the old behavior is desired. "
"See the documentation of grid_sample for details."
)
align_corners = False
# enforce floating point dtype on theta
if not theta.is_floating_point():
raise ValueError(
f"Expected theta to have floating point type, but got {theta.dtype}"
)
# check that shapes and sizes match
if len(size) == 4:
if theta.dim() != 3 or theta.shape[-2] != 2 or theta.shape[-1] != 3:
raise ValueError(
f"Expected a batch of 2D affine matrices of shape Nx2x3 for size {size}. Got {theta.shape}."
)
spatial_size = size[-2:] # spatial dimension sizes
elif len(size) == 5:
if theta.dim() != 3 or theta.shape[-2] != 3 or theta.shape[-1] != 4:
raise ValueError(
f"Expected a batch of 3D affine matrices of shape Nx3x4 for size {size}. Got {theta.shape}."
)
spatial_size = size[-3:] # spatial dimension sizes
else:
raise NotImplementedError(
"affine_grid only supports 4D and 5D sizes, "
"for 2D and 3D affine transforms, respectively. "
f"Got size {size}."
)
# check for empty span
if align_corners and min(spatial_size) == 1:
warnings.warn(
"Since version 1.3.0, affine_grid behavior has changed "
"for unit-size grids when align_corners=True. "
"This is not an intended use case of affine_grid. "
"See the documentation of affine_grid for details."
)
elif min(size) <= 0:
raise ValueError(f"Expected non-zero, positive output size. Got {size}")
return torch.affine_grid_generator(theta, size, align_corners)
def pad(
input: Tensor,
pad: list[int],
mode: str = "constant",
value: Optional[float] = None,
) -> Tensor:
r"""
pad(input, pad, mode="constant", value=None) -> Tensor
Pads tensor.
Padding size:
The padding size by which to pad some dimensions of :attr:`input`
is described starting from the last dimension and moving forward.
:math:`\left\lfloor\frac{\text{len(pad)}}{2}\right\rfloor` dimensions
of ``input`` will be padded.
For example, to pad only the last dimension of the input tensor, then
:attr:`pad` has the form
:math:`(\text{padding\_left}, \text{padding\_right})`;
to pad the last 2 dimensions of the input tensor, then use
:math:`(\text{padding\_left}, \text{padding\_right},`
:math:`\text{padding\_top}, \text{padding\_bottom})`;
to pad the last 3 dimensions, use
:math:`(\text{padding\_left}, \text{padding\_right},`
:math:`\text{padding\_top}, \text{padding\_bottom}`
:math:`\text{padding\_front}, \text{padding\_back})`.
Padding mode:
See :class:`torch.nn.CircularPad2d`, :class:`torch.nn.ConstantPad2d`,
:class:`torch.nn.ReflectionPad2d`, and :class:`torch.nn.ReplicationPad2d`
for concrete examples on how each of the padding modes works. Constant
padding is implemented for arbitrary dimensions. Circular, replicate and
reflection padding are implemented for padding the last 3 dimensions of a
4D or 5D input tensor, the last 2 dimensions of a 3D or 4D input tensor,
or the last dimension of a 2D or 3D input tensor.
Note:
When using the CUDA backend, this operation may induce nondeterministic
behaviour in its backward pass that is not easily switched off.
Please see the notes on :doc:`/notes/randomness` for background.
Args:
input (Tensor): N-dimensional tensor
pad (tuple): m-elements tuple, where
:math:`\frac{m}{2} \leq` input dimensions and :math:`m` is even.
mode: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``.
Default: ``'constant'``
value: fill value for ``'constant'`` padding. Default: ``0``
Examples::
>>> t4d = torch.empty(3, 3, 4, 2)
>>> p1d = (1, 1) # pad last dim by 1 on each side
>>> out = F.pad(t4d, p1d, "constant", 0) # effectively zero padding
>>> print(out.size())
torch.Size([3, 3, 4, 4])
>>> p2d = (1, 1, 2, 2) # pad last dim by (1, 1) and 2nd to last by (2, 2)
>>> out = F.pad(t4d, p2d, "constant", 0)
>>> print(out.size())
torch.Size([3, 3, 8, 4])
>>> t4d = torch.empty(3, 3, 4, 2)
>>> p3d = (0, 1, 2, 1, 3, 3) # pad by (0, 1), (2, 1), and (3, 3)
>>> out = F.pad(t4d, p3d, "constant", 0)
>>> print(out.size())
torch.Size([3, 9, 7, 3])
"""
if has_torch_function_unary(input):
return handle_torch_function(
torch.nn.functional.pad, (input,), input, pad, mode=mode, value=value
)
if not torch.jit.is_scripting():
if torch.are_deterministic_algorithms_enabled() and (
input.is_cuda or input.is_xpu
):
if mode == "replicate":
# Use slow decomp whose backward will be in terms of index_put.
# importlib is required because the import cannot be top level
# (cycle) and cannot be nested (TS doesn't support)
return importlib.import_module(
"torch._decomp.decompositions"
)._replication_pad(input, pad)
return torch._C._nn.pad(input, pad, mode, value)
# TODO: Fix via https://github.com/pytorch/pytorch/issues/75798
pad.__module__ = "torch.nn.functional"
# distance
pairwise_distance = _add_docstr(
torch.pairwise_distance,
r"""
pairwise_distance(x1, x2, p=2.0, eps=1e-6, keepdim=False) -> Tensor
See :class:`torch.nn.PairwiseDistance` for details
""",
)
pdist = _add_docstr(
torch.pdist,
r"""
pdist(input, p=2) -> Tensor
Computes the p-norm distance between every pair of row vectors in the input.
This is identical to the upper triangular portion, excluding the diagonal, of
`torch.norm(input[:, None] - input, dim=2, p=p)`. This function will be faster
if the rows are contiguous.
If input has shape :math:`N \times M` then the output will have shape
:math:`\frac{1}{2} N (N - 1)`.
This function is equivalent to ``scipy.spatial.distance.pdist(input,
'minkowski', p=p)`` if :math:`p \in (0, \infty)`. When :math:`p = 0` it is
equivalent to ``scipy.spatial.distance.pdist(input, 'hamming') * M``.
When :math:`p = \infty`, the closest scipy function is
``scipy.spatial.distance.pdist(xn, lambda x, y: np.abs(x - y).max())``.
Args:
input: input tensor of shape :math:`N \times M`.
p: p value for the p-norm distance to calculate between each vector pair
:math:`\in [0, \infty]`.
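Example (illustrative sketch)::
>>> a = torch.randn(4, 5)
>>> F.pdist(a, p=2).shape  # 4 * (4 - 1) / 2 pairwise distances
torch.Size([6])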
""",
)
cosine_similarity = _add_docstr(
torch.cosine_similarity,
r"""
cosine_similarity(x1, x2, dim=1, eps=1e-8) -> Tensor
Returns cosine similarity between ``x1`` and ``x2``, computed along dim. ``x1`` and ``x2`` must be broadcastable
to a common shape. ``dim`` refers to the dimension in this common shape. Dimension ``dim`` of the output is
squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 fewer dimension.
.. math ::
\text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2, \epsilon) \cdot \max(\Vert x_2 \Vert _2, \epsilon)}
Supports :ref:`type promotion <type-promotion-doc>`.
Args:
x1 (Tensor): First input.
x2 (Tensor): Second input.
dim (int, optional): Dimension along which cosine similarity is computed. Default: 1
eps (float, optional): Small value to avoid division by zero.
Default: 1e-8
Example::
>>> input1 = torch.randn(100, 128)
>>> input2 = torch.randn(100, 128)
>>> output = F.cosine_similarity(input1, input2)
>>> print(output)
""",
)
one_hot = _add_docstr(
torch._C._nn.one_hot,
r"""
one_hot(tensor, num_classes=-1) -> LongTensor
Takes LongTensor with index values of shape ``(*)`` and returns a tensor
of shape ``(*, num_classes)`` that has zeros everywhere except where the
index of last dimension matches the corresponding value of the input tensor,
in which case it will be 1.
See also `One-hot on Wikipedia`_ .
.. _One-hot on Wikipedia:
https://en.wikipedia.org/wiki/One-hot
Arguments:
tensor (LongTensor): class values of any shape.
num_classes (int, optional): Total number of classes. If set to -1, the number
of classes will be inferred as one greater than the largest class
value in the input tensor. Default: -1
Returns:
LongTensor that has one more dimension with 1 values at the
index of last dimension indicated by the input, and 0 everywhere
else.
Examples:
>>> F.one_hot(torch.arange(0, 5) % 3)
tensor([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
>>> F.one_hot(torch.arange(0, 5) % 3, num_classes=5)
tensor([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0]])
>>> F.one_hot(torch.arange(0, 6).view(3,2) % 3)
tensor([[[1, 0, 0],
[0, 1, 0]],
[[0, 0, 1],
[1, 0, 0]],
[[0, 1, 0],
[0, 0, 1]]])
""",
)
def triplet_margin_loss(
anchor: Tensor,
positive: Tensor,
negative: Tensor,
margin: float = 1.0,
p: float = 2,
eps: float = 1e-6,
swap: bool = False,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""Compute the triplet loss between given input tensors and a margin greater than 0.
See :class:`~torch.nn.TripletMarginLoss` for details.
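Example (illustrative sketch; random embeddings)::
>>> anchor = torch.randn(8, 128)
>>> positive = torch.randn(8, 128)
>>> negative = torch.randn(8, 128)
>>> F.triplet_margin_loss(anchor, positive, negative, margin=1.0, p=2)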
"""
if has_torch_function_variadic(anchor, positive, negative):
return handle_torch_function(
triplet_margin_loss,
(anchor, positive, negative),
anchor,
positive,
negative,
margin=margin,
p=p,
eps=eps,
swap=swap,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
if margin <= 0:
raise ValueError(f"margin must be greater than 0, got {margin}")
return torch.triplet_margin_loss(
anchor, positive, negative, margin, p, eps, swap, reduction_enum
)
def triplet_margin_with_distance_loss(
anchor: Tensor,
positive: Tensor,
negative: Tensor,
*,
distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = None,
margin: float = 1.0,
swap: bool = False,
reduction: str = "mean",
) -> Tensor:
r"""Compute the triplet margin loss for input tensors using a custom distance function.
See :class:`~torch.nn.TripletMarginWithDistanceLoss` for details.
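Example (illustrative sketch; cosine distance as the custom metric)::
>>> anchor = torch.randn(8, 128)
>>> positive = torch.randn(8, 128)
>>> negative = torch.randn(8, 128)
>>> cosine_distance = lambda x, y: 1.0 - F.cosine_similarity(x, y)
>>> F.triplet_margin_with_distance_loss(
...     anchor, positive, negative, distance_function=cosine_distance, margin=0.5, swap=True
... )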
"""
if torch.jit.is_scripting():
raise NotImplementedError(
"F.triplet_margin_with_distance_loss does not support JIT scripting: "
"functions requiring Callables cannot be scripted."
)
if has_torch_function_variadic(anchor, positive, negative):
return handle_torch_function(
triplet_margin_with_distance_loss,
(anchor, positive, negative),
anchor,
positive,
negative,
distance_function=distance_function,
margin=margin,
swap=swap,
reduction=reduction,
)
# Check validity of reduction mode
if reduction not in ("mean", "sum", "none"):
raise ValueError(f"{reduction} is not a valid value for reduction")
# Check validity of margin
if margin <= 0:
raise ValueError(f"margin must be greater than 0, got {margin}")
# Check dimensions
a_dim = anchor.ndim
p_dim = positive.ndim
n_dim = negative.ndim
if not (a_dim == p_dim and p_dim == n_dim):
raise RuntimeError(
f"The anchor, positive, and negative tensors are expected to have "
f"the same number of dimensions, but got: anchor {a_dim}D, "
f"positive {p_dim}D, and negative {n_dim}D inputs"
)
# Calculate loss
if distance_function is None:
distance_function = torch.pairwise_distance
dist_pos = distance_function(anchor, positive)
dist_neg = distance_function(anchor, negative)
# The distance swap is described in the paper "Learning shallow
# convolutional feature descriptors with triplet losses" by V. Balntas, E.
# Riba et al. If True, and if the positive example is closer to the
# negative example than the anchor is, swaps the positive example and the
# anchor in the loss computation.
if swap:
dist_swap = distance_function(positive, negative)
dist_neg = torch.minimum(dist_neg, dist_swap)
loss = torch.clamp_min(margin + dist_pos - dist_neg, 0)
# Apply reduction
if reduction == "sum":
return torch.sum(loss)
elif reduction == "mean":
return torch.mean(loss)
else: # reduction == "none"
return loss
def normalize(
input: Tensor,
p: float = 2.0,
dim: int = 1,
eps: float = 1e-12,
out: Optional[Tensor] = None,
) -> Tensor:
r"""Perform :math:`L_p` normalization of inputs over specified dimension.
For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
:math:`n_{dim}` -element vector :math:`v` along dimension :attr:`dim` is transformed as
.. math::
v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.
With the default arguments it uses the Euclidean norm over vectors along dimension :math:`1` for normalization.
Args:
input: input tensor of any shape
p (float): the exponent value in the norm formulation. Default: 2
dim (int or tuple of ints): the dimension to reduce. Default: 1
eps (float): small value to avoid division by zero. Default: 1e-12
out (Tensor, optional): the output tensor. If :attr:`out` is used, this
operation won't be differentiable.
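Example (illustrative sketch)::
>>> x = torch.randn(3, 4)
>>> y = F.normalize(x, p=2.0, dim=1)  # each row now has (approximately) unit L2 norm
>>> y.shape
torch.Size([3, 4])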
"""
if has_torch_function_variadic(input, out):
return handle_torch_function(
normalize, (input, out), input, p=p, dim=dim, eps=eps, out=out
)
if out is None:
denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)
return input / denom
else:
denom = input.norm(p, dim, keepdim=True).clamp_min_(eps).expand_as(input)
return torch.div(input, denom, out=out)
def assert_int_or_pair(arg: list[int], arg_name: str, message: str) -> None:
assert isinstance(arg, int) or len(arg) == 2, message.format(arg_name)
def unfold(
input: Tensor,
kernel_size: BroadcastingList2[int],
dilation: BroadcastingList2[int] = 1,
padding: BroadcastingList2[int] = 0,
stride: BroadcastingList2[int] = 1,
) -> Tensor:
r"""Extract sliding local blocks from a batched input tensor.
.. warning::
Currently, only 4-D input tensors (batched image-like tensors) are
supported.
.. warning::
More than one element of the unfolded tensor may refer to a single
memory location. As a result, in-place operations (especially ones that
are vectorized) may result in incorrect behavior. If you need to write
to the tensor, please clone it first.
See :class:`torch.nn.Unfold` for details
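Example (illustrative sketch; a batched image-like input)::
>>> inp = torch.randn(1, 3, 10, 12)
>>> blocks = F.unfold(inp, kernel_size=(4, 5))
>>> blocks.shape  # (N, C * kh * kw, L) with L = (10 - 4 + 1) * (12 - 5 + 1)
torch.Size([1, 60, 56])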
"""
if has_torch_function_unary(input):
return handle_torch_function(
unfold,
(input,),
input,
kernel_size,
dilation=dilation,
padding=padding,
stride=stride,
)
return torch._C._nn.im2col(
input, _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride)
)
def fold(
input: Tensor,
output_size: BroadcastingList2[int],
kernel_size: BroadcastingList2[int],
dilation: BroadcastingList2[int] = 1,
padding: BroadcastingList2[int] = 0,
stride: BroadcastingList2[int] = 1,
) -> Tensor:
r"""Combine an array of sliding local blocks into a large containing tensor.
.. warning::
Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported.
See :class:`torch.nn.Fold` for details
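Example (illustrative sketch; folding :func:`unfold` output back, overlaps are summed)::
>>> inp = torch.randn(1, 3, 10, 12)
>>> blocks = F.unfold(inp, kernel_size=(4, 5))
>>> out = F.fold(blocks, output_size=(10, 12), kernel_size=(4, 5))
>>> out.shape  # same shape as ``inp``; overlapping positions are summed, not averaged
torch.Size([1, 3, 10, 12])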
"""
if has_torch_function_unary(input):
return handle_torch_function(
fold,
(input,),
input,
output_size,
kernel_size,
dilation=dilation,
padding=padding,
stride=stride,
)
return torch._C._nn.col2im(
input,
_pair(output_size),
_pair(kernel_size),
_pair(dilation),
_pair(padding),
_pair(stride),
)
#
# multihead attention
#
def _in_projection_packed(
q: Tensor,
k: Tensor,
v: Tensor,
w: Tensor,
b: Optional[Tensor] = None,
) -> list[Tensor]:
r"""Perform the in-projection step of the attention operation, using packed weights.
Output is a triple containing projection tensors for query, key and value.
Args:
q, k, v: query, key and value tensors to be projected. For self-attention,
these are typically the same tensor; for encoder-decoder attention,
k and v are typically the same tensor. (We take advantage of these
identities for performance if they are present.) Regardless, q, k and v
must share a common embedding dimension; otherwise their shapes may vary.
w: projection weights for q, k and v, packed into a single tensor. Weights
are packed along dimension 0, in q, k, v order.
b: optional projection biases for q, k and v, packed into a single tensor
in q, k, v order.
Shape:
Inputs:
- q: :math:`(..., E)` where E is the embedding dimension
- k: :math:`(..., E)` where E is the embedding dimension
- v: :math:`(..., E)` where E is the embedding dimension
- w: :math:`(E * 3, E)` where E is the embedding dimension
- b: :math:`E * 3` where E is the embedding dimension
Output:
- in output list :math:`[q', k', v']`, each output tensor will have the
same shape as the corresponding input tensor.
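Example (illustrative sketch; self-attention case with E = 4)::
>>> x = torch.randn(2, 10, 4)   # (batch, seq, E)
>>> w = torch.randn(3 * 4, 4)   # packed q/k/v weights, shape (3 * E, E)
>>> b = torch.randn(3 * 4)      # packed q/k/v biases, shape (3 * E,)
>>> q, k, v = _in_projection_packed(x, x, x, w, b)
>>> q.shape, k.shape, v.shape
(torch.Size([2, 10, 4]), torch.Size([2, 10, 4]), torch.Size([2, 10, 4]))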
"""
E = q.size(-1)
if k is v:
if q is k:
# self-attention
proj = linear(q, w, b)
# reshape to 3, E and not E, 3 is deliberate for better memory coalescing and keeping same order as chunk()
proj = (
proj.unflatten(-1, (3, E))
.unsqueeze(0)
.transpose(0, -2)
.squeeze(-2)
.contiguous()
)
return proj[0], proj[1], proj[2]
else:
# encoder-decoder attention
w_q, w_kv = w.split([E, E * 2])
if b is None:
b_q = b_kv = None
else:
b_q, b_kv = b.split([E, E * 2])
q_proj = linear(q, w_q, b_q)
kv_proj = linear(k, w_kv, b_kv)
# reshape to 2, E and not E, 2 is deliberate for better memory coalescing and keeping same order as chunk()
kv_proj = (
kv_proj.unflatten(-1, (2, E))
.unsqueeze(0)
.transpose(0, -2)
.squeeze(-2)
.contiguous()
)
return (q_proj, kv_proj[0], kv_proj[1])
else:
w_q, w_k, w_v = w.chunk(3)
if b is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = b.chunk(3)
return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
def _in_projection(
q: Tensor,
k: Tensor,
v: Tensor,
w_q: Tensor,
w_k: Tensor,
w_v: Tensor,
b_q: Optional[Tensor] = None,
b_k: Optional[Tensor] = None,
b_v: Optional[Tensor] = None,
) -> tuple[Tensor, Tensor, Tensor]:
r"""Perform the in-projection step of the attention operation.
This is simply a triple of linear projections,
with shape constraints on the weights which
ensure embedding dimension uniformity in the projected outputs.
Output is a triple containing projection tensors for query, key and value.
Args:
q, k, v: query, key and value tensors to be projected.
w_q, w_k, w_v: weights for q, k and v, respectively.
b_q, b_k, b_v: optional biases for q, k and v, respectively.
Shape:
Inputs:
- q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
number of leading dimensions.
- k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
number of leading dimensions.
- v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
number of leading dimensions.
- w_q: :math:`(Eq, Eq)`
- w_k: :math:`(Eq, Ek)`
- w_v: :math:`(Eq, Ev)`
- b_q: :math:`(Eq)`
- b_k: :math:`(Eq)`
- b_v: :math:`(Eq)`
Output: in output triple :math:`(q', k', v')`,
- q': :math:`[Qdims..., Eq]`
- k': :math:`[Kdims..., Eq]`
- v': :math:`[Vdims..., Eq]`
"""
Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
assert w_q.shape == (
Eq,
Eq,
), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}"
assert w_k.shape == (
Eq,
Ek,
), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}"
assert w_v.shape == (
Eq,
Ev,
), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}"
assert b_q is None or b_q.shape == (
Eq,
), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}"
assert b_k is None or b_k.shape == (
Eq,
), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}"
assert b_v is None or b_v.shape == (
Eq,
), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}"
return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
scaled_dot_product_attention = _add_docstr(
torch._C._nn.scaled_dot_product_attention,
r"""scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0,
is_causal=False, scale=None, enable_gqa=False) -> Tensor:
Computes scaled dot product attention on query, key and value tensors, using an optional attention mask if passed,
and applying dropout if a probability greater than 0.0 is specified. The optional scale argument can only be
specified as a keyword argument.
.. code-block:: python
# Efficient implementation equivalent to the following:
def scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0,
is_causal=False, scale=None, enable_gqa=False) -> torch.Tensor:
L, S = query.size(-2), key.size(-2)
scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale
attn_bias = torch.zeros(L, S, dtype=query.dtype, device=query.device)
if is_causal:
assert attn_mask is None
temp_mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0)
attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))
attn_bias.to(query.dtype)
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf"))
else:
attn_bias = attn_mask + attn_bias
if enable_gqa:
key = key.repeat_interleave(query.size(-3)//key.size(-3), -3)
value = value.repeat_interleave(query.size(-3)//value.size(-3), -3)
attn_weight = query @ key.transpose(-2, -1) * scale_factor
attn_weight += attn_bias
attn_weight = torch.softmax(attn_weight, dim=-1)
attn_weight = torch.dropout(attn_weight, dropout_p, train=True)
return attn_weight @ value
.. warning::
This function is beta and subject to change.
.. warning::
This function always applies dropout according to the specified ``dropout_p`` argument.
To disable dropout during evaluation, be sure to pass a value of ``0.0`` when the module
that makes the function call is not in training mode.
For example:
.. code-block:: python
class MyModel(nn.Module):
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, ...):
return F.scaled_dot_product_attention(...,
dropout_p=(self.p if self.training else 0.0))
Note:
There are currently three supported implementations of scaled dot product attention:
- `FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning`_
- `Memory-Efficient Attention`_
- A PyTorch implementation defined in C++ matching the above formulation
The function may call optimized kernels for improved performance when using the CUDA backend.
For all other backends, the PyTorch implementation will be used.
All implementations are enabled by default. Scaled dot product attention attempts to automatically select the
most optimal implementation based on the inputs. In order to provide more fine-grained control over what implementation
is used, the following functions are provided for enabling and disabling implementations.
The context manager is the preferred mechanism:
- :func:`torch.nn.attention.sdpa_kernel`: A context manager used to enable or disable any of the implementations.
- :func:`torch.backends.cuda.enable_flash_sdp`: Globally enables or disables FlashAttention.
- :func:`torch.backends.cuda.enable_mem_efficient_sdp`: Globally enables or disables Memory-Efficient Attention.
- :func:`torch.backends.cuda.enable_math_sdp`: Globally enables or disables the PyTorch C++ implementation.
Each of the fused kernels has specific input limitations. If the user requires the use of a specific fused implementation,
disable the PyTorch C++ implementation using :func:`torch.nn.attention.sdpa_kernel`.
In the event that a fused implementation is not available, a warning will be raised with the
reasons why the fused implementation cannot run.
Due to the nature of fusing floating point operations, the output of this function may be different
depending on what backend kernel is chosen.
    The C++ implementation supports torch.float64 and can be used when higher precision is required.
    For the math backend, all intermediates are kept in torch.float if inputs are in torch.half or torch.bfloat16.
    For more information please see :doc:`/notes/numerical_accuracy`
    Grouped Query Attention (GQA) is an experimental feature. It currently works only with the
    FlashAttention and math kernels on CUDA tensors, and does not support nested tensors.
Constraints for GQA:
- number_of_heads_query % number_of_heads_key_value == 0 and,
- number_of_heads_key == number_of_heads_value
Note:
{cudnn_reproducibility_note}
""".format(
**reproducibility_notes
)
+ r"""
Args:
query (Tensor): Query tensor; shape :math:`(N, ..., Hq, L, E)`.
key (Tensor): Key tensor; shape :math:`(N, ..., H, S, E)`.
value (Tensor): Value tensor; shape :math:`(N, ..., H, S, Ev)`.
attn_mask (optional Tensor): Attention mask; shape must be broadcastable to the shape of attention weights,
which is :math:`(N,..., L, S)`. Two types of masks are supported.
A boolean mask where a value of True indicates that the element *should* take part in attention.
A float mask of the same type as query, key, value that is added to the attention score.
dropout_p (float): Dropout probability; if greater than 0.0, dropout is applied
is_causal (bool): If set to true, the attention masking is a lower triangular matrix when the mask is a
square matrix. The attention masking has the form of the upper left causal bias due to the alignment
(see :class:`torch.nn.attention.bias.CausalBias`) when the mask is a non-square matrix.
An error is thrown if both attn_mask and is_causal are set.
scale (optional float, keyword-only): Scaling factor applied prior to softmax. If None, the default value is set
to :math:`\frac{1}{\sqrt{E}}`.
enable_gqa (bool): If set to True, Grouped Query Attention (GQA) is enabled, by default it is set to False.
Returns:
output (Tensor): Attention output; shape :math:`(N, ..., Hq, L, Ev)`.
Shape legend:
- :math:`N: \text{Batch size} ... : \text{Any number of other batch dimensions (optional)}`
- :math:`S: \text{Source sequence length}`
- :math:`L: \text{Target sequence length}`
- :math:`E: \text{Embedding dimension of the query and key}`
- :math:`Ev: \text{Embedding dimension of the value}`
- :math:`Hq: \text{Number of heads of query}`
- :math:`H: \text{Number of heads of key and value}`
Examples:
>>> # Optionally use the context manager to ensure one of the fused kernels is run
>>> query = torch.rand(32, 8, 128, 64, dtype=torch.float16, device="cuda")
>>> key = torch.rand(32, 8, 128, 64, dtype=torch.float16, device="cuda")
>>> value = torch.rand(32, 8, 128, 64, dtype=torch.float16, device="cuda")
>>> with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
>>> F.scaled_dot_product_attention(query,key,value)
>>> # Sample for GQA for llama3
>>> query = torch.rand(32, 32, 128, 64, dtype=torch.float16, device="cuda")
>>> key = torch.rand(32, 8, 128, 64, dtype=torch.float16, device="cuda")
>>> value = torch.rand(32, 8, 128, 64, dtype=torch.float16, device="cuda")
>>> with sdpa_kernel(backends=[SDPBackend.MATH]):
>>> F.scaled_dot_product_attention(query,key,value,enable_gqa=True)
.. _FlashAttention-2\: Faster Attention with Better Parallelism and Work Partitioning:
https://arxiv.org/abs/2307.08691
.. _Memory-Efficient Attention:
https://github.com/facebookresearch/xformers
.. _Grouped-Query Attention:
https://arxiv.org/pdf/2305.13245
""",
)
def _mha_shape_check(
query: Tensor,
key: Tensor,
value: Tensor,
key_padding_mask: Optional[Tensor],
attn_mask: Optional[Tensor],
num_heads: int,
):
    # Verifies the expected shapes of `query`, `key`, `value`, `key_padding_mask` and `attn_mask`
# and returns if the input is batched or not.
# Raises an error if `query` is not 2-D (unbatched) or 3-D (batched) tensor.
# Shape check.
if query.dim() == 3:
# Batched Inputs
is_batched = True
assert key.dim() == 3 and value.dim() == 3, (
"For batched (3-D) `query`, expected `key` and `value` to be 3-D"
f" but found {key.dim()}-D and {value.dim()}-D tensors respectively"
)
if key_padding_mask is not None:
assert key_padding_mask.dim() == 2, (
"For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D"
f" but found {key_padding_mask.dim()}-D tensor instead"
)
if attn_mask is not None:
assert attn_mask.dim() in (2, 3), (
"For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
f" but found {attn_mask.dim()}-D tensor instead"
)
elif query.dim() == 2:
# Unbatched Inputs
is_batched = False
assert key.dim() == 2 and value.dim() == 2, (
"For unbatched (2-D) `query`, expected `key` and `value` to be 2-D"
f" but found {key.dim()}-D and {value.dim()}-D tensors respectively"
)
if key_padding_mask is not None:
assert key_padding_mask.dim() == 1, (
"For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D"
f" but found {key_padding_mask.dim()}-D tensor instead"
)
if attn_mask is not None:
assert attn_mask.dim() in (2, 3), (
"For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
f" but found {attn_mask.dim()}-D tensor instead"
)
if attn_mask.dim() == 3:
expected_shape = (num_heads, query.shape[0], key.shape[0])
assert (
attn_mask.shape == expected_shape
), f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}"
else:
raise AssertionError(
f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor"
)
return is_batched
def _canonical_mask(
mask: Optional[Tensor],
mask_name: str,
other_type: Optional[DType],
other_name: str,
target_type: DType,
check_other: bool = True,
) -> Optional[Tensor]:
if mask is not None:
_mask_dtype = mask.dtype
_mask_is_float = torch.is_floating_point(mask)
if _mask_dtype != torch.bool and not _mask_is_float:
raise AssertionError(
f"only bool and floating types of {mask_name} are supported"
)
if check_other and other_type is not None:
if _mask_dtype != other_type:
warnings.warn(
f"Support for mismatched {mask_name} and {other_name} "
"is deprecated. Use same type for both instead."
)
if not _mask_is_float:
mask = torch.zeros_like(mask, dtype=target_type).masked_fill_(
mask, float("-inf")
)
return mask
def _none_or_dtype(input: Optional[Tensor]) -> Optional[DType]:
if input is None:
return None
elif isinstance(input, torch.Tensor):
return input.dtype
raise RuntimeError("input to _none_or_dtype() must be None or torch.Tensor")
def _check_key_padding_mask(
key_padding_mask: torch.Tensor, src_len: int, bsz: int
) -> None:
torch._check_with(
AssertionError,
key_padding_mask.shape[0] == bsz,
lambda: f"Expected key_padded_mask.shape[0] to be {bsz}, but got {key_padding_mask.shape[0]}",
)
torch._check_with(
AssertionError,
key_padding_mask.shape[1] == src_len,
lambda: f"Expected key_padded_mask.shape[1] to be {src_len}, but got {key_padding_mask.shape[1]}",
)
def multi_head_attention_forward(
query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Optional[Tensor],
in_proj_bias: Optional[Tensor],
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Optional[Tensor],
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
average_attn_weights: bool = True,
is_causal: bool = False,
) -> tuple[Tensor, Optional[Tensor]]:
r"""Forward method for MultiHeadAttention.
.. note::
See `this tutorial <https://pytorch.org/tutorials/intermediate/transformer_building_blocks.html>`_
for an in depth discussion of the performant building blocks PyTorch offers for building your own
transformer layers.
See :class:`torch.nn.MultiheadAttention` for details.
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
Default: `True`
            Note: `need_weights` defaults to `True`, but should be set to `False`
            for best performance when attention weights are not needed.
            *Setting need_weights to `True`
            leads to a significant performance degradation.*
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
is_causal: If specified, applies a causal mask as attention mask, and ignores
attn_mask for computing scaled dot product attention.
Default: ``False``.
.. warning::
                is_causal provides a hint that attn_mask is the
                causal mask. Providing incorrect hints can result in
                incorrect execution of both the forward and backward
                passes.
        use_separate_proj_weight: the function accepts the projection weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
            a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
        average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across heads.
            Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an effect
            when ``need_weights=True``. Default: True
Shape:
Inputs:
- query: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, E)` or :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, E)` or :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(S)` or :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a FloatTensor is provided, it will be directly added to the value.
If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: Only returned when ``need_weights=True``. If ``average_attn_weights=True``, returns
attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
:math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(num_heads, L, S)` when input is unbatched or :math:`(N, num_heads, L, S)`.
"""
tens_ops = (
query,
key,
value,
in_proj_weight,
in_proj_bias,
bias_k,
bias_v,
out_proj_weight,
out_proj_bias,
)
if has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward,
tens_ops,
query,
key,
value,
embed_dim_to_check,
num_heads,
in_proj_weight,
in_proj_bias,
bias_k,
bias_v,
add_zero_attn,
dropout_p,
out_proj_weight,
out_proj_bias,
training=training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
is_causal=is_causal,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight,
k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight,
static_k=static_k,
static_v=static_v,
average_attn_weights=average_attn_weights,
)
is_batched = _mha_shape_check(
query, key, value, key_padding_mask, attn_mask, num_heads
)
# For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
# is batched, run the computation and before returning squeeze the
# batch dimension so that the output doesn't carry this temporary batch dimension.
if not is_batched:
# unsqueeze if the input is unbatched
query = query.unsqueeze(1)
key = key.unsqueeze(1)
value = value.unsqueeze(1)
if key_padding_mask is not None:
key_padding_mask = key_padding_mask.unsqueeze(0)
# set up shape vars
tgt_len, bsz, embed_dim = query.shape
src_len, _, _ = key.shape
key_padding_mask = _canonical_mask(
mask=key_padding_mask,
mask_name="key_padding_mask",
other_type=_none_or_dtype(attn_mask),
other_name="attn_mask",
target_type=query.dtype,
)
if is_causal and attn_mask is None:
raise RuntimeError(
"Need attn_mask if specifying the is_causal hint. "
"You may use the Transformer module method "
"`generate_square_subsequent_mask` to create this mask."
)
if is_causal and key_padding_mask is None and not need_weights:
        # When we have a key_padding_mask or need weights, we need an attn_mask.
        # Otherwise, we pass the is_causal hint straight through to SDPA as its
        # is_causal indicator.
attn_mask = None
else:
attn_mask = _canonical_mask(
mask=attn_mask,
mask_name="attn_mask",
other_type=None,
other_name="",
target_type=query.dtype,
check_other=False,
)
if key_padding_mask is not None:
# We have the attn_mask, and use that to merge kpm into it.
# Turn off use of is_causal hint, as the merged mask is no
# longer causal.
is_causal = False
assert (
embed_dim == embed_dim_to_check
), f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
if isinstance(embed_dim, torch.Tensor):
# embed_dim can be a tensor when JIT tracing
head_dim = embed_dim.div(num_heads, rounding_mode="trunc")
else:
head_dim = embed_dim // num_heads
assert (
head_dim * num_heads == embed_dim
), f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
if use_separate_proj_weight:
# allow MHA to have different embedding dimensions when separate projection weights are used
assert (
key.shape[:2] == value.shape[:2]
), f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
else:
assert (
key.shape == value.shape
), f"key shape {key.shape} does not match value shape {value.shape}"
#
# compute in-projection
#
if not use_separate_proj_weight:
assert (
in_proj_weight is not None
), "use_separate_proj_weight is False but in_proj_weight is None"
q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
else:
assert (
q_proj_weight is not None
), "use_separate_proj_weight is True but q_proj_weight is None"
assert (
k_proj_weight is not None
), "use_separate_proj_weight is True but k_proj_weight is None"
assert (
v_proj_weight is not None
), "use_separate_proj_weight is True but v_proj_weight is None"
if in_proj_bias is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = in_proj_bias.chunk(3)
q, k, v = _in_projection(
query,
key,
value,
q_proj_weight,
k_proj_weight,
v_proj_weight,
b_q,
b_k,
b_v,
)
# prep attention mask
if attn_mask is not None:
# ensure attn_mask's dim is 3
if attn_mask.dim() == 2:
correct_2d_size = (tgt_len, src_len)
if attn_mask.shape != correct_2d_size:
raise RuntimeError(
f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}."
)
attn_mask = attn_mask.unsqueeze(0)
elif attn_mask.dim() == 3:
correct_3d_size = (bsz * num_heads, tgt_len, src_len)
if attn_mask.shape != correct_3d_size:
raise RuntimeError(
f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}."
)
else:
raise RuntimeError(
f"attn_mask's dimension {attn_mask.dim()} is not supported"
)
# add bias along batch dimension (currently second)
if bias_k is not None and bias_v is not None:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert bias_k is None
assert bias_v is None
#
# reshape q, k, v for multihead attention and make them batch first
#
q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is None:
k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
else:
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
assert (
static_k.size(0) == bsz * num_heads
), f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
assert (
static_k.size(2) == head_dim
), f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
k = static_k
if static_v is None:
v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
else:
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
assert (
static_v.size(0) == bsz * num_heads
), f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
assert (
static_v.size(2) == head_dim
), f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
v = static_v
# add zero attention along batch dimension (now first)
if add_zero_attn:
zero_attn_shape = (bsz * num_heads, 1, head_dim)
k = torch.cat(
[k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1
)
v = torch.cat(
[v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1
)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
# update source sequence length after adjustments
src_len = k.size(1)
# merge key padding and attention masks
if key_padding_mask is not None:
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_check_key_padding_mask(key_padding_mask, src_len, bsz)
key_padding_mask = (
key_padding_mask.view(bsz, 1, 1, src_len)
.expand(-1, num_heads, -1, -1)
.reshape(bsz * num_heads, 1, src_len)
)
if attn_mask is None:
attn_mask = key_padding_mask
else:
attn_mask = attn_mask + key_padding_mask
# adjust dropout probability
if not training:
dropout_p = 0.0
#
# (deep breath) calculate attention and out projection
#
if need_weights:
_B, _Nt, E = q.shape
q_scaled = q * math.sqrt(1.0 / float(E))
assert not (
is_causal and attn_mask is None
), "FIXME: is_causal not implemented for need_weights"
if attn_mask is not None:
attn_output_weights = torch.baddbmm(
attn_mask, q_scaled, k.transpose(-2, -1)
)
else:
attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
attn_output_weights = softmax(attn_output_weights, dim=-1)
if dropout_p > 0.0:
attn_output_weights = dropout(attn_output_weights, p=dropout_p)
attn_output = torch.bmm(attn_output_weights, v)
attn_output = (
attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
# optionally average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
if average_attn_weights:
attn_output_weights = attn_output_weights.mean(dim=1)
if not is_batched:
# squeeze the output if input was unbatched
attn_output = attn_output.squeeze(1)
attn_output_weights = attn_output_weights.squeeze(0)
return attn_output, attn_output_weights
else:
# attn_mask can be either (L,S) or (N*num_heads, L, S)
# if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
# in order to match the input for SDPA of (N, num_heads, L, S)
if attn_mask is not None:
if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
attn_mask = attn_mask.unsqueeze(0)
else:
attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)
q = q.view(bsz, num_heads, tgt_len, head_dim)
k = k.view(bsz, num_heads, src_len, head_dim)
v = v.view(bsz, num_heads, src_len, head_dim)
attn_output = scaled_dot_product_attention(
q, k, v, attn_mask, dropout_p, is_causal
)
attn_output = (
attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
if not is_batched:
# squeeze the output if input was unbatched
attn_output = attn_output.squeeze(1)
return attn_output, None
```
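A minimal usage sketch for the functions above (shapes, seeds, and the randomly initialized projection weights are illustrative assumptions, not taken from the source): it round-trips a tensor through `unfold`/`fold`, runs `scaled_dot_product_attention` with the causal hint, and calls `multi_head_attention_forward` with packed in-projection weights.

```py
import torch
import torch.nn.functional as F

# unfold/fold: with stride == kernel_size the blocks do not overlap,
# so folding the unfolded patches reconstructs the input exactly.
x = torch.randn(1, 3, 4, 4)
patches = F.unfold(x, kernel_size=2, stride=2)               # (1, 3*2*2, 4)
recon = F.fold(patches, output_size=(4, 4), kernel_size=2, stride=2)
assert torch.allclose(recon, x)

# scaled_dot_product_attention with the causal hint; inputs are (N, H, L, E).
q = torch.randn(2, 4, 8, 16)
k = torch.randn(2, 4, 8, 16)
v = torch.randn(2, 4, 8, 16)
out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
print(out.shape)                                             # (2, 4, 8, 16)

# multi_head_attention_forward expects (L, N, E) inputs and a packed
# in-projection weight of shape (3*E, E); weights here are random stand-ins.
L, S, N, E, H = 5, 6, 2, 16, 4
query, key, value = torch.randn(L, N, E), torch.randn(S, N, E), torch.randn(S, N, E)
in_proj_weight = torch.randn(3 * E, E)
in_proj_bias = torch.zeros(3 * E)
out_proj_weight = torch.randn(E, E)
out_proj_bias = torch.zeros(E)
attn_out, attn_weights = F.multi_head_attention_forward(
    query, key, value, E, H,
    in_proj_weight, in_proj_bias,
    None, None, False, 0.0,
    out_proj_weight, out_proj_bias,
    training=False, need_weights=True,
)
print(attn_out.shape, attn_weights.shape)                    # (5, 2, 16) (2, 5, 6)
```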
|
========================================================================================================
SOURCE CODE FILE: grad.py
LINES: 1
SIZE: 9.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\grad.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Gradient interface."""
import torch
from torch.nn.modules.utils import _pair, _single, _triple
def conv1d_input(
input_size,
weight,
grad_output,
stride=1,
padding=0,
dilation=1,
groups=1,
):
r"""Compute the gradient of conv1d with respect to the input of the convolution.
    This is the same as the 1D transposed convolution operator under the hood, but it requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weight tensor (out_channels x in_channels/groups x kW)
grad_output : output gradient tensor (minibatch x out_channels x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1, 1, 3, requires_grad=True)
>>> weight = torch.randn(1, 1, 1, requires_grad=True)
>>> output = F.conv1d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv1d_input(input.shape, weight, grad_output)
"""
input = grad_output.new_empty(1).expand(input_size)
return torch.ops.aten.convolution_backward(
grad_output,
input,
weight,
None,
_single(stride),
_single(padding),
_single(dilation),
False,
[0],
groups,
(True, False, False),
)[0]
def conv1d_weight(
input,
weight_size,
grad_output,
stride=1,
padding=0,
dilation=1,
groups=1,
):
r"""Compute the gradient of conv1d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1, 1, 3, requires_grad=True)
>>> weight = torch.randn(1, 1, 1, requires_grad=True)
>>> output = F.conv1d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> # xdoctest: +SKIP
        >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv1d_weight(input, weight.shape, grad_output)
"""
weight = grad_output.new_empty(1).expand(weight_size)
return torch.ops.aten.convolution_backward(
grad_output,
input,
weight,
None,
_single(stride),
_single(padding),
_single(dilation),
False,
[0],
groups,
(False, True, False),
)[1]
def conv2d_input(
input_size,
weight,
grad_output,
stride=1,
padding=0,
dilation=1,
groups=1,
):
r"""Compute the gradient of conv2d with respect to the input of the convolution.
    This is the same as the 2D transposed convolution operator under the hood, but it requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weight tensor (out_channels x in_channels/groups x kH x kW)
grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1, 1, 3, 3, requires_grad=True)
>>> weight = torch.randn(1, 1, 1, 2, requires_grad=True)
>>> output = F.conv2d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv2d_input(input.shape, weight, grad_output)
"""
input = grad_output.new_empty(1).expand(input_size)
return torch.ops.aten.convolution_backward(
grad_output,
input,
weight,
None,
_pair(stride),
_pair(padding),
_pair(dilation),
False,
[0],
groups,
(True, False, False),
)[0]
def conv2d_weight(
input,
weight_size,
grad_output,
stride=1,
padding=0,
dilation=1,
groups=1,
):
r"""Compute the gradient of conv2d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iH x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1, 1, 3, 3, requires_grad=True)
>>> weight = torch.randn(1, 1, 1, 2, requires_grad=True)
>>> output = F.conv2d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> # xdoctest: +SKIP
        >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv2d_weight(input, weight.shape, grad_output)
"""
weight = grad_output.new_empty(1).expand(weight_size)
return torch.ops.aten.convolution_backward(
grad_output,
input,
weight,
None,
_pair(stride),
_pair(padding),
_pair(dilation),
False,
[0],
groups,
(False, True, False),
)[1]
def conv3d_input(
input_size,
weight,
grad_output,
stride=1,
padding=0,
dilation=1,
groups=1,
):
r"""Compute the gradient of conv3d with respect to the input of the convolution.
    This is the same as the 3D transposed convolution operator under the hood, but it requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)
grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
>>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
>>> output = F.conv3d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv3d_input(input.shape, weight, grad_output)
"""
input = grad_output.new_empty(1).expand(input_size)
return torch.ops.aten.convolution_backward(
grad_output,
input,
weight,
None,
_triple(stride),
_triple(padding),
_triple(dilation),
False,
[0],
groups,
(True, False, False),
)[0]
def conv3d_weight(
input,
weight_size,
grad_output,
stride=1,
padding=0,
dilation=1,
groups=1,
):
r"""Compute the gradient of conv3d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
>>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
>>> output = F.conv3d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv3d_weight(input, weight.shape, grad_output)
"""
weight = grad_output.new_empty(1).expand(weight_size)
return torch.ops.aten.convolution_backward(
grad_output,
input,
weight,
None,
_triple(stride),
_triple(padding),
_triple(dilation),
False,
[0],
groups,
(False, True, False),
)[1]
```
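A small self-check sketch for the helpers above (tensor sizes are illustrative): the explicit `conv2d_input` / `conv2d_weight` gradients should match what autograd computes for the same convolution.

```py
import torch
import torch.nn.functional as F
import torch.nn.grad

inp = torch.randn(1, 1, 3, 3, requires_grad=True)
weight = torch.randn(1, 1, 2, 2, requires_grad=True)
out = F.conv2d(inp, weight)
grad_out = torch.randn_like(out)

# Gradient w.r.t. the input via autograd vs. the explicit helper (which
# only needs the *shape* of the input, not the tensor itself).
(gi_auto,) = torch.autograd.grad(out, inp, grad_out, retain_graph=True)
gi_manual = torch.nn.grad.conv2d_input(inp.shape, weight, grad_out)
assert torch.allclose(gi_auto, gi_manual, atol=1e-6)

# Same comparison for the gradient w.r.t. the weight.
(gw_auto,) = torch.autograd.grad(out, weight, grad_out)
gw_manual = torch.nn.grad.conv2d_weight(inp, weight.shape, grad_out)
assert torch.allclose(gw_auto, gw_manual, atol=1e-6)
```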
|
========================================================================================================
SOURCE CODE FILE: init.py
LINES: 1
SIZE: 24.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\init.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""This file contains utilities for initializing neural network parameters."""
import math
import warnings
from typing import Optional as _Optional
import torch
from torch import Tensor
# These no_grad_* functions are necessary as wrappers around the parts of these
# functions that use `with torch.no_grad()`. The JIT doesn't support context
# managers, so these need to be implemented as builtins. Using these wrappers
# lets us keep those builtins small and re-usable.
def _no_grad_uniform_(tensor, a, b, generator=None):
with torch.no_grad():
return tensor.uniform_(a, b, generator=generator)
def _no_grad_normal_(tensor, mean, std, generator=None):
with torch.no_grad():
return tensor.normal_(mean, std, generator=generator)
def _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=None):
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1, generator=generator)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def _no_grad_fill_(tensor, val):
with torch.no_grad():
return tensor.fill_(val)
def _no_grad_zero_(tensor):
with torch.no_grad():
return tensor.zero_()
def calculate_gain(nonlinearity, param=None):
r"""Return the recommended gain value for the given nonlinearity function.
The values are as follows:
================= ====================================================
nonlinearity gain
================= ====================================================
Linear / Identity :math:`1`
Conv{1,2,3}D :math:`1`
Sigmoid :math:`1`
Tanh :math:`\frac{5}{3}`
ReLU :math:`\sqrt{2}`
Leaky Relu :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
SELU :math:`\frac{3}{4}`
================= ====================================================
.. warning::
In order to implement `Self-Normalizing Neural Networks`_ ,
you should use ``nonlinearity='linear'`` instead of ``nonlinearity='selu'``.
This gives the initial weights a variance of ``1 / N``,
which is necessary to induce a stable fixed point in the forward pass.
In contrast, the default gain for ``SELU`` sacrifices the normalization
effect for more stable gradient flow in rectangular layers.
Args:
nonlinearity: the non-linear function (`nn.functional` name)
param: optional parameter for the non-linear function
Examples:
>>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2
.. _Self-Normalizing Neural Networks: https://papers.nips.cc/paper/2017/hash/5d44ee6f2c3f71b73125876103c8f6c4-Abstract.html
"""
linear_fns = [
"linear",
"conv1d",
"conv2d",
"conv3d",
"conv_transpose1d",
"conv_transpose2d",
"conv_transpose3d",
]
if nonlinearity in linear_fns or nonlinearity == "sigmoid":
return 1
elif nonlinearity == "tanh":
return 5.0 / 3
elif nonlinearity == "relu":
return math.sqrt(2.0)
elif nonlinearity == "leaky_relu":
if param is None:
negative_slope = 0.01
elif (
not isinstance(param, bool)
and isinstance(param, int)
or isinstance(param, float)
):
# True/False are instances of int, hence check above
negative_slope = param
else:
raise ValueError(f"negative_slope {param} not a valid number")
return math.sqrt(2.0 / (1 + negative_slope**2))
elif nonlinearity == "selu":
return (
3.0 / 4
) # Value found empirically (https://github.com/pytorch/pytorch/pull/50664)
else:
raise ValueError(f"Unsupported nonlinearity {nonlinearity}")
def uniform_(
tensor: Tensor,
a: float = 0.0,
b: float = 1.0,
generator: _Optional[torch.Generator] = None,
) -> Tensor:
r"""Fill the input Tensor with values drawn from the uniform distribution.
:math:`\mathcal{U}(a, b)`.
Args:
tensor: an n-dimensional `torch.Tensor`
a: the lower bound of the uniform distribution
b: the upper bound of the uniform distribution
generator: the torch Generator to sample from (default: None)
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.uniform_(w)
"""
if torch.overrides.has_torch_function_variadic(tensor):
return torch.overrides.handle_torch_function(
uniform_, (tensor,), tensor=tensor, a=a, b=b, generator=generator
)
return _no_grad_uniform_(tensor, a, b, generator)
def normal_(
tensor: Tensor,
mean: float = 0.0,
std: float = 1.0,
generator: _Optional[torch.Generator] = None,
) -> Tensor:
r"""Fill the input Tensor with values drawn from the normal distribution.
:math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
generator: the torch Generator to sample from (default: None)
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.normal_(w)
"""
if torch.overrides.has_torch_function_variadic(tensor):
return torch.overrides.handle_torch_function(
normal_, (tensor,), tensor=tensor, mean=mean, std=std, generator=generator
)
return _no_grad_normal_(tensor, mean, std, generator)
def trunc_normal_(
tensor: Tensor,
mean: float = 0.0,
std: float = 1.0,
a: float = -2.0,
b: float = 2.0,
generator: _Optional[torch.Generator] = None,
) -> Tensor:
r"""Fill the input Tensor with values drawn from a truncated normal distribution.
The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
generator: the torch Generator to sample from (default: None)
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=generator)
def constant_(tensor: Tensor, val: float) -> Tensor:
r"""Fill the input Tensor with the value :math:`\text{val}`.
Args:
tensor: an n-dimensional `torch.Tensor`
val: the value to fill the tensor with
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.constant_(w, 0.3)
"""
if torch.overrides.has_torch_function_variadic(tensor):
return torch.overrides.handle_torch_function(
constant_, (tensor,), tensor=tensor, val=val
)
return _no_grad_fill_(tensor, val)
def ones_(tensor: Tensor) -> Tensor:
r"""Fill the input Tensor with the scalar value `1`.
Args:
tensor: an n-dimensional `torch.Tensor`
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.ones_(w)
"""
return _no_grad_fill_(tensor, 1.0)
def zeros_(tensor: Tensor) -> Tensor:
r"""Fill the input Tensor with the scalar value `0`.
Args:
tensor: an n-dimensional `torch.Tensor`
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.zeros_(w)
"""
return _no_grad_zero_(tensor)
def eye_(tensor):
r"""Fill the 2-dimensional input `Tensor` with the identity matrix.
Preserves the identity of the inputs in `Linear` layers, where as
many inputs are preserved as possible.
Args:
tensor: a 2-dimensional `torch.Tensor`
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.eye_(w)
"""
if tensor.ndimension() != 2:
raise ValueError("Only tensors with 2 dimensions are supported")
with torch.no_grad():
torch.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad)
return tensor
def dirac_(tensor, groups=1):
r"""Fill the {3, 4, 5}-dimensional input `Tensor` with the Dirac delta function.
Preserves the identity of the inputs in `Convolutional`
layers, where as many input channels are preserved as possible. In case
    of groups>1, each group of channels preserves identity.
Args:
tensor: a {3, 4, 5}-dimensional `torch.Tensor`
groups (int, optional): number of groups in the conv layer (default: 1)
Examples:
>>> w = torch.empty(3, 16, 5, 5)
>>> nn.init.dirac_(w)
>>> w = torch.empty(3, 24, 5, 5)
>>> nn.init.dirac_(w, 3)
"""
dimensions = tensor.ndimension()
if dimensions not in [3, 4, 5]:
raise ValueError("Only tensors with 3, 4, or 5 dimensions are supported")
sizes = tensor.size()
if sizes[0] % groups != 0:
raise ValueError("dim 0 must be divisible by groups")
out_chans_per_grp = sizes[0] // groups
min_dim = min(out_chans_per_grp, sizes[1])
with torch.no_grad():
tensor.zero_()
for g in range(groups):
for d in range(min_dim):
if dimensions == 3: # Temporal convolution
tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2] = 1
elif dimensions == 4: # Spatial convolution
tensor[
g * out_chans_per_grp + d,
d,
tensor.size(2) // 2,
tensor.size(3) // 2,
] = 1
else: # Volumetric convolution
tensor[
g * out_chans_per_grp + d,
d,
tensor.size(2) // 2,
tensor.size(3) // 2,
tensor.size(4) // 2,
] = 1
return tensor
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.dim()
if dimensions < 2:
raise ValueError(
"Fan in and fan out can not be computed for tensor with fewer than 2 dimensions"
)
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
# math.prod is not always available, accumulate the product manually
# we could use functools.reduce but that is not supported by TorchScript
for s in tensor.shape[2:]:
receptive_field_size *= s
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def xavier_uniform_(
tensor: Tensor,
gain: float = 1.0,
generator: _Optional[torch.Generator] = None,
) -> Tensor:
r"""Fill the input `Tensor` with values using a Xavier uniform distribution.
The method is described in `Understanding the difficulty of training
deep feedforward neural networks` - Glorot, X. & Bengio, Y. (2010).
The resulting tensor will have values sampled from
:math:`\mathcal{U}(-a, a)` where
.. math::
a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}
Also known as Glorot initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
gain: an optional scaling factor
generator: the torch Generator to sample from (default: None)
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
Note:
Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
that the weight matrix is used in a transposed manner,
(i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
This is important for correct initialization.
If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
pass in a transposed weight matrix, i.e. ``nn.init.xavier_uniform_(w.T, ...)``.
"""
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return _no_grad_uniform_(tensor, -a, a, generator)
def xavier_normal_(
tensor: Tensor,
gain: float = 1.0,
generator: _Optional[torch.Generator] = None,
) -> Tensor:
r"""Fill the input `Tensor` with values using a Xavier normal distribution.
The method is described in `Understanding the difficulty of training deep feedforward
neural networks` - Glorot, X. & Bengio, Y. (2010). The resulting tensor
will have values sampled from :math:`\mathcal{N}(0, \text{std}^2)` where
.. math::
\text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}
Also known as Glorot initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
gain: an optional scaling factor
generator: the torch Generator to sample from (default: None)
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.xavier_normal_(w)
Note:
Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
that the weight matrix is used in a transposed manner,
(i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
This is important for correct initialization.
If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
pass in a transposed weight matrix, i.e. ``nn.init.xavier_normal_(w.T, ...)``.
"""
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
return _no_grad_normal_(tensor, 0.0, std, generator)
def _calculate_correct_fan(tensor, mode):
mode = mode.lower()
valid_modes = ["fan_in", "fan_out"]
if mode not in valid_modes:
raise ValueError(f"Mode {mode} not supported, please use one of {valid_modes}")
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
return fan_in if mode == "fan_in" else fan_out
def kaiming_uniform_(
tensor: Tensor,
a: float = 0,
mode: str = "fan_in",
nonlinearity: str = "leaky_relu",
generator: _Optional[torch.Generator] = None,
):
r"""Fill the input `Tensor` with values using a Kaiming uniform distribution.
The method is described in `Delving deep into rectifiers: Surpassing
human-level performance on ImageNet classification` - He, K. et al. (2015).
The resulting tensor will have values sampled from
:math:`\mathcal{U}(-\text{bound}, \text{bound})` where
.. math::
\text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}
Also known as He initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
generator: the torch Generator to sample from (default: None)
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')
Note:
Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
that the weight matrix is used in a transposed manner,
(i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
This is important for correct initialization.
If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
pass in a transposed weight matrix, i.e. ``nn.init.kaiming_uniform_(w.T, ...)``.
"""
if torch.overrides.has_torch_function_variadic(tensor):
return torch.overrides.handle_torch_function(
kaiming_uniform_,
(tensor,),
tensor=tensor,
a=a,
mode=mode,
nonlinearity=nonlinearity,
generator=generator,
)
if 0 in tensor.shape:
warnings.warn("Initializing zero-element tensors is a no-op")
return tensor
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain(nonlinearity, a)
std = gain / math.sqrt(fan)
bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
with torch.no_grad():
return tensor.uniform_(-bound, bound, generator=generator)
def kaiming_normal_(
tensor: Tensor,
a: float = 0,
mode: str = "fan_in",
nonlinearity: str = "leaky_relu",
generator: _Optional[torch.Generator] = None,
):
r"""Fill the input `Tensor` with values using a Kaiming normal distribution.
The method is described in `Delving deep into rectifiers: Surpassing
human-level performance on ImageNet classification` - He, K. et al. (2015).
The resulting tensor will have values sampled from
:math:`\mathcal{N}(0, \text{std}^2)` where
.. math::
\text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}}
Also known as He initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
generator: the torch Generator to sample from (default: None)
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
Note:
Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
that the weight matrix is used in a transposed manner,
(i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
This is important for correct initialization.
If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
pass in a transposed weight matrix, i.e. ``nn.init.kaiming_normal_(w.T, ...)``.
"""
if 0 in tensor.shape:
warnings.warn("Initializing zero-element tensors is a no-op")
return tensor
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain(nonlinearity, a)
std = gain / math.sqrt(fan)
with torch.no_grad():
return tensor.normal_(0, std, generator=generator)
def orthogonal_(
tensor,
gain=1,
generator: _Optional[torch.Generator] = None,
):
r"""Fill the input `Tensor` with a (semi) orthogonal matrix.
Described in `Exact solutions to the nonlinear dynamics of learning in deep
linear neural networks` - Saxe, A. et al. (2013). The input tensor must have
at least 2 dimensions, and for tensors with more than 2 dimensions the
trailing dimensions are flattened.
Args:
tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2`
gain: optional scaling factor
generator: the torch Generator to sample from (default: None)
Examples:
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
>>> w = torch.empty(3, 5)
>>> nn.init.orthogonal_(w)
"""
if tensor.ndimension() < 2:
raise ValueError("Only tensors with 2 or more dimensions are supported")
if tensor.numel() == 0:
# no-op
return tensor
rows = tensor.size(0)
cols = tensor.numel() // rows
flattened = tensor.new_empty((rows, cols)).normal_(0, 1, generator=generator)
if rows < cols:
flattened.t_()
# Compute the qr factorization
q, r = torch.linalg.qr(flattened)
# Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
d = torch.diag(r, 0)
ph = d.sign()
q *= ph
if rows < cols:
q.t_()
with torch.no_grad():
tensor.view_as(q).copy_(q)
tensor.mul_(gain)
return tensor
def sparse_(
tensor,
sparsity,
std=0.01,
generator: _Optional[torch.Generator] = None,
):
r"""Fill the 2D input `Tensor` as a sparse matrix.
The non-zero elements will be drawn from the normal distribution
    :math:`\mathcal{N}(0, \text{std}^2)`, as described in `Deep learning via
Hessian-free optimization` - Martens, J. (2010).
Args:
tensor: an n-dimensional `torch.Tensor`
sparsity: The fraction of elements in each column to be set to zero
std: the standard deviation of the normal distribution used to generate
the non-zero values
generator: the torch Generator to sample from (default: None)
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.sparse_(w, sparsity=0.1)
"""
if tensor.ndimension() != 2:
raise ValueError("Only tensors with 2 dimensions are supported")
rows, cols = tensor.shape
num_zeros = int(math.ceil(sparsity * rows))
with torch.no_grad():
tensor.normal_(0, std, generator=generator)
for col_idx in range(cols):
row_indices = torch.randperm(rows)
zero_indices = row_indices[:num_zeros]
tensor[zero_indices, col_idx] = 0
return tensor
# for backward compatibility
def _make_deprecate(meth):
new_name = meth.__name__
old_name = new_name[:-1]
def deprecated_init(*args, **kwargs):
warnings.warn(
f"`nn.init.{old_name}` is now deprecated in favor of `nn.init.{new_name}`.",
FutureWarning,
stacklevel=2,
)
return meth(*args, **kwargs)
deprecated_init.__doc__ = rf"""
{old_name}(...)
.. warning::
This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.
See :func:`~torch.nn.init.{new_name}` for details."""
deprecated_init.__name__ = old_name
return deprecated_init
uniform = _make_deprecate(uniform_)
normal = _make_deprecate(normal_)
constant = _make_deprecate(constant_)
eye = _make_deprecate(eye_)
dirac = _make_deprecate(dirac_)
xavier_uniform = _make_deprecate(xavier_uniform_)
xavier_normal = _make_deprecate(xavier_normal_)
kaiming_uniform = _make_deprecate(kaiming_uniform_)
kaiming_normal = _make_deprecate(kaiming_normal_)
orthogonal = _make_deprecate(orthogonal_)
sparse = _make_deprecate(sparse_)
```
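A brief sketch of how these initializers compose (the layer sizes and the seeded `Generator` are illustrative assumptions): `calculate_gain` determines the Kaiming bound, the same generator makes all draws reproducible, and `orthogonal_` yields a (numerically) orthogonal square matrix.

```py
import math
import torch
import torch.nn as nn

g = torch.Generator().manual_seed(0)

# Kaiming/He init for a ReLU layer; for a 2-D weight, fan_in is size(1).
w = torch.empty(64, 128)
nn.init.kaiming_uniform_(w, mode="fan_in", nonlinearity="relu", generator=g)
bound = nn.init.calculate_gain("relu") * math.sqrt(3.0 / w.size(1))
assert w.abs().max().item() <= bound

# Xavier/Glorot init for a tanh layer, reproducible through the same generator.
w2 = torch.empty(128, 128)
nn.init.xavier_normal_(w2, gain=nn.init.calculate_gain("tanh"), generator=g)

# Orthogonal init: a square matrix comes back (numerically) orthogonal.
w3 = torch.empty(32, 32)
nn.init.orthogonal_(w3, generator=g)
assert torch.allclose(w3 @ w3.T, torch.eye(32), atol=1e-4)
```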
|
======================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.72 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\__init__.py
ENCODING: utf-8
```py
from torch.ao.nn.intrinsic import (
BNReLU2d,
BNReLU3d,
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
LinearBn1d,
LinearReLU,
)
from torch.ao.nn.intrinsic.modules.fused import _FusedModule # noqa: F401
# Include the subpackages in case user imports from it directly
from torch.nn.intrinsic import modules, qat, quantized # noqa: F401
__all__ = [
"ConvBn1d",
"ConvBn2d",
"ConvBn3d",
"ConvBnReLU1d",
"ConvBnReLU2d",
"ConvBnReLU3d",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"LinearReLU",
"BNReLU2d",
"BNReLU3d",
"LinearBn1d",
]
```
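This file, like the rest of the ``torch.nn.intrinsic`` tree below, is a thin compatibility shim: every class is re-exported from ``torch.ao.nn.intrinsic``, so both import paths hand back the same class objects. A quick sketch of what that implies:
```py
import torch.nn.intrinsic as nni
import torch.ao.nn.intrinsic as ao_nni

# The shim re-exports, it does not redefine: identity holds, not just equality.
assert nni.ConvReLU2d is ao_nni.ConvReLU2d
assert nni.LinearReLU is ao_nni.LinearReLU
```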
|
==============================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.54 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\modules\__init__.py
ENCODING: utf-8
```py
from torch.nn.intrinsic.modules.fused import (
_FusedModule,
BNReLU2d,
BNReLU3d,
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
LinearBn1d,
LinearReLU,
)
__all__ = [
"BNReLU2d",
"BNReLU3d",
"ConvBn1d",
"ConvBn2d",
"ConvBn3d",
"ConvBnReLU1d",
"ConvBnReLU2d",
"ConvBnReLU3d",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"LinearBn1d",
"LinearReLU",
]
```
|
===========================================================================================================================
SOURCE CODE FILE: fused.py
LINES: 1
SIZE: 0.58 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\modules\fused.py
ENCODING: utf-8
```py
from torch.ao.nn.intrinsic import (
BNReLU2d,
BNReLU3d,
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
LinearBn1d,
LinearReLU,
)
from torch.ao.nn.intrinsic.modules.fused import _FusedModule # noqa: F401
__all__ = [
"BNReLU2d",
"BNReLU3d",
"ConvBn1d",
"ConvBn2d",
"ConvBn3d",
"ConvBnReLU1d",
"ConvBnReLU2d",
"ConvBnReLU3d",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"LinearBn1d",
"LinearReLU",
]
```
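The fused classes re-exported here are what eager-mode fusion produces. A small sketch, assuming the standard ``torch.ao.quantization.fuse_modules`` workflow (the module names ``"0"`` and ``"1"`` come from the ``nn.Sequential`` container used in the example):
```py
import torch
import torch.nn as nn
import torch.ao.nn.intrinsic as nni
from torch.ao.quantization import fuse_modules

m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()).eval()
fused = fuse_modules(m, [["0", "1"]])  # fuse the (conv, relu) pair

assert isinstance(fused[0], nni.ConvReLU2d)  # the pair is replaced by one fused module
assert isinstance(fused[1], nn.Identity)     # the ReLU slot is stubbed out
out = fused(torch.randn(1, 3, 16, 16))
```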
|
==========================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.06 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\qat\__init__.py
ENCODING: utf-8
```py
from torch.nn.intrinsic.qat.modules import * # noqa: F403
```
|
==================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\qat\modules\__init__.py
ENCODING: utf-8
```py
from torch.nn.intrinsic.qat.modules.conv_fused import (
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
freeze_bn_stats,
update_bn_stats,
)
from torch.nn.intrinsic.qat.modules.linear_fused import LinearBn1d
from torch.nn.intrinsic.qat.modules.linear_relu import LinearReLU
__all__ = [
"LinearReLU",
"LinearBn1d",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"ConvBn1d",
"ConvBn2d",
"ConvBn3d",
"ConvBnReLU1d",
"ConvBnReLU2d",
"ConvBnReLU3d",
"update_bn_stats",
"freeze_bn_stats",
]
```
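``freeze_bn_stats`` and ``update_bn_stats`` are meant to be broadcast over a prepared QAT model with ``Module.apply``. A sketch of the eager-mode flow that produces the fused QAT modules listed above; the ``"fbgemm"`` qconfig is just an example backend choice:
```py
import torch
import torch.nn as nn
import torch.ao.nn.intrinsic.qat as nniqat
from torch.ao.quantization import fuse_modules_qat, get_default_qat_qconfig, prepare_qat

m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU()).train()
m = fuse_modules_qat(m, [["0", "1", "2"]])      # -> torch.ao.nn.intrinsic.ConvBnReLU2d
m.qconfig = get_default_qat_qconfig("fbgemm")
qat_model = prepare_qat(m)                      # -> nniqat.ConvBnReLU2d with fake-quant
assert isinstance(qat_model[0], nniqat.ConvBnReLU2d)

# Later in training, stop (or resume) updating the fused BN running statistics:
qat_model.apply(nniqat.freeze_bn_stats)
qat_model.apply(nniqat.update_bn_stats)
```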
|
====================================================================================================================================
SOURCE CODE FILE: conv_fused.py
LINES: 1
SIZE: 0.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\qat\modules\conv_fused.py
ENCODING: utf-8
```py
# flake8: noqa: F401
r"""Intrinsic QAT Modules.
This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
while adding an import statement here.
"""
from torch.ao.nn.intrinsic.qat import (
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
freeze_bn_stats,
update_bn_stats,
)
__all__ = [
# Modules
"ConvBn1d",
"ConvBnReLU1d",
"ConvReLU1d",
"ConvBn2d",
"ConvBnReLU2d",
"ConvReLU2d",
"ConvBn3d",
"ConvBnReLU3d",
"ConvReLU3d",
# Utilities
"freeze_bn_stats",
"update_bn_stats",
]
```
|
======================================================================================================================================
SOURCE CODE FILE: linear_fused.py
LINES: 1
SIZE: 0.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\qat\modules\linear_fused.py
ENCODING: utf-8
```py
# flake8: noqa: F401
r"""Intrinsic QAT Modules.
This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
while adding an import statement here.
"""
from torch.ao.nn.intrinsic.qat import LinearBn1d
__all__ = [
"LinearBn1d",
]
```
|
=====================================================================================================================================
SOURCE CODE FILE: linear_relu.py
LINES: 1
SIZE: 0.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\qat\modules\linear_relu.py
ENCODING: utf-8
```py
# flake8: noqa: F401
r"""Intrinsic QAT Modules.
This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
while adding an import statement here.
"""
from torch.ao.nn.intrinsic.qat import LinearReLU
__all__ = [
"LinearReLU",
]
```
|
================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\quantized\__init__.py
ENCODING: utf-8
```py
# to ensure customers can use the module below
# without importing it directly
from torch.nn.intrinsic.quantized import dynamic, modules # noqa: F401
from torch.nn.intrinsic.quantized.modules import * # noqa: F403
__all__ = [
"BNReLU2d",
"BNReLU3d",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"LinearReLU",
]
```
|
========================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\quantized\dynamic\__init__.py
ENCODING: utf-8
```py
from torch.nn.intrinsic.quantized.dynamic.modules import * # noqa: F403
```
|
================================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\quantized\dynamic\modules\__init__.py
ENCODING: utf-8
```py
from torch.nn.intrinsic.quantized.dynamic.modules.linear_relu import LinearReLU
__all__ = [
"LinearReLU",
]
```
|
===================================================================================================================================================
SOURCE CODE FILE: linear_relu.py
LINES: 1
SIZE: 0.10 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\quantized\dynamic\modules\linear_relu.py
ENCODING: utf-8
```py
from torch.ao.nn.intrinsic.quantized.dynamic import LinearReLU
__all__ = [
"LinearReLU",
]
```
|
========================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\quantized\modules\__init__.py
ENCODING: utf-8
```py
from torch.nn.intrinsic.quantized.modules.bn_relu import BNReLU2d, BNReLU3d
from torch.nn.intrinsic.quantized.modules.conv_relu import (
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
)
from torch.nn.intrinsic.quantized.modules.linear_relu import LinearReLU
__all__ = [
"LinearReLU",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"BNReLU2d",
"BNReLU3d",
]
```
|
=======================================================================================================================================
SOURCE CODE FILE: bn_relu.py
LINES: 1
SIZE: 0.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\quantized\modules\bn_relu.py
ENCODING: utf-8
```py
from torch.ao.nn.intrinsic.quantized import BNReLU2d, BNReLU3d
__all__ = [
"BNReLU2d",
"BNReLU3d",
]
```
|
=========================================================================================================================================
SOURCE CODE FILE: conv_relu.py
LINES: 1
SIZE: 0.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\quantized\modules\conv_relu.py
ENCODING: utf-8
```py
from torch.ao.nn.intrinsic.quantized import ConvReLU1d, ConvReLU2d, ConvReLU3d
__all__ = [
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
]
```
|
===========================================================================================================================================
SOURCE CODE FILE: linear_relu.py
LINES: 1
SIZE: 0.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\intrinsic\quantized\modules\linear_relu.py
ENCODING: utf-8
```py
from torch.ao.nn.intrinsic.quantized import LinearReLU
__all__ = [
"LinearReLU",
]
```
|
====================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 6.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\__init__.py
ENCODING: utf-8
```py
from .module import Module # usort: skip
from .linear import Bilinear, Identity, LazyLinear, Linear # usort: skip
from .activation import (
CELU,
ELU,
GELU,
GLU,
Hardshrink,
Hardsigmoid,
Hardswish,
Hardtanh,
LeakyReLU,
LogSigmoid,
LogSoftmax,
Mish,
MultiheadAttention,
PReLU,
ReLU,
ReLU6,
RReLU,
SELU,
Sigmoid,
SiLU,
Softmax,
Softmax2d,
Softmin,
Softplus,
Softshrink,
Softsign,
Tanh,
Tanhshrink,
Threshold,
)
from .adaptive import AdaptiveLogSoftmaxWithLoss
from .batchnorm import (
BatchNorm1d,
BatchNorm2d,
BatchNorm3d,
LazyBatchNorm1d,
LazyBatchNorm2d,
LazyBatchNorm3d,
SyncBatchNorm,
)
from .channelshuffle import ChannelShuffle
from .container import (
Container,
ModuleDict,
ModuleList,
ParameterDict,
ParameterList,
Sequential,
)
from .conv import (
Conv1d,
Conv2d,
Conv3d,
ConvTranspose1d,
ConvTranspose2d,
ConvTranspose3d,
LazyConv1d,
LazyConv2d,
LazyConv3d,
LazyConvTranspose1d,
LazyConvTranspose2d,
LazyConvTranspose3d,
)
from .distance import CosineSimilarity, PairwiseDistance
from .dropout import (
AlphaDropout,
Dropout,
Dropout1d,
Dropout2d,
Dropout3d,
FeatureAlphaDropout,
)
from .flatten import Flatten, Unflatten
from .fold import Fold, Unfold
from .instancenorm import (
InstanceNorm1d,
InstanceNorm2d,
InstanceNorm3d,
LazyInstanceNorm1d,
LazyInstanceNorm2d,
LazyInstanceNorm3d,
)
from .loss import (
BCELoss,
BCEWithLogitsLoss,
CosineEmbeddingLoss,
CrossEntropyLoss,
CTCLoss,
GaussianNLLLoss,
HingeEmbeddingLoss,
HuberLoss,
KLDivLoss,
L1Loss,
MarginRankingLoss,
MSELoss,
MultiLabelMarginLoss,
MultiLabelSoftMarginLoss,
MultiMarginLoss,
NLLLoss,
NLLLoss2d,
PoissonNLLLoss,
SmoothL1Loss,
SoftMarginLoss,
TripletMarginLoss,
TripletMarginWithDistanceLoss,
)
from .normalization import (
CrossMapLRN2d,
GroupNorm,
LayerNorm,
LocalResponseNorm,
RMSNorm,
)
from .padding import (
CircularPad1d,
CircularPad2d,
CircularPad3d,
ConstantPad1d,
ConstantPad2d,
ConstantPad3d,
ReflectionPad1d,
ReflectionPad2d,
ReflectionPad3d,
ReplicationPad1d,
ReplicationPad2d,
ReplicationPad3d,
ZeroPad1d,
ZeroPad2d,
ZeroPad3d,
)
from .pixelshuffle import PixelShuffle, PixelUnshuffle
from .pooling import (
AdaptiveAvgPool1d,
AdaptiveAvgPool2d,
AdaptiveAvgPool3d,
AdaptiveMaxPool1d,
AdaptiveMaxPool2d,
AdaptiveMaxPool3d,
AvgPool1d,
AvgPool2d,
AvgPool3d,
FractionalMaxPool2d,
FractionalMaxPool3d,
LPPool1d,
LPPool2d,
LPPool3d,
MaxPool1d,
MaxPool2d,
MaxPool3d,
MaxUnpool1d,
MaxUnpool2d,
MaxUnpool3d,
)
from .rnn import GRU, GRUCell, LSTM, LSTMCell, RNN, RNNBase, RNNCell, RNNCellBase
from .sparse import Embedding, EmbeddingBag
from .transformer import (
Transformer,
TransformerDecoder,
TransformerDecoderLayer,
TransformerEncoder,
TransformerEncoderLayer,
)
from .upsampling import Upsample, UpsamplingBilinear2d, UpsamplingNearest2d
__all__ = [
"AdaptiveAvgPool1d",
"AdaptiveAvgPool2d",
"AdaptiveAvgPool3d",
"AdaptiveLogSoftmaxWithLoss",
"AdaptiveMaxPool1d",
"AdaptiveMaxPool2d",
"AdaptiveMaxPool3d",
"AlphaDropout",
"AvgPool1d",
"AvgPool2d",
"AvgPool3d",
"BCELoss",
"BCEWithLogitsLoss",
"BatchNorm1d",
"BatchNorm2d",
"BatchNorm3d",
"Bilinear",
"CELU",
"CTCLoss",
"ChannelShuffle",
"CircularPad1d",
"CircularPad2d",
"CircularPad3d",
"ConstantPad1d",
"ConstantPad2d",
"ConstantPad3d",
"Container",
"Conv1d",
"Conv2d",
"Conv3d",
"ConvTranspose1d",
"ConvTranspose2d",
"ConvTranspose3d",
"CosineEmbeddingLoss",
"CosineSimilarity",
"CrossEntropyLoss",
"CrossMapLRN2d",
"Dropout",
"Dropout1d",
"Dropout2d",
"Dropout3d",
"ELU",
"Embedding",
"EmbeddingBag",
"FeatureAlphaDropout",
"Flatten",
"Fold",
"FractionalMaxPool2d",
"FractionalMaxPool3d",
"GELU",
"GLU",
"GRU",
"GRUCell",
"GaussianNLLLoss",
"GroupNorm",
"Hardshrink",
"Hardsigmoid",
"Hardswish",
"Hardtanh",
"HingeEmbeddingLoss",
"HuberLoss",
"Identity",
"InstanceNorm1d",
"InstanceNorm2d",
"InstanceNorm3d",
"KLDivLoss",
"L1Loss",
"LPPool1d",
"LPPool2d",
"LPPool3d",
"LSTM",
"LSTMCell",
"LayerNorm",
"LazyBatchNorm1d",
"LazyBatchNorm2d",
"LazyBatchNorm3d",
"LazyConv1d",
"LazyConv2d",
"LazyConv3d",
"LazyConvTranspose1d",
"LazyConvTranspose2d",
"LazyConvTranspose3d",
"LazyInstanceNorm1d",
"LazyInstanceNorm2d",
"LazyInstanceNorm3d",
"LazyLinear",
"LeakyReLU",
"Linear",
"LocalResponseNorm",
"LogSigmoid",
"LogSoftmax",
"MSELoss",
"MarginRankingLoss",
"MaxPool1d",
"MaxPool2d",
"MaxPool3d",
"MaxUnpool1d",
"MaxUnpool2d",
"MaxUnpool3d",
"Mish",
"Module",
"ModuleDict",
"ModuleList",
"MultiLabelMarginLoss",
"MultiLabelSoftMarginLoss",
"MultiMarginLoss",
"MultiheadAttention",
"NLLLoss",
"NLLLoss2d",
"PReLU",
"PairwiseDistance",
"ParameterDict",
"ParameterList",
"PixelShuffle",
"PixelUnshuffle",
"PoissonNLLLoss",
"RMSNorm",
"RNN",
"RNNBase",
"RNNCell",
"RNNCellBase",
"RReLU",
"ReLU",
"ReLU6",
"ReflectionPad1d",
"ReflectionPad2d",
"ReflectionPad3d",
"ReplicationPad1d",
"ReplicationPad2d",
"ReplicationPad3d",
"SELU",
"Sequential",
"SiLU",
"Sigmoid",
"SmoothL1Loss",
"SoftMarginLoss",
"Softmax",
"Softmax2d",
"Softmin",
"Softplus",
"Softshrink",
"Softsign",
"SyncBatchNorm",
"Tanh",
"Tanhshrink",
"Threshold",
"Transformer",
"TransformerDecoder",
"TransformerDecoderLayer",
"TransformerEncoder",
"TransformerEncoderLayer",
"TripletMarginLoss",
"TripletMarginWithDistanceLoss",
"Unflatten",
"Unfold",
"Upsample",
"UpsamplingBilinear2d",
"UpsamplingNearest2d",
"ZeroPad1d",
"ZeroPad2d",
"ZeroPad3d",
]
# Please keep this list sorted
assert __all__ == sorted(__all__)
```
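A short sketch of what the sorted-``__all__`` assertion at the bottom of this file guarantees, together with the usual re-export of these names at the ``torch.nn`` top level:
```py
import torch.nn as nn
import torch.nn.modules as modules

# Every name exported here is also reachable directly from torch.nn.
assert all(hasattr(nn, name) for name in modules.__all__)

# The import-time assert in the file enforces alphabetical ordering of the export list.
assert modules.__all__ == sorted(modules.__all__)
```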
|
======================================================================================================================
SOURCE CODE FILE: _functions.py
LINES: 1
SIZE: 12.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\_functions.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
import torch.distributed as dist
from torch.autograd.function import Function
class SyncBatchNorm(Function):
@staticmethod
def forward(
self,
input,
weight,
bias,
running_mean,
running_var,
eps,
momentum,
process_group,
world_size,
):
if not (
input.is_contiguous(memory_format=torch.channels_last)
or input.is_contiguous(memory_format=torch.channels_last_3d)
):
input = input.contiguous()
if weight is not None:
weight = weight.contiguous()
size = int(input.numel() // input.size(1))
if size == 1 and world_size < 2:
raise ValueError(
f"Expected more than 1 value per channel when training, got input size {size}"
)
num_channels = input.shape[1]
if input.numel() > 0:
# calculate mean/invstd for input.
mean, invstd = torch.batch_norm_stats(input, eps)
count = torch.full(
(1,),
input.numel() // input.size(1),
dtype=mean.dtype,
device=mean.device,
)
# C, C, 1 -> (2C + 1)
combined = torch.cat([mean, invstd, count], dim=0)
else:
# for empty input, set stats and the count to zero. The stats with
# zero count will be filtered out later when computing global mean
# & invstd, but they still need to participate in the all_gather
# collective communication to unblock other peer processes.
combined = torch.zeros(
2 * num_channels + 1, dtype=input.dtype, device=input.device
)
# Use allgather instead of allreduce because count could be different across
# ranks; a simple all-reduce op cannot give correct results.
# batch_norm_gather_stats_with_counts calculates global mean & invstd based on
# all gathered mean, invstd and count.
# for nccl backend, use the optimized version of all gather.
# The Gloo backend does not support `all_gather_into_tensor`.
if process_group._get_backend_name() != "gloo":
# world_size * (2C + 1)
combined_size = combined.numel()
combined_flat = torch.empty(
1,
combined_size * world_size,
dtype=combined.dtype,
device=combined.device,
)
dist.all_gather_into_tensor(
combined_flat, combined, process_group, async_op=False
)
combined = torch.reshape(combined_flat, (world_size, combined_size))
# world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
else:
# world_size * (2C + 1)
combined_list = [torch.empty_like(combined) for _ in range(world_size)]
dist.all_gather(combined_list, combined, process_group, async_op=False)
combined = torch.stack(combined_list, dim=0)
# world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
if not (torch.cuda.is_available() and torch.cuda.is_current_stream_capturing()):
# The lines below force a synchronization between CUDA and CPU, because
# the shape of the result count_all depends on the values in mask tensor.
# Such synchronizations break CUDA Graph capturing.
# See https://github.com/pytorch/pytorch/issues/78549
# FIXME: https://github.com/pytorch/pytorch/issues/78656 describes
# a better longer-term solution.
# remove stats from empty inputs
mask = count_all.squeeze(-1) >= 1
count_all = count_all[mask]
mean_all = mean_all[mask]
invstd_all = invstd_all[mask]
# calculate global mean & invstd
counts = count_all.view(-1)
if running_mean is not None and counts.dtype != running_mean.dtype:
counts = counts.to(running_mean.dtype)
mean, invstd = torch.batch_norm_gather_stats_with_counts(
input,
mean_all,
invstd_all,
running_mean,
running_var,
momentum,
eps,
counts,
)
self.save_for_backward(input, weight, mean, invstd, count_all.to(torch.int32))
self.process_group = process_group
# apply element-wise normalization
if input.numel() > 0:
return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
else:
return torch.empty_like(input)
@staticmethod
def backward(self, grad_output):
if not (
grad_output.is_contiguous(memory_format=torch.channels_last)
or grad_output.is_contiguous(memory_format=torch.channels_last_3d)
):
grad_output = grad_output.contiguous()
saved_input, weight, mean, invstd, count_tensor = self.saved_tensors
grad_input = grad_weight = grad_bias = None
process_group = self.process_group
if saved_input.numel() > 0:
# calculate local stats as well as grad_weight / grad_bias
(
sum_dy,
sum_dy_xmu,
grad_weight,
grad_bias,
) = torch.batch_norm_backward_reduce(
grad_output,
saved_input,
mean,
invstd,
weight,
self.needs_input_grad[0],
self.needs_input_grad[1],
self.needs_input_grad[2],
)
if self.needs_input_grad[0]:
# synchronizing stats used to calculate input gradient.
num_channels = sum_dy.shape[0]
combined = torch.cat([sum_dy, sum_dy_xmu], dim=0)
torch.distributed.all_reduce(
combined,
torch.distributed.ReduceOp.SUM,
process_group,
async_op=False,
)
sum_dy, sum_dy_xmu = torch.split(combined, num_channels)
# backward pass for gradient calculation
if weight is not None and weight.dtype != mean.dtype:
weight = weight.to(mean.dtype)
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy,
sum_dy_xmu,
count_tensor,
)
# synchronizing grad_weight / grad_bias is not needed, as distributed
# training handles the all-reduce of gradients.
if weight is None or not self.needs_input_grad[1]:
grad_weight = None
if weight is None or not self.needs_input_grad[2]:
grad_bias = None
else:
# This process got an empty input tensor in the forward pass.
# Although this process can directly set grad_input as an empty
# tensor of zeros, it still needs to participate in the collective
# communication to unblock its peers, as other peer processes might
# have received non-empty inputs.
num_channels = saved_input.shape[1]
if self.needs_input_grad[0]:
# launch all_reduce to unblock other peer processes
combined = torch.zeros(
2 * num_channels, dtype=saved_input.dtype, device=saved_input.device
)
torch.distributed.all_reduce(
combined,
torch.distributed.ReduceOp.SUM,
process_group,
async_op=False,
)
# Leave grad_input, grad_weight and grad_bias as None, which will be
# interpreted by the autograd engine as Tensors full of zeros.
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
class CrossMapLRN2d(Function):
@staticmethod
def forward(ctx, input, size, alpha=1e-4, beta=0.75, k=1):
ctx.size = size
ctx.alpha = alpha
ctx.beta = beta
ctx.k = k
ctx.scale = None
if input.dim() != 4:
raise ValueError(
f"CrossMapLRN2d: Expected input to be 4D, got {input.dim()}D instead."
)
ctx.scale = ctx.scale or input.new()
output = input.new()
channels = input.size(1)
output.resize_as_(input)
ctx.scale.resize_as_(input)
# use output storage as temporary buffer
input_square = output
torch.pow(input, 2, out=input_square)
pre_pad = int((ctx.size - 1) / 2 + 1)
pre_pad_crop = min(pre_pad, channels)
scale_first = ctx.scale.select(1, 0)
scale_first.zero_()
# compute first feature map normalization
for c in range(pre_pad_crop):
scale_first.add_(input_square.select(1, c))
# reuse computations for next feature maps normalization
# by adding the next feature map and removing the previous
for c in range(1, channels):
scale_previous = ctx.scale.select(1, c - 1)
scale_current = ctx.scale.select(1, c)
scale_current.copy_(scale_previous)
if c < channels - pre_pad + 1:
square_next = input_square.select(1, c + pre_pad - 1)
scale_current.add_(square_next, alpha=1)
if c > pre_pad:
square_previous = input_square.select(1, c - pre_pad)
scale_current.add_(square_previous, alpha=-1)
ctx.scale.mul_(ctx.alpha / ctx.size).add_(ctx.k)
torch.pow(ctx.scale, -ctx.beta, out=output)
output.mul_(input)
ctx.save_for_backward(input, output)
return output
@staticmethod
def backward(ctx, grad_output):
input, output = ctx.saved_tensors
grad_input = grad_output.new()
batch_size = input.size(0)
channels = input.size(1)
input_height = input.size(2)
input_width = input.size(3)
paddded_ratio = input.new(channels + ctx.size - 1, input_height, input_width)
accum_ratio = input.new(input_height, input_width)
cache_ratio_value = 2 * ctx.alpha * ctx.beta / ctx.size
inversePrePad = int(ctx.size - (ctx.size - 1) / 2)
grad_input.resize_as_(input)
torch.pow(ctx.scale, -ctx.beta, out=grad_input).mul_(grad_output)
paddded_ratio.zero_()
padded_ratio_center = paddded_ratio.narrow(0, inversePrePad, channels)
for n in range(batch_size):
torch.mul(grad_output[n], output[n], out=padded_ratio_center)
padded_ratio_center.div_(ctx.scale[n])
torch.sum(
paddded_ratio.narrow(0, 0, ctx.size - 1),
0,
keepdim=False,
out=accum_ratio,
)
for c in range(channels):
accum_ratio.add_(paddded_ratio[c + ctx.size - 1])
grad_input[n][c].addcmul_(
input[n][c], accum_ratio, value=-cache_ratio_value
)
accum_ratio.add_(paddded_ratio[c], alpha=-1)
return grad_input, None, None, None, None
class BackwardHookFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
ctx.mark_non_differentiable(*[arg for arg in args if not arg.requires_grad])
return args
@staticmethod
def backward(ctx, *args):
return args
```
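The ``SyncBatchNorm`` autograd ``Function`` above is normally reached through ``torch.nn.SyncBatchNorm``, whose collective path only activates during distributed training with ``world_size > 1`` on a GPU-like device. A minimal sketch of the usual entry point, ``convert_sync_batchnorm``:
```py
import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU())

# Swap every BatchNorm*d for SyncBatchNorm; under an initialized process group the
# forward pass then gathers per-rank mean/invstd exactly as in the Function above.
sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
assert isinstance(sync_model[1], nn.SyncBatchNorm)

# In eval mode (or without an initialized process group) the layer falls back to the
# framework batch norm, so the converted model still runs on a single CPU process.
out = sync_model.eval()(torch.randn(2, 3, 8, 8))
```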
|
======================================================================================================================
SOURCE CODE FILE: activation.py
LINES: 1
SIZE: 58.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\activation.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import warnings
from typing import Optional
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.parameter import Parameter
from .linear import NonDynamicallyQuantizableLinear
from .module import Module
__all__ = [
"Threshold",
"ReLU",
"RReLU",
"Hardtanh",
"ReLU6",
"Sigmoid",
"Hardsigmoid",
"Tanh",
"SiLU",
"Mish",
"Hardswish",
"ELU",
"CELU",
"SELU",
"GLU",
"GELU",
"Hardshrink",
"LeakyReLU",
"LogSigmoid",
"Softplus",
"Softshrink",
"MultiheadAttention",
"PReLU",
"Softsign",
"Tanhshrink",
"Softmin",
"Softmax",
"Softmax2d",
"LogSoftmax",
]
class Threshold(Module):
r"""Thresholds each element of the input Tensor.
Threshold is defined as:
.. math::
y =
\begin{cases}
x, &\text{ if } x > \text{threshold} \\
\text{value}, &\text{ otherwise }
\end{cases}
Args:
threshold: The value to threshold at
value: The value to replace with
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
Examples::
>>> m = nn.Threshold(0.1, 20)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["threshold", "value", "inplace"]
threshold: float
value: float
inplace: bool
def __init__(self, threshold: float, value: float, inplace: bool = False) -> None:
super().__init__()
self.threshold = threshold
self.value = value
self.inplace = inplace
# TODO: check in THNN (if inplace == True, then assert value <= threshold)
def forward(self, input: Tensor) -> Tensor:
return F.threshold(input, self.threshold, self.value, self.inplace)
def extra_repr(self):
inplace_str = ", inplace=True" if self.inplace else ""
return f"threshold={self.threshold}, value={self.value}{inplace_str}"
class ReLU(Module):
r"""Applies the rectified linear unit function element-wise.
:math:`\text{ReLU}(x) = (x)^+ = \max(0, x)`
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/ReLU.png
Examples::
>>> m = nn.ReLU()
>>> input = torch.randn(2)
>>> output = m(input)
An implementation of CReLU - https://arxiv.org/abs/1603.05201
>>> m = nn.ReLU()
>>> input = torch.randn(2).unsqueeze(0)
>>> output = torch.cat((m(input), m(-input)))
"""
__constants__ = ["inplace"]
inplace: bool
def __init__(self, inplace: bool = False):
super().__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.relu(input, inplace=self.inplace)
def extra_repr(self) -> str:
inplace_str = "inplace=True" if self.inplace else ""
return inplace_str
class RReLU(Module):
r"""Applies the randomized leaky rectified linear unit function, element-wise.
Method described in the paper:
`Empirical Evaluation of Rectified Activations in Convolutional Network <https://arxiv.org/abs/1505.00853>`_.
The function is defined as:
.. math::
\text{RReLU}(x) =
\begin{cases}
x & \text{if } x \geq 0 \\
ax & \text{ otherwise }
\end{cases}
where :math:`a` is randomly sampled from uniform distribution
:math:`\mathcal{U}(\text{lower}, \text{upper})` during training while during
evaluation :math:`a` is fixed with :math:`a = \frac{\text{lower} + \text{upper}}{2}`.
Args:
lower: lower bound of the uniform distribution. Default: :math:`\frac{1}{8}`
upper: upper bound of the uniform distribution. Default: :math:`\frac{1}{3}`
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/RReLU.png
Examples::
>>> m = nn.RReLU(0.1, 0.3)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["lower", "upper", "inplace"]
lower: float
upper: float
inplace: bool
def __init__(
self, lower: float = 1.0 / 8, upper: float = 1.0 / 3, inplace: bool = False
):
super().__init__()
self.lower = lower
self.upper = upper
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.rrelu(input, self.lower, self.upper, self.training, self.inplace)
def extra_repr(self):
inplace_str = ", inplace=True" if self.inplace else ""
return f"lower={self.lower}, upper={self.upper}{inplace_str}"
class Hardtanh(Module):
r"""Applies the HardTanh function element-wise.
HardTanh is defined as:
.. math::
\text{HardTanh}(x) = \begin{cases}
\text{max\_val} & \text{ if } x > \text{ max\_val } \\
\text{min\_val} & \text{ if } x < \text{ min\_val } \\
x & \text{ otherwise } \\
\end{cases}
Args:
min_val: minimum value of the linear region range. Default: -1
max_val: maximum value of the linear region range. Default: 1
inplace: can optionally do the operation in-place. Default: ``False``
Keyword arguments :attr:`min_value` and :attr:`max_value`
have been deprecated in favor of :attr:`min_val` and :attr:`max_val`.
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardtanh.png
Examples::
>>> m = nn.Hardtanh(-2, 2)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["min_val", "max_val", "inplace"]
min_val: float
max_val: float
inplace: bool
def __init__(
self,
min_val: float = -1.0,
max_val: float = 1.0,
inplace: bool = False,
min_value: Optional[float] = None,
max_value: Optional[float] = None,
) -> None:
super().__init__()
if min_value is not None:
warnings.warn(
"keyword argument `min_value` is deprecated and has been renamed to `min_val`",
FutureWarning,
stacklevel=2,
)
min_val = min_value
if max_value is not None:
warnings.warn(
"keyword argument `max_value` is deprecated and has been renamed to `max_val`",
FutureWarning,
stacklevel=2,
)
max_val = max_value
self.min_val = min_val
self.max_val = max_val
self.inplace = inplace
assert self.max_val > self.min_val
def forward(self, input: Tensor) -> Tensor:
return F.hardtanh(input, self.min_val, self.max_val, self.inplace)
def extra_repr(self) -> str:
inplace_str = ", inplace=True" if self.inplace else ""
return f"min_val={self.min_val}, max_val={self.max_val}{inplace_str}"
class ReLU6(Hardtanh):
r"""Applies the ReLU6 function element-wise.
.. math::
\text{ReLU6}(x) = \min(\max(0,x), 6)
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/ReLU6.png
Examples::
>>> m = nn.ReLU6()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, inplace: bool = False):
super().__init__(0.0, 6.0, inplace)
def extra_repr(self) -> str:
inplace_str = "inplace=True" if self.inplace else ""
return inplace_str
class Sigmoid(Module):
r"""Applies the Sigmoid function element-wise.
.. math::
\text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Sigmoid.png
Examples::
>>> m = nn.Sigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return torch.sigmoid(input)
class Hardsigmoid(Module):
r"""Applies the Hardsigmoid function element-wise.
Hardsigmoid is defined as:
.. math::
\text{Hardsigmoid}(x) = \begin{cases}
0 & \text{if~} x \le -3, \\
1 & \text{if~} x \ge +3, \\
x / 6 + 1 / 2 & \text{otherwise}
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardsigmoid.png
Examples::
>>> m = nn.Hardsigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["inplace"]
inplace: bool
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.hardsigmoid(input, self.inplace)
class Tanh(Module):
r"""Applies the Hyperbolic Tangent (Tanh) function element-wise.
Tanh is defined as:
.. math::
\text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)} {\exp(x) + \exp(-x)}
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Tanh.png
Examples::
>>> m = nn.Tanh()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return torch.tanh(input)
class SiLU(Module):
r"""Applies the Sigmoid Linear Unit (SiLU) function, element-wise.
The SiLU function is also known as the swish function.
.. math::
\text{silu}(x) = x * \sigma(x), \text{where } \sigma(x) \text{ is the logistic sigmoid.}
.. note::
See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_
where the SiLU (Sigmoid Linear Unit) was originally coined, and see
`Sigmoid-Weighted Linear Units for Neural Network Function Approximation
in Reinforcement Learning <https://arxiv.org/abs/1702.03118>`_ and `Swish:
a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941v1>`_
where the SiLU was experimented with later.
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/SiLU.png
Examples::
>>> m = nn.SiLU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["inplace"]
inplace: bool
def __init__(self, inplace: bool = False):
super().__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.silu(input, inplace=self.inplace)
def extra_repr(self) -> str:
inplace_str = "inplace=True" if self.inplace else ""
return inplace_str
class Mish(Module):
r"""Applies the Mish function, element-wise.
Mish: A Self Regularized Non-Monotonic Neural Activation Function.
.. math::
\text{Mish}(x) = x * \text{Tanh}(\text{Softplus}(x))
.. note::
See `Mish: A Self Regularized Non-Monotonic Neural Activation Function <https://arxiv.org/abs/1908.08681>`_
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Mish.png
Examples::
>>> m = nn.Mish()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["inplace"]
inplace: bool
def __init__(self, inplace: bool = False):
super().__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.mish(input, inplace=self.inplace)
def extra_repr(self) -> str:
inplace_str = "inplace=True" if self.inplace else ""
return inplace_str
class Hardswish(Module):
r"""Applies the Hardswish function, element-wise.
Method described in the paper: `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`_.
Hardswish is defined as:
.. math::
\text{Hardswish}(x) = \begin{cases}
0 & \text{if~} x \le -3, \\
x & \text{if~} x \ge +3, \\
x \cdot (x + 3) /6 & \text{otherwise}
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardswish.png
Examples::
>>> m = nn.Hardswish()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["inplace"]
inplace: bool
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.hardswish(input, self.inplace)
class ELU(Module):
r"""Applies the Exponential Linear Unit (ELU) function, element-wise.
Method described in the paper: `Fast and Accurate Deep Network Learning by Exponential Linear
Units (ELUs) <https://arxiv.org/abs/1511.07289>`__.
ELU is defined as:
.. math::
\text{ELU}(x) = \begin{cases}
x, & \text{ if } x > 0\\
\alpha * (\exp(x) - 1), & \text{ if } x \leq 0
\end{cases}
Args:
alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/ELU.png
Examples::
>>> m = nn.ELU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["alpha", "inplace"]
alpha: float
inplace: bool
def __init__(self, alpha: float = 1.0, inplace: bool = False) -> None:
super().__init__()
self.alpha = alpha
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.elu(input, self.alpha, self.inplace)
def extra_repr(self) -> str:
inplace_str = ", inplace=True" if self.inplace else ""
return f"alpha={self.alpha}{inplace_str}"
class CELU(Module):
r"""Applies the CELU function element-wise.
.. math::
\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))
More details can be found in the paper `Continuously Differentiable Exponential Linear Units`_ .
Args:
alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/CELU.png
Examples::
>>> m = nn.CELU()
>>> input = torch.randn(2)
>>> output = m(input)
.. _`Continuously Differentiable Exponential Linear Units`:
https://arxiv.org/abs/1704.07483
"""
__constants__ = ["alpha", "inplace"]
alpha: float
inplace: bool
def __init__(self, alpha: float = 1.0, inplace: bool = False) -> None:
super().__init__()
self.alpha = alpha
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.celu(input, self.alpha, self.inplace)
def extra_repr(self) -> str:
inplace_str = ", inplace=True" if self.inplace else ""
return f"alpha={self.alpha}{inplace_str}"
class SELU(Module):
r"""Applies the SELU function element-wise.
.. math::
\text{SELU}(x) = \text{scale} * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))
with :math:`\alpha = 1.6732632423543772848170429916717` and
:math:`\text{scale} = 1.0507009873554804934193349852946`.
.. warning::
When using ``kaiming_normal`` or ``kaiming_normal_`` for initialisation,
``nonlinearity='linear'`` should be used instead of ``nonlinearity='selu'``
in order to get `Self-Normalizing Neural Networks`_.
See :func:`torch.nn.init.calculate_gain` for more information.
More details can be found in the paper `Self-Normalizing Neural Networks`_ .
Args:
inplace (bool, optional): can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/SELU.png
Examples::
>>> m = nn.SELU()
>>> input = torch.randn(2)
>>> output = m(input)
.. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
"""
__constants__ = ["inplace"]
inplace: bool
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.selu(input, self.inplace)
def extra_repr(self) -> str:
inplace_str = "inplace=True" if self.inplace else ""
return inplace_str
class GLU(Module):
r"""Applies the gated linear unit function.
:math:`{GLU}(a, b)= a \otimes \sigma(b)` where :math:`a` is the first half
of the input matrices and :math:`b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
Shape:
- Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional
dimensions
- Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`
Examples::
>>> m = nn.GLU()
>>> input = torch.randn(4, 2)
>>> output = m(input)
"""
__constants__ = ["dim"]
dim: int
def __init__(self, dim: int = -1) -> None:
super().__init__()
self.dim = dim
def forward(self, input: Tensor) -> Tensor:
return F.glu(input, self.dim)
def extra_repr(self) -> str:
return f"dim={self.dim}"
class GELU(Module):
r"""Applies the Gaussian Error Linear Units function.
.. math:: \text{GELU}(x) = x * \Phi(x)
where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
When the approximate argument is 'tanh', Gelu is estimated with:
.. math:: \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))
Args:
approximate (str, optional): the gelu approximation algorithm to use:
``'none'`` | ``'tanh'``. Default: ``'none'``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/GELU.png
Examples::
>>> m = nn.GELU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["approximate"]
approximate: str
def __init__(self, approximate: str = "none") -> None:
super().__init__()
self.approximate = approximate
def forward(self, input: Tensor) -> Tensor:
return F.gelu(input, approximate=self.approximate)
def extra_repr(self) -> str:
return f"approximate={repr(self.approximate)}"
class Hardshrink(Module):
r"""Applies the Hard Shrinkage (Hardshrink) function element-wise.
Hardshrink is defined as:
.. math::
\text{HardShrink}(x) =
\begin{cases}
x, & \text{ if } x > \lambda \\
x, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` value for the Hardshrink formulation. Default: 0.5
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardshrink.png
Examples::
>>> m = nn.Hardshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["lambd"]
lambd: float
def __init__(self, lambd: float = 0.5) -> None:
super().__init__()
self.lambd = lambd
def forward(self, input: Tensor) -> Tensor:
return F.hardshrink(input, self.lambd)
def extra_repr(self) -> str:
return f"{self.lambd}"
class LeakyReLU(Module):
r"""Applies the LeakyReLU function element-wise.
.. math::
\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)
or
.. math::
\text{LeakyReLU}(x) =
\begin{cases}
x, & \text{ if } x \geq 0 \\
\text{negative\_slope} \times x, & \text{ otherwise }
\end{cases}
Args:
negative_slope: Controls the angle of the negative slope (which is used for
negative input values). Default: 1e-2
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
.. image:: ../scripts/activation_images/LeakyReLU.png
Examples::
>>> m = nn.LeakyReLU(0.1)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["inplace", "negative_slope"]
inplace: bool
negative_slope: float
def __init__(self, negative_slope: float = 1e-2, inplace: bool = False) -> None:
super().__init__()
self.negative_slope = negative_slope
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.leaky_relu(input, self.negative_slope, self.inplace)
def extra_repr(self) -> str:
inplace_str = ", inplace=True" if self.inplace else ""
return f"negative_slope={self.negative_slope}{inplace_str}"
class LogSigmoid(Module):
r"""Applies the Logsigmoid function element-wise.
.. math::
\text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/LogSigmoid.png
Examples::
>>> m = nn.LogSigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return F.logsigmoid(input)
class Softplus(Module):
r"""Applies the Softplus function element-wise.
.. math::
\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))
SoftPlus is a smooth approximation to the ReLU function and can be used
to constrain the output of a machine to always be positive.
For numerical stability the implementation reverts to the linear function
when :math:`input \times \beta > threshold`.
Args:
beta: the :math:`\beta` value for the Softplus formulation. Default: 1
threshold: values above this revert to a linear function. Default: 20
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softplus.png
Examples::
>>> m = nn.Softplus()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["beta", "threshold"]
beta: float
threshold: float
def __init__(self, beta: float = 1.0, threshold: float = 20.0) -> None:
super().__init__()
self.beta = beta
self.threshold = threshold
def forward(self, input: Tensor) -> Tensor:
return F.softplus(input, self.beta, self.threshold)
def extra_repr(self) -> str:
return f"beta={self.beta}, threshold={self.threshold}"
class Softshrink(Module):
r"""Applies the soft shrinkage function element-wise.
.. math::
\text{SoftShrinkage}(x) =
\begin{cases}
x - \lambda, & \text{ if } x > \lambda \\
x + \lambda, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` (must be no less than zero) value for the Softshrink formulation. Default: 0.5
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softshrink.png
Examples::
>>> m = nn.Softshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["lambd"]
lambd: float
def __init__(self, lambd: float = 0.5) -> None:
super().__init__()
self.lambd = lambd
def forward(self, input: Tensor) -> Tensor:
return F.softshrink(input, self.lambd)
def extra_repr(self) -> str:
return str(self.lambd)
def _check_arg_device(x: Optional[torch.Tensor]) -> bool:
if x is not None:
return x.device.type in [
"cpu",
"cuda",
torch.utils.backend_registration._privateuse1_backend_name,
]
return True
def _arg_requires_grad(x: Optional[torch.Tensor]) -> bool:
if x is not None:
return x.requires_grad
return False
def _is_make_fx_tracing():
if not torch.jit.is_scripting():
torch_dispatch_mode_stack = (
torch.utils._python_dispatch._get_current_dispatch_mode_stack()
)
return any(
type(x) == torch.fx.experimental.proxy_tensor.ProxyTorchDispatchMode
for x in torch_dispatch_mode_stack
)
else:
return False
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information from different representation subspaces.
.. note::
See `this tutorial <https://pytorch.org/tutorials/intermediate/transformer_building_blocks.html>`_
for an in depth discussion of the performant building blocks PyTorch offers for building your own
transformer layers.
Method described in the paper:
`Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
Multi-Head Attention is defined as:
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(\text{head}_1,\dots,\text{head}_h)W^O
where :math:`\text{head}_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
``nn.MultiheadAttention`` will use the optimized implementations of
``scaled_dot_product_attention()`` when possible.
In addition to support for the new ``scaled_dot_product_attention()``
function, for speeding up Inference, MHA will use
fastpath inference with support for Nested Tensors, iff:
- self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor).
- inputs are batched (3D) with ``batch_first==True``
- Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``
- training is disabled (using ``.eval()``)
- ``add_bias_kv`` is ``False``
- ``add_zero_attn`` is ``False``
- ``kdim`` and ``vdim`` are equal to ``embed_dim``
- if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``
nor ``attn_mask`` is passed
- autocast is disabled
If the optimized inference fastpath implementation is in use, a
`NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for
``query``/``key``/``value`` to represent padding more efficiently than using a
padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_
will be returned, and an additional speedup proportional to the fraction of the input
that is padding can be expected.
Args:
embed_dim: Total dimension of the model.
num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
bias: If specified, adds bias to input / output projection layers. Default: ``True``.
add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
Default: ``False``.
kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Examples::
>>> # xdoctest: +SKIP
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
.. _`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`:
https://arxiv.org/abs/2205.14135
"""
__constants__ = ["batch_first"]
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=False,
device=None,
dtype=None,
) -> None:
if embed_dim <= 0 or num_heads <= 0:
raise ValueError(
f"embed_dim and num_heads must be greater than 0,"
f" got embed_dim={embed_dim} and num_heads={num_heads} instead"
)
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
if not self._qkv_same_embed_dim:
self.q_proj_weight = Parameter(
torch.empty((embed_dim, embed_dim), **factory_kwargs)
)
self.k_proj_weight = Parameter(
torch.empty((embed_dim, self.kdim), **factory_kwargs)
)
self.v_proj_weight = Parameter(
torch.empty((embed_dim, self.vdim), **factory_kwargs)
)
self.register_parameter("in_proj_weight", None)
else:
self.in_proj_weight = Parameter(
torch.empty((3 * embed_dim, embed_dim), **factory_kwargs)
)
self.register_parameter("q_proj_weight", None)
self.register_parameter("k_proj_weight", None)
self.register_parameter("v_proj_weight", None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
else:
self.register_parameter("in_proj_bias", None)
self.out_proj = NonDynamicallyQuantizableLinear(
embed_dim, embed_dim, bias=bias, **factory_kwargs
)
if add_bias_kv:
self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.0)
constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if "_qkv_same_embed_dim" not in state:
state["_qkv_same_embed_dim"] = True
super().__setstate__(state)
def forward(
self,
query: Tensor,
key: Tensor,
value: Tensor,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = True,
is_causal: bool = False,
) -> tuple[Tensor, Optional[Tensor]]:
r"""Compute attention outputs using query, key, and value embeddings.
Supports optional parameters for padding, masks and attention weights.
Args:
query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
:math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
Queries are compared against key-value pairs to produce the output.
See "Attention Is All You Need" for more details.
key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
:math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
See "Attention Is All You Need" for more details.
value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
See "Attention Is All You Need" for more details.
key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
Binary and float masks are supported.
For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
the purpose of attention. For a float mask, it will be directly added to the corresponding ``key`` value.
need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
Set ``need_weights=False`` to use the optimized ``scaled_dot_product_attention``
and achieve the best performance for MHA.
Default: ``True``.
attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
:math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
:math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
Binary and float masks are supported. For a binary mask, a ``True`` value indicates that the
corresponding position is not allowed to attend. For a float mask, the mask values will be added to
the attention weight.
If both attn_mask and key_padding_mask are supplied, their types should match.
average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
is_causal: If specified, applies a causal mask as attention mask.
Default: ``False``.
Warning:
``is_causal`` provides a hint that ``attn_mask`` is the
causal mask. Providing incorrect hints can result in
incorrect execution, including forward and backward
compatibility.
Outputs:
- **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
:math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
embedding dimension ``embed_dim``.
- **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
:math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
.. note::
`batch_first` argument is ignored for unbatched inputs.
""" # noqa: B950
why_not_fast_path = ""
if (
(attn_mask is not None and torch.is_floating_point(attn_mask))
or (key_padding_mask is not None)
and torch.is_floating_point(key_padding_mask)
):
why_not_fast_path = "floating-point masks are not supported for fast path."
is_batched = query.dim() == 3
key_padding_mask = F._canonical_mask(
mask=key_padding_mask,
mask_name="key_padding_mask",
other_type=F._none_or_dtype(attn_mask),
other_name="attn_mask",
target_type=query.dtype,
)
attn_mask = F._canonical_mask(
mask=attn_mask,
mask_name="attn_mask",
other_type=None,
other_name="",
target_type=query.dtype,
check_other=False,
)
is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled()
if not is_fastpath_enabled:
why_not_fast_path = "torch.backends.mha.get_fastpath_enabled() was not True"
elif not is_batched:
why_not_fast_path = (
f"input not batched; expected query.dim() of 3 but got {query.dim()}"
)
elif query is not key or key is not value:
# When lifting this restriction, don't forget to either
# enforce that the dtypes all match or test cases where
# they don't!
why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
elif self.in_proj_weight is None:
why_not_fast_path = "in_proj_weight was None"
elif query.dtype != self.in_proj_weight.dtype:
# this case will fail anyway, but at least they'll get a useful error message.
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
elif self.training:
why_not_fast_path = "training is enabled"
elif (self.num_heads % 2) != 0:
why_not_fast_path = "self.num_heads is not even"
elif not self.batch_first:
why_not_fast_path = "batch_first was not True"
elif self.bias_k is not None:
why_not_fast_path = "self.bias_k was not None"
elif self.bias_v is not None:
why_not_fast_path = "self.bias_v was not None"
elif self.add_zero_attn:
why_not_fast_path = "add_zero_attn was enabled"
elif not self._qkv_same_embed_dim:
why_not_fast_path = "_qkv_same_embed_dim was not True"
elif query.is_nested and (
key_padding_mask is not None or attn_mask is not None
):
why_not_fast_path = "supplying both src_key_padding_mask and src_mask at the same time \
is not supported with NestedTensor input"
elif torch.is_autocast_enabled():
why_not_fast_path = "autocast is enabled"
if not why_not_fast_path:
tensor_args = (
query,
key,
value,
self.in_proj_weight,
self.in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
)
# We have to use list comprehensions below because TorchScript does not support
# generator expressions.
if torch.overrides.has_torch_function(tensor_args):
why_not_fast_path = "some Tensor argument has_torch_function"
elif _is_make_fx_tracing():
why_not_fast_path = "we are running make_fx tracing"
elif not all(_check_arg_device(x) for x in tensor_args):
why_not_fast_path = (
"some Tensor argument's device is neither one of "
f"cpu, cuda or {torch.utils.backend_registration._privateuse1_backend_name}"
)
elif torch.is_grad_enabled() and any(
_arg_requires_grad(x) for x in tensor_args
):
why_not_fast_path = (
"grad is enabled and at least one of query or the "
"input/output projection weights or biases requires_grad"
)
if not why_not_fast_path:
merged_mask, mask_type = self.merge_masks(
attn_mask, key_padding_mask, query
)
if self.in_proj_bias is not None and self.in_proj_weight is not None:
return torch._native_multi_head_attention(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
merged_mask,
need_weights,
average_attn_weights,
mask_type,
)
any_nested = query.is_nested or key.is_nested or value.is_nested
assert not any_nested, (
"MultiheadAttention does not support NestedTensor outside of its fast path. "
+ f"The fast path was not hit because {why_not_fast_path}"
)
if self.batch_first and is_batched:
# make sure that the transpose op does not affect the "is" property
if key is value:
if query is key:
query = key = value = query.transpose(1, 0)
else:
query, key = (x.transpose(1, 0) for x in (query, key))
value = key
else:
query, key, value = (x.transpose(1, 0) for x in (query, key, value))
if not self._qkv_same_embed_dim:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight,
k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight,
average_attn_weights=average_attn_weights,
is_causal=is_causal,
)
else:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
average_attn_weights=average_attn_weights,
is_causal=is_causal,
)
if self.batch_first and is_batched:
return attn_output.transpose(1, 0), attn_output_weights
else:
return attn_output, attn_output_weights
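# Editor's illustrative sketch (not part of the upstream file): a call pattern that
# satisfies the fast-path checks performed above -- self-attention, ``batch_first=True``,
# eval mode, ``need_weights=False`` and no gradient tracking. All sizes below are
# made-up example values.
#
#   >>> mha = MultiheadAttention(embed_dim=16, num_heads=4, batch_first=True).eval()
#   >>> x = torch.randn(2, 5, 16)
#   >>> with torch.no_grad():
#   ...     out, weights = mha(x, x, x, need_weights=False)
#   >>> out.shape        # torch.Size([2, 5, 16]); ``weights`` is None on this path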
def merge_masks(
self,
attn_mask: Optional[Tensor],
key_padding_mask: Optional[Tensor],
query: Tensor,
) -> tuple[Optional[Tensor], Optional[int]]:
r"""Determine mask type and combine masks if necessary.
If only one mask is provided, that mask
and the corresponding mask type will be returned. If both masks are provided, they will be both
expanded to shape ``(batch_size, num_heads, seq_len, seq_len)``, combined with logical ``or``
and mask type 2 will be returned
Args:
attn_mask: attention mask of shape ``(seq_len, seq_len)``, mask type 0
key_padding_mask: padding mask of shape ``(batch_size, seq_len)``, mask type 1
query: query embeddings of shape ``(batch_size, seq_len, embed_dim)``
Returns:
merged_mask: merged mask
mask_type: merged mask type (0, 1, or 2)
"""
mask_type: Optional[int] = None
merged_mask: Optional[Tensor] = None
if key_padding_mask is not None:
mask_type = 1
merged_mask = key_padding_mask
if attn_mask is not None:
# In this branch query can't be a nested tensor, so it has a shape
batch_size, seq_len, _ = query.shape
mask_type = 2
# Always expands attn_mask to 4D
if attn_mask.dim() == 3:
attn_mask_expanded = attn_mask.view(batch_size, -1, seq_len, seq_len)
else: # attn_mask.dim() == 2:
attn_mask_expanded = attn_mask.view(1, 1, seq_len, seq_len).expand(
batch_size, self.num_heads, -1, -1
)
merged_mask = attn_mask_expanded
if key_padding_mask is not None:
key_padding_mask_expanded = key_padding_mask.view(
batch_size, 1, 1, seq_len
).expand(-1, self.num_heads, -1, -1)
merged_mask = attn_mask_expanded + key_padding_mask_expanded
# no attn_mask and no key_padding_mask, returns None, None
return merged_mask, mask_type
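# Editor's illustrative sketch (not part of the upstream file): how ``merge_masks``
# combines a 2D attention mask with a key padding mask. Both masks here are additive
# float masks (as produced by ``F._canonical_mask`` in ``forward``); batch 2,
# 4 heads and sequence length 3 are made-up sizes.
#
#   >>> mha = MultiheadAttention(embed_dim=8, num_heads=4, batch_first=True)
#   >>> q = torch.randn(2, 3, 8)
#   >>> attn_mask = torch.zeros(3, 3)          # shape (L, S)
#   >>> key_padding_mask = torch.zeros(2, 3)   # shape (N, S)
#   >>> merged, mask_type = mha.merge_masks(attn_mask, key_padding_mask, q)
#   >>> merged.shape, mask_type                # (torch.Size([2, 4, 3, 3]), 2)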
class PReLU(Module):
r"""Applies the element-wise PReLU function.
.. math::
\text{PReLU}(x) = \max(0,x) + a * \min(0,x)
or
.. math::
\text{PReLU}(x) =
\begin{cases}
x, & \text{ if } x \ge 0 \\
ax, & \text{ otherwise }
\end{cases}
Here :math:`a` is a learnable parameter. When called without arguments, `nn.PReLU()` uses a single
parameter :math:`a` across all input channels. If called with `nn.PReLU(nChannels)`,
a separate :math:`a` is used for each input channel.
.. note::
For good performance, weight decay should not be used when learning :math:`a`.
.. note::
Channel dim is the 2nd dim of input. When input has dims < 2, then there is
no channel dim and the number of channels = 1.
Args:
num_parameters (int): number of :math:`a` to learn.
Although it takes an int as input, only two values are legitimate:
1, or the number of channels of the input. Default: 1
init (float): the initial value of :math:`a`. Default: 0.25
Shape:
- Input: :math:`(*)` where `*` means any number of additional
dimensions.
- Output: :math:`(*)`, same shape as the input.
Attributes:
weight (Tensor): the learnable weights of shape (:attr:`num_parameters`).
.. image:: ../scripts/activation_images/PReLU.png
Examples::
>>> m = nn.PReLU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["num_parameters"]
num_parameters: int
def __init__(
self, num_parameters: int = 1, init: float = 0.25, device=None, dtype=None
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
self.num_parameters = num_parameters
super().__init__()
self.init = init
self.weight = Parameter(torch.empty(num_parameters, **factory_kwargs))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.constant_(self.weight, self.init)
def forward(self, input: Tensor) -> Tensor:
return F.prelu(input, self.weight)
def extra_repr(self) -> str:
return f"num_parameters={self.num_parameters}"
class Softsign(Module):
r"""Applies the element-wise Softsign function.
.. math::
\text{SoftSign}(x) = \frac{x}{ 1 + |x|}
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softsign.png
Examples::
>>> m = nn.Softsign()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return F.softsign(input)
class Tanhshrink(Module):
r"""Applies the element-wise Tanhshrink function.
.. math::
\text{Tanhshrink}(x) = x - \tanh(x)
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Tanhshrink.png
Examples::
>>> m = nn.Tanhshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return F.tanhshrink(input)
class Softmin(Module):
r"""Applies the Softmin function to an n-dimensional input Tensor.
Rescales them so that the elements of the n-dimensional output Tensor
lie in the range `[0, 1]` and sum to 1.
Softmin is defined as:
.. math::
\text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)}
Shape:
- Input: :math:`(*)` where `*` means any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Args:
dim (int): A dimension along which Softmin will be computed (so every slice
along dim will sum to 1).
Returns:
a Tensor of the same dimension and shape as the input, with
values in the range [0, 1]
Examples::
>>> m = nn.Softmin(dim=1)
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ["dim"]
dim: Optional[int]
def __init__(self, dim: Optional[int] = None) -> None:
super().__init__()
self.dim = dim
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, "dim"):
self.dim = None
def forward(self, input: Tensor) -> Tensor:
return F.softmin(input, self.dim, _stacklevel=5)
def extra_repr(self):
return f"dim={self.dim}"
class Softmax(Module):
r"""Applies the Softmax function to an n-dimensional input Tensor.
Rescales them so that the elements of the n-dimensional output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
When the input Tensor is a sparse tensor then the unspecified
values are treated as ``-inf``.
Shape:
- Input: :math:`(*)` where `*` means any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Args:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
.. note::
This module doesn't work directly with NLLLoss,
which expects log-probabilities as input (i.e. the log should be applied to the Softmax output).
Use `LogSoftmax` instead (it's faster and has better numerical properties).
Examples::
>>> m = nn.Softmax(dim=1)
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ["dim"]
dim: Optional[int]
def __init__(self, dim: Optional[int] = None) -> None:
super().__init__()
self.dim = dim
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, "dim"):
self.dim = None
def forward(self, input: Tensor) -> Tensor:
return F.softmax(input, self.dim, _stacklevel=5)
def extra_repr(self) -> str:
return f"dim={self.dim}"
class Softmax2d(Module):
r"""Applies SoftMax over features to each spatial location.
When given an image of ``Channels x Height x Width``, it will
apply `Softmax` to each location :math:`(Channels, h_i, w_j)`
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`.
- Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Examples::
>>> m = nn.Softmax2d()
>>> # you softmax over the 2nd dimension
>>> input = torch.randn(2, 3, 12, 13)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
if input.dim() not in (3, 4):
raise ValueError(
f"Softmax2d: expected input to be 3D or 4D, got {input.dim()}D instead"
)
return F.softmax(input, -3, _stacklevel=5)
class LogSoftmax(Module):
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional input Tensor.
The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Shape:
- Input: :math:`(*)` where `*` means any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Args:
dim (int): A dimension along which LogSoftmax will be computed.
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0)
Examples::
>>> m = nn.LogSoftmax(dim=1)
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ["dim"]
dim: Optional[int]
def __init__(self, dim: Optional[int] = None) -> None:
super().__init__()
self.dim = dim
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, "dim"):
self.dim = None
def forward(self, input: Tensor) -> Tensor:
return F.log_softmax(input, self.dim, _stacklevel=5)
def extra_repr(self):
return f"dim={self.dim}"
```
|
====================================================================================================================
SOURCE CODE FILE: adaptive.py
LINES: 1
SIZE: 12.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\adaptive.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from collections import namedtuple
from collections.abc import Sequence
import torch
import torch.nn.functional as F
from torch import Tensor
from .container import ModuleList, Sequential
from .linear import Linear
from .module import Module
__all__ = ["AdaptiveLogSoftmaxWithLoss"]
_ASMoutput = namedtuple("_ASMoutput", ["output", "loss"])
class AdaptiveLogSoftmaxWithLoss(Module):
"""Efficient softmax approximation.
As described in
`Efficient softmax approximation for GPUs by Edouard Grave, Armand Joulin,
Moustapha Ciss\u00e9, David Grangier, and Herv\u00e9 J\u00e9gou
<https://arxiv.org/abs/1609.04309>`__.
""" r"""
Adaptive softmax is an approximate strategy for training models with large
output spaces. It is most effective when the label distribution is highly
imbalanced, for example in natural language modelling, where the word
frequency distribution approximately follows the `Zipf's law`_.
Adaptive softmax partitions the labels into several clusters, according to
their frequency. Each cluster may contain a different number of targets.
Additionally, clusters containing less frequent labels assign lower
dimensional embeddings to those labels, which speeds up the computation.
For each minibatch, only clusters for which at least one target is
present are evaluated.
The idea is that the clusters which are accessed frequently
(like the first one, containing most frequent labels), should also be cheap
to compute -- that is, contain a small number of assigned labels.
We highly recommend taking a look at the original paper for more details.
* :attr:`cutoffs` should be an ordered Sequence of integers sorted
in increasing order.
It controls the number of clusters and the partitioning of targets into
clusters. For example, setting ``cutoffs = [10, 100, 1000]``
means that the first `10` targets will be assigned
to the 'head' of the adaptive softmax, targets `11, 12, ..., 100` will be
assigned to the first cluster, and targets `101, 102, ..., 1000` will be
assigned to the second cluster, while targets
`1001, 1002, ..., n_classes - 1` will be assigned
to the last, third cluster.
* :attr:`div_value` is used to compute the size of each additional cluster,
which is given as
:math:`\left\lfloor\frac{\texttt{in\_features}}{\texttt{div\_value}^{idx}}\right\rfloor`,
where :math:`idx` is the cluster index (with clusters
for less frequent words having larger indices,
and indices starting from :math:`1`).
* :attr:`head_bias` if set to True, adds a bias term to the 'head' of the
adaptive softmax. See paper for details. Set to False in the official
implementation.
.. warning::
Labels passed as inputs to this module should be sorted according to
their frequency. This means that the most frequent label should be
represented by the index `0`, and the least frequent
label should be represented by the index `n_classes - 1`.
.. note::
This module returns a ``NamedTuple`` with ``output``
and ``loss`` fields. See further documentation for details.
.. note::
To compute log-probabilities for all classes, the ``log_prob``
method can be used.
Args:
in_features (int): Number of features in the input tensor
n_classes (int): Number of classes in the dataset
cutoffs (Sequence): Cutoffs used to assign targets to their buckets
div_value (float, optional): value used as an exponent to compute sizes
of the clusters. Default: 4.0
head_bias (bool, optional): If ``True``, adds a bias term to the 'head' of the
adaptive softmax. Default: ``False``
Returns:
``NamedTuple`` with ``output`` and ``loss`` fields:
* **output** is a Tensor of size ``N`` containing computed target
log probabilities for each example
* **loss** is a Scalar representing the computed negative
log likelihood loss
Shape:
- input: :math:`(N, \texttt{in\_features})` or :math:`(\texttt{in\_features})`
- target: :math:`(N)` or :math:`()` where each value satisfies :math:`0 <= \texttt{target[i]} < \texttt{n\_classes}`
- output1: :math:`(N)` or :math:`()`
- output2: ``Scalar``
.. _Zipf's law: https://en.wikipedia.org/wiki/Zipf%27s_law
"""
in_features: int
n_classes: int
cutoffs: list[int]
div_value: float
head_bias: bool
head: Linear
tail: ModuleList
def __init__(
self,
in_features: int,
n_classes: int,
cutoffs: Sequence[int],
div_value: float = 4.0,
head_bias: bool = False,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
cutoffs = list(cutoffs)
if len(cutoffs) == 0:
raise ValueError("cutoffs should be a sequence of length larger than 0")
if (
(cutoffs != sorted(cutoffs))
or (min(cutoffs) <= 0)
or (max(cutoffs) > (n_classes - 1))
or (len(set(cutoffs)) != len(cutoffs))
or any(int(c) != c for c in cutoffs)
):
raise ValueError(
"cutoffs should be a sequence of unique, positive "
"integers sorted in an increasing order, where "
"each value is between 1 and n_classes-1"
)
self.in_features = in_features
self.n_classes = n_classes
self.cutoffs = cutoffs + [n_classes]
self.div_value = div_value
self.head_bias = head_bias
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
self.head = Linear(
self.in_features, self.head_size, bias=self.head_bias, **factory_kwargs
)
self.tail = ModuleList()
for i in range(self.n_clusters):
hsz = int(self.in_features // (self.div_value ** (i + 1)))
osz = self.cutoffs[i + 1] - self.cutoffs[i]
projection = Sequential(
Linear(self.in_features, hsz, bias=False, **factory_kwargs),
Linear(hsz, osz, bias=False, **factory_kwargs),
)
self.tail.append(projection)
def reset_parameters(self) -> None:
self.head.reset_parameters()
for i2h, h2o in self.tail:
i2h.reset_parameters()
h2o.reset_parameters()
def forward(self, input_: Tensor, target_: Tensor) -> _ASMoutput:
targ_dim = target_.dim()
if targ_dim == 1:
if input_.size(0) != target_.size(0):
raise RuntimeError(
"Input and target should have the same size "
"in the batch dimension."
)
if input_.dim() != 2:
raise RuntimeError(
"1D target tensor expects 2D input tensors, "
"but found inputs with size",
input_.size(),
)
elif targ_dim == 0:
if input_.dim() != 1:
raise RuntimeError(
"0D target tensor expects 1D input tensors, "
"but found inputs with size",
input_.size(),
)
else:
raise RuntimeError(
"0D or 1D target tensor expected, multi-target not supported"
)
is_batched = targ_dim > 0
input = input_ if is_batched else input_.unsqueeze(0)
target = target_ if is_batched else target_.unsqueeze(0)
used_rows = 0
batch_size = target.size(0)
output = input.new_zeros(batch_size)
gather_inds = target.new_empty(batch_size)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
low_idx = cutoff_values[i]
high_idx = cutoff_values[i + 1]
target_mask = (target >= low_idx) & (target < high_idx)
row_indices = target_mask.nonzero().squeeze()
if row_indices.numel() == 0:
continue
if i == 0:
gather_inds.index_copy_(0, row_indices, target[target_mask])
else:
relative_target = target[target_mask] - low_idx
input_subset = input.index_select(0, row_indices)
cluster_output = self.tail[i - 1](input_subset)
cluster_index = self.shortlist_size + i - 1
gather_inds.index_fill_(0, row_indices, cluster_index)
cluster_logprob = F.log_softmax(cluster_output, dim=1)
local_logprob = cluster_logprob.gather(1, relative_target.unsqueeze(1))
output.index_copy_(0, row_indices, local_logprob.squeeze(1))
used_rows += row_indices.numel()
if used_rows != batch_size:
raise RuntimeError(
f"Target values should be in [0, {self.n_classes - 1}], "
f"but values in range [{target.min().item()}, {target.max().item()}] "
"were found. "
)
head_output = self.head(input)
head_logprob = F.log_softmax(head_output, dim=1)
output += head_logprob.gather(1, gather_inds.unsqueeze(1)).squeeze()
loss = (-output).mean()
if not is_batched:
output = output.squeeze(0)
return _ASMoutput(output, loss)
def _get_full_log_prob(self, input, head_output):
"""Given input tensor, and output of ``self.head``, compute the log of the full distribution."""
out = input.new_empty((head_output.size(0), self.n_classes))
head_logprob = F.log_softmax(head_output, dim=1)
out[:, : self.shortlist_size] = head_logprob[:, : self.shortlist_size]
for i, (start_idx, stop_idx) in enumerate(zip(self.cutoffs, self.cutoffs[1:])):
cluster_output = self.tail[i](input)
cluster_logprob = F.log_softmax(cluster_output, dim=1)
output_logprob = cluster_logprob + head_logprob[
:, self.shortlist_size + i
].unsqueeze(1)
out[:, start_idx:stop_idx] = output_logprob
return out
def log_prob(self, input: Tensor) -> Tensor:
r"""Compute log probabilities for all :math:`\texttt{n\_classes}`.
Args:
input (Tensor): a minibatch of examples
Returns:
log-probabilities for each class :math:`c`
in the range :math:`0 <= c < \texttt{n\_classes}`, where :math:`\texttt{n\_classes}` is a
parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
Shape:
- Input: :math:`(N, \texttt{in\_features})`
- Output: :math:`(N, \texttt{n\_classes})`
"""
head_output = self.head(input)
return self._get_full_log_prob(input, head_output)
def predict(self, input: Tensor) -> Tensor:
r"""Return the class with the highest probability for each example in the input minibatch.
This is equivalent to ``self.log_prob(input).argmax(dim=1)``, but is more efficient in some cases.
Args:
input (Tensor): a minibatch of examples
Returns:
output (Tensor): a class with the highest probability for each example
Shape:
- Input: :math:`(N, \texttt{in\_features})`
- Output: :math:`(N)`
"""
head_output = self.head(input)
output = torch.argmax(head_output, dim=1)
not_in_shortlist = output >= self.shortlist_size
all_in_shortlist = not (not_in_shortlist.any())
if all_in_shortlist:
return output
elif not_in_shortlist.all():
log_prob = self._get_full_log_prob(input, head_output)
return torch.argmax(log_prob, dim=1)
else:
log_prob = self._get_full_log_prob(
input[not_in_shortlist], head_output[not_in_shortlist]
)
output[not_in_shortlist] = torch.argmax(log_prob, dim=1)
return output
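# Editor's illustrative sketch (not part of the upstream file): typical use of the
# module and its helpers. ``in_features``, ``n_classes`` and ``cutoffs`` below are
# made-up values; labels must be frequency-sorted as described in the docstring.
#
#   >>> asm = AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=1000, cutoffs=[10, 100, 500])
#   >>> x = torch.randn(32, 64)
#   >>> target = torch.randint(0, 1000, (32,))
#   >>> out, loss = asm(x, target)     # out: (32,) target log-probabilities, loss: scalar NLL
#   >>> log_probs = asm.log_prob(x)    # (32, 1000) log-probabilities over all classes
#   >>> preds = asm.predict(x)         # (32,) predicted class indices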
```
|
=====================================================================================================================
SOURCE CODE FILE: batchnorm.py
LINES: 1
SIZE: 38.36 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\batchnorm.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from typing import Any, Optional
import torch
from torch import Tensor
from torch.nn import functional as F, init
from torch.nn.parameter import Parameter, UninitializedBuffer, UninitializedParameter
from ._functions import SyncBatchNorm as sync_batch_norm
from .lazy import LazyModuleMixin
from .module import Module
__all__ = [
"BatchNorm1d",
"LazyBatchNorm1d",
"BatchNorm2d",
"LazyBatchNorm2d",
"BatchNorm3d",
"LazyBatchNorm3d",
"SyncBatchNorm",
]
class _NormBase(Module):
"""Common base of _InstanceNorm and _BatchNorm."""
_version = 2
__constants__ = ["track_running_stats", "momentum", "eps", "num_features", "affine"]
num_features: int
eps: float
momentum: Optional[float]
affine: bool
track_running_stats: bool
# WARNING: weight and bias purposely not defined here.
# See https://github.com/pytorch/pytorch/issues/39670
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: Optional[float] = 0.1,
affine: bool = True,
track_running_stats: bool = True,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.affine = affine
self.track_running_stats = track_running_stats
if self.affine:
self.weight = Parameter(torch.empty(num_features, **factory_kwargs))
self.bias = Parameter(torch.empty(num_features, **factory_kwargs))
else:
self.register_parameter("weight", None)
self.register_parameter("bias", None)
if self.track_running_stats:
self.register_buffer(
"running_mean", torch.zeros(num_features, **factory_kwargs)
)
self.register_buffer(
"running_var", torch.ones(num_features, **factory_kwargs)
)
self.running_mean: Optional[Tensor]
self.running_var: Optional[Tensor]
self.register_buffer(
"num_batches_tracked",
torch.tensor(
0,
dtype=torch.long,
**{k: v for k, v in factory_kwargs.items() if k != "dtype"},
),
)
self.num_batches_tracked: Optional[Tensor]
else:
self.register_buffer("running_mean", None)
self.register_buffer("running_var", None)
self.register_buffer("num_batches_tracked", None)
self.reset_parameters()
def reset_running_stats(self) -> None:
if self.track_running_stats:
# running_mean/running_var/num_batches... are registered at runtime depending
# if self.track_running_stats is on
self.running_mean.zero_() # type: ignore[union-attr]
self.running_var.fill_(1) # type: ignore[union-attr]
self.num_batches_tracked.zero_() # type: ignore[union-attr,operator]
def reset_parameters(self) -> None:
self.reset_running_stats()
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def _check_input_dim(self, input):
raise NotImplementedError
def extra_repr(self):
return (
"{num_features}, eps={eps}, momentum={momentum}, affine={affine}, "
"track_running_stats={track_running_stats}".format(**self.__dict__)
)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if (version is None or version < 2) and self.track_running_stats:
# at version 2: added num_batches_tracked buffer
# this should have a default value of 0
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key not in state_dict:
state_dict[num_batches_tracked_key] = (
self.num_batches_tracked
if self.num_batches_tracked is not None
and self.num_batches_tracked.device != torch.device("meta")
else torch.tensor(0, dtype=torch.long)
)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
class _BatchNorm(_NormBase):
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: Optional[float] = 0.1,
affine: bool = True,
track_running_stats: bool = True,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
)
def forward(self, input: Tensor) -> Tensor:
self._check_input_dim(input)
# exponential_average_factor is set to self.momentum
# (when it is available) only so that it gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None: # type: ignore[has-type]
self.num_batches_tracked.add_(1) # type: ignore[has-type]
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
r"""
Decide whether the mini-batch stats should be used for normalization rather than the buffers.
Mini-batch stats are used in training mode, and in eval mode when buffers are None.
"""
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (self.running_var is None)
r"""
Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
used for normalization (i.e. in eval mode when buffers are not None).
"""
return F.batch_norm(
input,
# If buffers are not to be tracked, ensure that they won't be updated
self.running_mean
if not self.training or self.track_running_stats
else None,
self.running_var if not self.training or self.track_running_stats else None,
self.weight,
self.bias,
bn_training,
exponential_average_factor,
self.eps,
)
class _LazyNormBase(LazyModuleMixin, _NormBase):
weight: UninitializedParameter # type: ignore[assignment]
bias: UninitializedParameter # type: ignore[assignment]
def __init__(
self,
eps=1e-5,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
# affine and track_running_stats are hardcoded to False to
# avoid creating tensors that will soon be overwritten.
0,
eps,
momentum,
False,
False,
**factory_kwargs,
)
self.affine = affine
self.track_running_stats = track_running_stats
if self.affine:
self.weight = UninitializedParameter(**factory_kwargs)
self.bias = UninitializedParameter(**factory_kwargs)
if self.track_running_stats:
self.running_mean = UninitializedBuffer(**factory_kwargs)
self.running_var = UninitializedBuffer(**factory_kwargs)
self.num_batches_tracked = torch.tensor(
0,
dtype=torch.long,
**{k: v for k, v in factory_kwargs.items() if k != "dtype"},
)
def reset_parameters(self) -> None:
if not self.has_uninitialized_params() and self.num_features != 0:
super().reset_parameters()
def initialize_parameters(self, input) -> None: # type: ignore[override]
if self.has_uninitialized_params():
self.num_features = input.shape[1]
if self.affine:
assert isinstance(self.weight, UninitializedParameter)
assert isinstance(self.bias, UninitializedParameter)
self.weight.materialize((self.num_features,))
self.bias.materialize((self.num_features,))
if self.track_running_stats:
self.running_mean.materialize( # type:ignore[union-attr]
(self.num_features,)
)
self.running_var.materialize( # type:ignore[union-attr]
(self.num_features,)
)
self.reset_parameters()
class BatchNorm1d(_BatchNorm):
r"""Applies Batch Normalization over a 2D or 3D input.
Method described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the number of features or channels of the input). By default, the
elements of :math:`\gamma` are set to 1 and the elements of :math:`\beta` are set to 0.
At train time in the forward pass, the variance is calculated via the biased estimator,
equivalent to ``torch.var(input, unbiased=False)``. However, the value stored in the
moving average of the variance is calculated via the unbiased estimator, equivalent to
``torch.var(input, unbiased=True)``.
Also by default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal Batch Normalization.
Args:
num_features: number of features or channels :math:`C` of the input
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch statistics
in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C)` or :math:`(N, C, L)`, where :math:`N` is the batch size,
:math:`C` is the number of features or channels, and :math:`L` is the sequence length
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm1d(100, affine=False)
>>> input = torch.randn(20, 100)
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")
class LazyBatchNorm1d(_LazyNormBase, _BatchNorm):
r"""A :class:`torch.nn.BatchNorm1d` module with lazy initialization.
Lazy initialization based on the ``num_features`` argument of the :class:`BatchNorm1d` that is inferred
from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch statistics
in both training and eval modes. Default: ``True``
"""
cls_to_become = BatchNorm1d # type: ignore[assignment]
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")
class BatchNorm2d(_BatchNorm):
r"""Applies Batch Normalization over a 4D input.
4D is a mini-batch of 2D inputs
with additional channel dimension. Method described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set
to 1 and the elements of :math:`\beta` are set to 0. At train time in the forward pass, the
variance is calculated via the biased estimator, equivalent to
``torch.var(input, unbiased=False)``. However, the value stored in the moving average of the
variance is calculated via the unbiased estimator, equivalent to
``torch.var(input, unbiased=True)``.
Also by default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch statistics
in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm2d(100, affine=False)
>>> input = torch.randn(20, 100, 35, 45)
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError(f"expected 4D input (got {input.dim()}D input)")
class LazyBatchNorm2d(_LazyNormBase, _BatchNorm):
r"""A :class:`torch.nn.BatchNorm2d` module with lazy initialization.
Lazy initialization is done for the ``num_features`` argument of the :class:`BatchNorm2d` that is inferred
from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch statistics
in both training and eval modes. Default: ``True``
"""
cls_to_become = BatchNorm2d # type: ignore[assignment]
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError(f"expected 4D input (got {input.dim()}D input)")
class BatchNorm3d(_BatchNorm):
r"""Applies Batch Normalization over a 5D input.
5D is a mini-batch of 3D inputs with additional channel dimension as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set
to 1 and the elements of :math:`\beta` are set to 0. At train time in the forward pass, the
variance is calculated via the biased estimator, equivalent to
``torch.var(input, unbiased=False)``. However, the value stored in the moving average of the
variance is calculated via the unbiased estimator, equivalent to
``torch.var(input, unbiased=True)``.
Also by default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric Batch Normalization
or Spatio-temporal Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, D, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch statistics
in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm3d(100, affine=False)
>>> input = torch.randn(20, 100, 35, 45, 10)
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError(f"expected 5D input (got {input.dim()}D input)")
class LazyBatchNorm3d(_LazyNormBase, _BatchNorm):
r"""A :class:`torch.nn.BatchNorm3d` module with lazy initialization.
Lazy initialization is done for the ``num_features`` argument of the :class:`BatchNorm3d` that is inferred
from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch statistics
in both training and eval modes. Default: ``True``
"""
cls_to_become = BatchNorm3d # type: ignore[assignment]
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError(f"expected 5D input (got {input.dim()}D input)")
class SyncBatchNorm(_BatchNorm):
r"""Applies Batch Normalization over a N-Dimensional input.
The N-D input is a mini-batch of [N-2]D inputs with an additional channel dimension, as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over all
mini-batches of the same process groups. :math:`\gamma` and :math:`\beta`
are learnable parameter vectors of size `C` (where `C` is the input size).
By default, the elements of :math:`\gamma` are set to 1
and the elements of :math:`\beta` are set to 0.
The variance is calculated via the biased estimator, equivalent to
`torch.var(input, unbiased=False)`.
Also by default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done for each channel in the ``C`` dimension, computing
statistics on ``(N, +)`` slices, it's common terminology to call this Volumetric Batch
Normalization or Spatio-temporal Batch Normalization.
Currently :class:`SyncBatchNorm` only supports
:class:`~torch.nn.DistributedDataParallel` (DDP) with single GPU per process. Use
:meth:`torch.nn.SyncBatchNorm.convert_sync_batchnorm()` to convert
:attr:`BatchNorm*D` layer to :class:`SyncBatchNorm` before wrapping
Network with DDP.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, +)`
eps: a value added to the denominator for numerical stability.
Default: ``1e-5``
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch statistics
in both training and eval modes. Default: ``True``
process_group: synchronization of stats happens within each process group
individually. Default behavior is synchronization across the whole
world
Shape:
- Input: :math:`(N, C, +)`
- Output: :math:`(N, C, +)` (same shape as input)
.. note::
Synchronization of batchnorm statistics occurs only while training, i.e.
synchronization is disabled when ``model.eval()`` is set or if
``self.training`` is otherwise ``False``.
Examples::
>>> # xdoctest: +SKIP
>>> # With Learnable Parameters
>>> m = nn.SyncBatchNorm(100)
>>> # creating process group (optional)
>>> # ranks is a list of int identifying rank ids.
>>> ranks = list(range(8))
>>> r1, r2 = ranks[:4], ranks[4:]
>>> # Note: every rank calls into new_group for every
>>> # process group created, even if that rank is not
>>> # part of the group.
>>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
>>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
>>> # Without Learnable Parameters
>>> m = nn.SyncBatchNorm(100, affine=False, process_group=process_group)
>>> input = torch.randn(20, 100, 35, 45, 10)
>>> output = m(input)
>>> # network is nn.BatchNorm layer
>>> sync_bn_network = nn.SyncBatchNorm.convert_sync_batchnorm(network, process_group)
>>> # only single gpu per process is currently supported
>>> ddp_sync_bn_network = torch.nn.parallel.DistributedDataParallel(
>>> sync_bn_network,
>>> device_ids=[args.local_rank],
>>> output_device=args.local_rank)
"""
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: Optional[float] = 0.1,
affine: bool = True,
track_running_stats: bool = True,
process_group: Optional[Any] = None,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
)
self.process_group = process_group
def _check_input_dim(self, input):
if input.dim() < 2:
raise ValueError(f"expected at least 2D input (got {input.dim()}D input)")
def _check_non_zero_input_channels(self, input):
if input.size(1) == 0:
raise ValueError(
"SyncBatchNorm number of input channels should be non-zero"
)
def forward(self, input: Tensor) -> Tensor:
self._check_input_dim(input)
self._check_non_zero_input_channels(input)
# exponential_average_factor is set to self.momentum
# (when it is available) only so that it gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
assert self.num_batches_tracked is not None
self.num_batches_tracked.add_(1)
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / self.num_batches_tracked.item()
else: # use exponential moving average
exponential_average_factor = self.momentum
r"""
Decide whether the mini-batch stats should be used for normalization rather than the buffers.
Mini-batch stats are used in training mode, and in eval mode when buffers are None.
"""
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (self.running_var is None)
r"""
Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
used for normalization (i.e. in eval mode when buffers are not None).
"""
# If buffers are not to be tracked, ensure that they won't be updated
running_mean = (
self.running_mean if not self.training or self.track_running_stats else None
)
running_var = (
self.running_var if not self.training or self.track_running_stats else None
)
# Don't sync batchnorm stats in inference mode (model.eval()).
need_sync = (
bn_training
and self.training
and torch.distributed.is_available()
and torch.distributed.is_initialized()
)
if need_sync:
# currently only GPU/PrivateUse1 input is supported
if input.device.type not in [
"cuda",
"xpu",
torch._C._get_privateuse1_backend_name(),
]:
raise ValueError(
"SyncBatchNorm expected input tensor to be on GPU or XPU or "
f"{torch._C._get_privateuse1_backend_name()}"
)
process_group = torch.distributed.group.WORLD
if self.process_group:
process_group = self.process_group
world_size = torch.distributed.get_world_size(process_group)
need_sync = world_size > 1
# fallback to framework BN when synchronization is not necessary
if not need_sync:
return F.batch_norm(
input,
running_mean,
running_var,
self.weight,
self.bias,
bn_training,
exponential_average_factor,
self.eps,
)
else:
assert bn_training
return sync_batch_norm.apply(
input,
self.weight,
self.bias,
running_mean,
running_var,
self.eps,
exponential_average_factor,
process_group, # type: ignore[possibly-undefined]
world_size, # type: ignore[possibly-undefined]
)
@classmethod
def convert_sync_batchnorm(cls, module, process_group=None):
r"""Converts all :attr:`BatchNorm*D` layers in the model to :class:`torch.nn.SyncBatchNorm` layers.
Args:
module (nn.Module): module containing one or more :attr:`BatchNorm*D` layers
process_group (optional): process group to scope synchronization,
default is the whole world
Returns:
The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
instead.
Example::
>>> # Network with nn.BatchNorm layer
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
>>> module = torch.nn.Sequential(
>>> torch.nn.Linear(20, 100),
>>> torch.nn.BatchNorm1d(100),
>>> ).cuda()
>>> # creating process group (optional)
>>> # ranks is a list of int identifying rank ids.
>>> ranks = list(range(8))
>>> r1, r2 = ranks[:4], ranks[4:]
>>> # Note: every rank calls into new_group for every
>>> # process group created, even if that rank is not
>>> # part of the group.
>>> # xdoctest: +SKIP("distributed")
>>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
>>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
>>> sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
"""
module_output = module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module_output = torch.nn.SyncBatchNorm(
module.num_features,
module.eps,
module.momentum,
module.affine,
module.track_running_stats,
process_group,
)
if module.affine:
with torch.no_grad():
module_output.weight = module.weight
module_output.bias = module.bias
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
module_output.training = module.training
if hasattr(module, "qconfig"):
module_output.qconfig = module.qconfig
for name, child in module.named_children():
module_output.add_module(
name, cls.convert_sync_batchnorm(child, process_group)
)
del module
return module_output
```
|
==========================================================================================================================
SOURCE CODE FILE: channelshuffle.py
LINES: 1
SIZE: 1.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\channelshuffle.py
ENCODING: utf-8
```py
import torch.nn.functional as F
from torch import Tensor
from .module import Module
__all__ = ["ChannelShuffle"]
class ChannelShuffle(Module):
r"""Divides and rearranges the channels in a tensor.
This operation divides the channels in a tensor of shape :math:`(N, C, *)`
into g groups as :math:`(N, \frac{C}{g}, g, *)` and shuffles them,
while retaining the original tensor shape in the final output.
Args:
groups (int): number of groups to divide channels in.
Examples::
>>> channel_shuffle = nn.ChannelShuffle(2)
>>> input = torch.arange(1, 17, dtype=torch.float32).view(1, 4, 2, 2)
>>> input
tensor([[[[ 1., 2.],
[ 3., 4.]],
[[ 5., 6.],
[ 7., 8.]],
[[ 9., 10.],
[11., 12.]],
[[13., 14.],
[15., 16.]]]])
>>> output = channel_shuffle(input)
>>> output
tensor([[[[ 1., 2.],
[ 3., 4.]],
[[ 9., 10.],
[11., 12.]],
[[ 5., 6.],
[ 7., 8.]],
[[13., 14.],
[15., 16.]]]])
"""
__constants__ = ["groups"]
groups: int
def __init__(self, groups: int) -> None:
super().__init__()
self.groups = groups
def forward(self, input: Tensor) -> Tensor:
return F.channel_shuffle(input, self.groups)
def extra_repr(self) -> str:
return f"groups={self.groups}"
```
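For intuition, here is a short sketch (not from the file above) of an equivalent reshape/transpose formulation of the shuffle. The comparison against `F.channel_shuffle` reflects my reading of the docstring example above, so treat it as an illustration rather than a guaranteed property.
```py
import torch
import torch.nn.functional as F


def channel_shuffle_by_hand(x: torch.Tensor, groups: int) -> torch.Tensor:
    # Split the channel axis into (groups, channels_per_group), swap the two
    # axes, and flatten back: channels from different groups end up
    # interleaved, matching the docstring example above.
    n, c, *rest = x.shape
    return x.view(n, groups, c // groups, *rest).transpose(1, 2).reshape(n, c, *rest)


x = torch.arange(1, 17, dtype=torch.float32).view(1, 4, 2, 2)
print(torch.equal(channel_shuffle_by_hand(x, 2), F.channel_shuffle(x, 2)))  # expected: True
```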
|
=====================================================================================================================
SOURCE CODE FILE: container.py
LINES: 9
SIZE: 35.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\container.py
ENCODING: utf-8
```py
# mypy: allow-untyped-decorators
# mypy: allow-untyped-defs
import operator
from collections import abc as container_abcs, OrderedDict
from collections.abc import Iterable, Iterator, Mapping
from itertools import chain, islice
from typing import Any, Optional, overload, TypeVar, Union
from typing_extensions import deprecated, Self
import torch
from torch._jit_internal import _copy_to_script_wrapper
from torch.nn.parameter import Parameter
from .module import Module
__all__ = [
"Container",
"Sequential",
"ModuleList",
"ModuleDict",
"ParameterList",
"ParameterDict",
]
T = TypeVar("T", bound=Module)
# Copied from torch.nn.modules.module, required for a custom __repr__ for ModuleList
def _addindent(s_, numSpaces):
s = s_.split("\n")
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(numSpaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
@deprecated(
"`nn.Container` is deprecated. "
"All of it's functionality is now implemented in `nn.Module`. Subclass that instead.",
category=FutureWarning,
)
class Container(Module):
def __init__(self, **kwargs: Any) -> None:
super().__init__()
for key, value in kwargs.items():
self.add_module(key, value)
class Sequential(Module):
r"""A sequential container.
Modules will be added to it in the order they are passed in the
constructor. Alternatively, an ``OrderedDict`` of modules can be
passed in. The ``forward()`` method of ``Sequential`` accepts any
input and forwards it to the first module it contains. It then
"chains" outputs to inputs sequentially for each subsequent module,
finally returning the output of the last module.
The value a ``Sequential`` provides over manually calling a sequence
of modules is that it allows treating the whole container as a
single module, such that performing a transformation on the
``Sequential`` applies to each of the modules it stores (which are
each a registered submodule of the ``Sequential``).
What's the difference between a ``Sequential`` and a
:class:`torch.nn.ModuleList`? A ``ModuleList`` is exactly what it
sounds like--a list for storing ``Module`` s! On the other hand,
the layers in a ``Sequential`` are connected in a cascading way.
Example::
# Using Sequential to create a small model. When `model` is run,
# input will first be passed to `Conv2d(1,20,5)`. The output of
# `Conv2d(1,20,5)` will be used as the input to the first
# `ReLU`; the output of the first `ReLU` will become the input
# for `Conv2d(20,64,5)`. Finally, the output of
# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
model = nn.Sequential(
nn.Conv2d(1,20,5),
nn.ReLU(),
nn.Conv2d(20,64,5),
nn.ReLU()
)
# Using Sequential with OrderedDict. This is functionally the
# same as the above code
model = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(1,20,5)),
('relu1', nn.ReLU()),
('conv2', nn.Conv2d(20,64,5)),
('relu2', nn.ReLU())
]))
"""
_modules: dict[str, Module] # type: ignore[assignment]
@overload
def __init__(self, *args: Module) -> None:
...
@overload
def __init__(self, arg: "OrderedDict[str, Module]") -> None:
...
def __init__(self, *args):
super().__init__()
if len(args) == 1 and isinstance(args[0], OrderedDict):
for key, module in args[0].items():
self.add_module(key, module)
else:
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def _get_item_by_idx(self, iterator, idx) -> T: # type: ignore[misc, type-var]
"""Get the idx-th item of the iterator."""
size = len(self)
idx = operator.index(idx)
if not -size <= idx < size:
raise IndexError(f"index {idx} is out of range")
idx %= size
return next(islice(iterator, idx, None))
@_copy_to_script_wrapper
def __getitem__(self, idx: Union[slice, int]) -> Union["Sequential", T]:
if isinstance(idx, slice):
return self.__class__(OrderedDict(list(self._modules.items())[idx]))
else:
return self._get_item_by_idx(self._modules.values(), idx)
def __setitem__(self, idx: int, module: Module) -> None:
key: str = self._get_item_by_idx(self._modules.keys(), idx)
return setattr(self, key, module)
def __delitem__(self, idx: Union[slice, int]) -> None:
if isinstance(idx, slice):
for key in list(self._modules.keys())[idx]:
delattr(self, key)
else:
key = self._get_item_by_idx(self._modules.keys(), idx)
delattr(self, key)
# To preserve numbering
str_indices = [str(i) for i in range(len(self._modules))]
self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
@_copy_to_script_wrapper
def __len__(self) -> int:
return len(self._modules)
def __add__(self, other) -> "Sequential":
if isinstance(other, Sequential):
ret = Sequential()
for layer in self:
ret.append(layer)
for layer in other:
ret.append(layer)
return ret
else:
raise ValueError(
"add operator supports only objects "
f"of Sequential class, but {str(type(other))} is given."
)
def pop(self, key: Union[int, slice]) -> Module:
v = self[key]
del self[key]
return v
def __iadd__(self, other) -> Self:
if isinstance(other, Sequential):
offset = len(self)
for i, module in enumerate(other):
self.add_module(str(i + offset), module)
return self
else:
raise ValueError(
"add operator supports only objects "
f"of Sequential class, but {str(type(other))} is given."
)
def __mul__(self, other: int) -> "Sequential":
if not isinstance(other, int):
raise TypeError(
f"unsupported operand type(s) for *: {type(self)} and {type(other)}"
)
elif other <= 0:
raise ValueError(
f"Non-positive multiplication factor {other} for {type(self)}"
)
else:
combined = Sequential()
offset = 0
for _ in range(other):
for module in self:
combined.add_module(str(offset), module)
offset += 1
return combined
def __rmul__(self, other: int) -> "Sequential":
return self.__mul__(other)
def __imul__(self, other: int) -> Self:
if not isinstance(other, int):
raise TypeError(
f"unsupported operand type(s) for *: {type(self)} and {type(other)}"
)
elif other <= 0:
raise ValueError(
f"Non-positive multiplication factor {other} for {type(self)}"
)
else:
len_original = len(self)
offset = len(self)
for _ in range(other - 1):
for i in range(len_original):
self.add_module(str(i + offset), self._modules[str(i)])
offset += len_original
return self
@_copy_to_script_wrapper
def __dir__(self):
keys = super().__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
@_copy_to_script_wrapper
def __iter__(self) -> Iterator[Module]:
return iter(self._modules.values())
# NB: We can't really type check this function as the type of input
# may change dynamically (as is tested in
# TestScript.test_sequential_intermediary_types). Cannot annotate
# with Any as TorchScript expects a more precise type
def forward(self, input):
for module in self:
input = module(input)
return input
def append(self, module: Module) -> "Sequential":
r"""Append a given module to the end.
Args:
module (nn.Module): module to append
"""
self.add_module(str(len(self)), module)
return self
def insert(self, index: int, module: Module) -> "Sequential":
if not isinstance(module, Module):
raise AssertionError(f"module should be of type: {Module}")
n = len(self._modules)
if not (-n <= index <= n):
raise IndexError(f"Index out of range: {index}")
if index < 0:
index += n
for i in range(n, index, -1):
self._modules[str(i)] = self._modules[str(i - 1)]
self._modules[str(index)] = module
return self
def extend(self, sequential) -> "Sequential":
for layer in sequential:
self.append(layer)
return self
class ModuleList(Module):
r"""Holds submodules in a list.
:class:`~torch.nn.ModuleList` can be indexed like a regular Python list, but
modules it contains are properly registered, and will be visible by all
:class:`~torch.nn.Module` methods.
Args:
modules (iterable, optional): an iterable of modules to add
Example::
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
def forward(self, x):
# ModuleList can act as an iterable, or be indexed using ints
for i, l in enumerate(self.linears):
x = self.linears[i // 2](x) + l(x)
return x
"""
_modules: dict[str, Module] # type: ignore[assignment]
def __init__(self, modules: Optional[Iterable[Module]] = None) -> None:
super().__init__()
if modules is not None:
self += modules
def _get_abs_string_index(self, idx):
"""Get the absolute index for the list of modules."""
idx = operator.index(idx)
if not (-len(self) <= idx < len(self)):
raise IndexError(f"index {idx} is out of range")
if idx < 0:
idx += len(self)
return str(idx)
@overload
def __getitem__(self, idx: slice) -> "ModuleList":
...
@overload
def __getitem__(self, idx: int) -> Module:
...
@_copy_to_script_wrapper
def __getitem__(self, idx: Union[int, slice]) -> Union[Module, "ModuleList"]:
if isinstance(idx, slice):
return self.__class__(list(self._modules.values())[idx])
else:
return self._modules[self._get_abs_string_index(idx)]
def __setitem__(self, idx: int, module: Module) -> None:
idx = self._get_abs_string_index(idx)
return setattr(self, str(idx), module)
def __delitem__(self, idx: Union[int, slice]) -> None:
if isinstance(idx, slice):
for k in range(len(self._modules))[idx]:
delattr(self, str(k))
else:
delattr(self, self._get_abs_string_index(idx))
# To preserve numbering, self._modules is being reconstructed with modules after deletion
str_indices = [str(i) for i in range(len(self._modules))]
self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
@_copy_to_script_wrapper
def __len__(self) -> int:
return len(self._modules)
@_copy_to_script_wrapper
def __iter__(self) -> Iterator[Module]:
return iter(self._modules.values())
def __iadd__(self, modules: Iterable[Module]) -> Self:
return self.extend(modules)
def __add__(self, other: Iterable[Module]) -> "ModuleList":
combined = ModuleList()
for i, module in enumerate(chain(self, other)):
combined.add_module(str(i), module)
return combined
def __repr__(self):
"""Return a custom repr for ModuleList that compresses repeated module representations."""
list_of_reprs = [repr(item) for item in self]
if len(list_of_reprs) == 0:
return self._get_name() + "()"
start_end_indices = [[0, 0]]
repeated_blocks = [list_of_reprs[0]]
for i, r in enumerate(list_of_reprs[1:], 1):
if r == repeated_blocks[-1]:
start_end_indices[-1][1] += 1
continue
start_end_indices.append([i, i])
repeated_blocks.append(r)
lines = []
main_str = self._get_name() + "("
for (start_id, end_id), b in zip(start_end_indices, repeated_blocks):
local_repr = f"({start_id}): {b}" # default repr
if start_id != end_id:
n = end_id - start_id + 1
local_repr = f"({start_id}-{end_id}): {n} x {b}"
local_repr = _addindent(local_repr, 2)
lines.append(local_repr)
main_str += "\n " + "\n ".join(lines) + "\n"
main_str += ")"
return main_str
@_copy_to_script_wrapper
def __dir__(self):
keys = super().__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def insert(self, index: int, module: Module) -> None:
r"""Insert a given module before a given index in the list.
Args:
index (int): index to insert.
module (nn.Module): module to insert
"""
for i in range(len(self._modules), index, -1):
self._modules[str(i)] = self._modules[str(i - 1)]
self._modules[str(index)] = module
def append(self, module: Module) -> "ModuleList":
r"""Append a given module to the end of the list.
Args:
module (nn.Module): module to append
"""
self.add_module(str(len(self)), module)
return self
def pop(self, key: Union[int, slice]) -> Module:
v = self[key]
del self[key]
return v
def extend(self, modules: Iterable[Module]) -> Self:
r"""Append modules from a Python iterable to the end of the list.
Args:
modules (iterable): iterable of modules to append
"""
if not isinstance(modules, container_abcs.Iterable):
raise TypeError(
"ModuleList.extend should be called with an "
"iterable, but got " + type(modules).__name__
)
offset = len(self)
for i, module in enumerate(modules):
self.add_module(str(offset + i), module)
return self
    # remove forward altogether to fall back on Module's _forward_unimplemented
class ModuleDict(Module):
r"""Holds submodules in a dictionary.
:class:`~torch.nn.ModuleDict` can be indexed like a regular Python dictionary,
but modules it contains are properly registered, and will be visible by all
:class:`~torch.nn.Module` methods.
:class:`~torch.nn.ModuleDict` is an **ordered** dictionary that respects
* the order of insertion, and
* in :meth:`~torch.nn.ModuleDict.update`, the order of the merged
      ``OrderedDict``, ``dict`` (insertion-ordered since Python 3.6) or another
:class:`~torch.nn.ModuleDict` (the argument to
:meth:`~torch.nn.ModuleDict.update`).
Note that :meth:`~torch.nn.ModuleDict.update` with other unordered mapping
types (e.g., Python's plain ``dict`` before Python version 3.6) does not
preserve the order of the merged mapping.
Args:
modules (iterable, optional): a mapping (dictionary) of (string: module)
or an iterable of key-value pairs of type (string, module)
Example::
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.choices = nn.ModuleDict({
'conv': nn.Conv2d(10, 10, 3),
'pool': nn.MaxPool2d(3)
})
self.activations = nn.ModuleDict([
['lrelu', nn.LeakyReLU()],
['prelu', nn.PReLU()]
])
def forward(self, x, choice, act):
x = self.choices[choice](x)
x = self.activations[act](x)
return x
"""
_modules: dict[str, Module] # type: ignore[assignment]
def __init__(self, modules: Optional[Mapping[str, Module]] = None) -> None:
super().__init__()
if modules is not None:
self.update(modules)
@_copy_to_script_wrapper
def __getitem__(self, key: str) -> Module:
return self._modules[key]
def __setitem__(self, key: str, module: Module) -> None:
self.add_module(key, module)
def __delitem__(self, key: str) -> None:
del self._modules[key]
@_copy_to_script_wrapper
def __len__(self) -> int:
return len(self._modules)
@_copy_to_script_wrapper
def __iter__(self) -> Iterator[str]:
return iter(self._modules)
@_copy_to_script_wrapper
def __contains__(self, key: str) -> bool:
return key in self._modules
def clear(self) -> None:
"""Remove all items from the ModuleDict."""
self._modules.clear()
def pop(self, key: str) -> Module:
r"""Remove key from the ModuleDict and return its module.
Args:
key (str): key to pop from the ModuleDict
"""
v = self[key]
del self[key]
return v
@_copy_to_script_wrapper
def keys(self) -> Iterable[str]:
r"""Return an iterable of the ModuleDict keys."""
return self._modules.keys()
@_copy_to_script_wrapper
def items(self) -> Iterable[tuple[str, Module]]:
r"""Return an iterable of the ModuleDict key/value pairs."""
return self._modules.items()
@_copy_to_script_wrapper
def values(self) -> Iterable[Module]:
r"""Return an iterable of the ModuleDict values."""
return self._modules.values()
def update(self, modules: Mapping[str, Module]) -> None:
r"""Update the :class:`~torch.nn.ModuleDict` with key-value pairs from a mapping, overwriting existing keys.
.. note::
If :attr:`modules` is an ``OrderedDict``, a :class:`~torch.nn.ModuleDict`, or
an iterable of key-value pairs, the order of new elements in it is preserved.
Args:
modules (iterable): a mapping (dictionary) from string to :class:`~torch.nn.Module`,
or an iterable of key-value pairs of type (string, :class:`~torch.nn.Module`)
"""
if not isinstance(modules, container_abcs.Iterable):
raise TypeError(
"ModuleDict.update should be called with an "
"iterable of key/value pairs, but got " + type(modules).__name__
)
if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):
for key, module in modules.items():
self[key] = module
else:
# modules here can be a list with two items
for j, m in enumerate(modules):
if not isinstance(m, container_abcs.Iterable):
raise TypeError(
"ModuleDict update sequence element "
"#" + str(j) + " should be Iterable; is" + type(m).__name__
)
if not len(m) == 2:
raise ValueError(
"ModuleDict update sequence element "
"#" + str(j) + " has length " + str(len(m)) + "; 2 is required"
)
# modules can be Mapping (what it's typed at), or a list: [(name1, module1), (name2, module2)]
# that's too cumbersome to type correctly with overloads, so we add an ignore here
self[m[0]] = m[1] # type: ignore[assignment]
    # remove forward altogether to fall back on Module's _forward_unimplemented
class ParameterList(Module):
r"""Holds parameters in a list.
:class:`~torch.nn.ParameterList` can be used like a regular Python
list, but Tensors that are :class:`~torch.nn.Parameter` are properly registered,
and will be visible by all :class:`~torch.nn.Module` methods.
Note that the constructor, assigning an element of the list, the
:meth:`~torch.nn.ParameterList.append` method and the :meth:`~torch.nn.ParameterList.extend`
method will convert any :class:`~torch.Tensor` into :class:`~torch.nn.Parameter`.
Args:
parameters (iterable, optional): an iterable of elements to add to the list.
Example::
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)])
def forward(self, x):
# ParameterList can act as an iterable, or be indexed using ints
for i, p in enumerate(self.params):
x = self.params[i // 2].mm(x) + p.mm(x)
return x
"""
def __init__(self, values: Optional[Iterable[Any]] = None) -> None:
super().__init__()
self._size = 0
if values is not None:
self += values
def _get_abs_string_index(self, idx):
"""Get the absolute index for the list of modules."""
idx = operator.index(idx)
if not (-len(self) <= idx < len(self)):
raise IndexError(f"index {idx} is out of range")
if idx < 0:
idx += len(self)
return str(idx)
@overload
def __getitem__(self, idx: int) -> Any:
...
@overload
def __getitem__(self: T, idx: slice) -> T:
...
def __getitem__(self, idx):
if isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
out = self.__class__()
for i in range(start, stop, step):
out.append(self[i])
return out
else:
idx = self._get_abs_string_index(idx)
return getattr(self, str(idx))
def __setitem__(self, idx: int, param: Any) -> None:
        # Note that all other functions that add an entry to the list part of
# the ParameterList end up here. So this is the only place where we need
# to wrap things into Parameter if needed.
# Objects added via setattr() are not in the list part and thus won't
# call into this function.
idx = self._get_abs_string_index(idx)
if isinstance(param, torch.Tensor) and not isinstance(param, Parameter):
param = Parameter(param)
return setattr(self, str(idx), param)
def __len__(self) -> int:
return self._size
def __iter__(self) -> Iterator[Any]:
return iter(self[i] for i in range(len(self)))
def __iadd__(self, parameters: Iterable[Any]) -> Self:
return self.extend(parameters)
def __dir__(self):
keys = super().__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def append(self, value: Any) -> "ParameterList":
"""Append a given value at the end of the list.
Args:
value (Any): value to append
"""
new_idx = len(self)
self._size += 1
self[new_idx] = value
return self
def extend(self, values: Iterable[Any]) -> Self:
"""Append values from a Python iterable to the end of the list.
Args:
values (iterable): iterable of values to append
"""
# Tensor is an iterable but we never want to unpack it here
if not isinstance(values, container_abcs.Iterable) or isinstance(
values, torch.Tensor
):
raise TypeError(
"ParameterList.extend should be called with an "
"iterable, but got " + type(values).__name__
)
for value in values:
self.append(value)
return self
def extra_repr(self) -> str:
child_lines = []
for k, p in enumerate(self):
if isinstance(p, torch.Tensor):
size_str = "x".join(str(size) for size in p.size())
if p.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
device_str = f" ({p.device})"
else:
device_str = ""
parastr = "{} containing: [{} of size {}{}]".format(
"Parameter" if isinstance(p, Parameter) else "Tensor",
p.dtype,
size_str,
device_str,
)
child_lines.append(" (" + str(k) + "): " + parastr)
else:
child_lines.append(
" (" + str(k) + "): Object of type: " + type(p).__name__
)
tmpstr = "\n".join(child_lines)
return tmpstr
def __call__(self, *args, **kwargs):
raise RuntimeError("ParameterList should not be called.")
class ParameterDict(Module):
r"""Holds parameters in a dictionary.
ParameterDict can be indexed like a regular Python dictionary, but Parameters it
contains are properly registered, and will be visible by all Module methods.
    Other objects are treated as they would be by a regular Python dictionary.
:class:`~torch.nn.ParameterDict` is an **ordered** dictionary.
:meth:`~torch.nn.ParameterDict.update` with other unordered mapping
types (e.g., Python's plain ``dict``) does not preserve the order of the
merged mapping. On the other hand, ``OrderedDict`` or another :class:`~torch.nn.ParameterDict`
will preserve their ordering.
Note that the constructor, assigning an element of the dictionary and the
:meth:`~torch.nn.ParameterDict.update` method will convert any :class:`~torch.Tensor` into
:class:`~torch.nn.Parameter`.
Args:
values (iterable, optional): a mapping (dictionary) of
(string : Any) or an iterable of key-value pairs
of type (string, Any)
Example::
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.params = nn.ParameterDict({
'left': nn.Parameter(torch.randn(5, 10)),
'right': nn.Parameter(torch.randn(5, 10))
})
def forward(self, x, choice):
x = self.params[choice].mm(x)
return x
"""
def __init__(self, parameters: Any = None) -> None:
super().__init__()
self._keys: dict[str, None] = {}
if parameters is not None:
self.update(parameters)
def _key_to_attr(self, key: str) -> str:
if not isinstance(key, str):
raise TypeError(
"Index given to ParameterDict cannot be used as a key as it is "
f"not a string (type is '{type(key).__name__}'). Open an issue on "
"github if you need non-string keys."
)
else:
# Use the key as-is so that `.named_parameters()` returns the right thing
return key
def __getitem__(self, key: str) -> Any:
attr = self._key_to_attr(key)
return getattr(self, attr)
def __setitem__(self, key: str, value: Any) -> None:
        # Note that all other functions that add an entry to the dictionary part of
# the ParameterDict end up here. So this is the only place where we need
# to wrap things into Parameter if needed.
# Objects added via setattr() are not in the dictionary part and thus won't
# call into this function.
self._keys[key] = None
attr = self._key_to_attr(key)
if isinstance(value, torch.Tensor) and not isinstance(value, Parameter):
value = Parameter(value)
setattr(self, attr, value)
def __delitem__(self, key: str) -> None:
del self._keys[key]
attr = self._key_to_attr(key)
delattr(self, attr)
def __len__(self) -> int:
return len(self._keys)
def __iter__(self) -> Iterator[str]:
return iter(self._keys)
def __reversed__(self) -> Iterator[str]:
return reversed(list(self._keys))
def copy(self) -> "ParameterDict":
"""Return a copy of this :class:`~torch.nn.ParameterDict` instance."""
# We have to use an OrderedDict because the ParameterDict constructor
# behaves differently on plain dict vs OrderedDict
return ParameterDict(OrderedDict((k, self[k]) for k in self._keys))
def __contains__(self, key: str) -> bool:
return key in self._keys
def setdefault(self, key: str, default: Optional[Any] = None) -> Any:
"""Set the default for a key in the Parameterdict.
If key is in the ParameterDict, return its value.
If not, insert `key` with a parameter `default` and return `default`.
`default` defaults to `None`.
Args:
key (str): key to set default for
default (Any): the parameter set to the key
"""
if key not in self:
self[key] = default
return self[key]
def clear(self) -> None:
"""Remove all items from the ParameterDict."""
for k in self._keys.copy():
del self[k]
def pop(self, key: str) -> Any:
r"""Remove key from the ParameterDict and return its parameter.
Args:
key (str): key to pop from the ParameterDict
"""
v = self[key]
del self[key]
return v
def popitem(self) -> tuple[str, Any]:
"""Remove and return the last inserted `(key, parameter)` pair from the ParameterDict."""
k, _ = self._keys.popitem()
# We need the key in the _keys to be able to access/del
self._keys[k] = None
val = self[k]
del self[k]
return k, val
def get(self, key: str, default: Optional[Any] = None) -> Any:
r"""Return the parameter associated with key if present. Otherwise return default if provided, None if not.
Args:
key (str): key to get from the ParameterDict
default (Parameter, optional): value to return if key not present
"""
return self[key] if key in self else default
def fromkeys(
self, keys: Iterable[str], default: Optional[Any] = None
) -> "ParameterDict":
r"""Return a new ParameterDict with the keys provided.
Args:
keys (iterable, string): keys to make the new ParameterDict from
default (Parameter, optional): value to set for all keys
"""
return ParameterDict((k, default) for k in keys)
def keys(self) -> Iterable[str]:
r"""Return an iterable of the ParameterDict keys."""
return self._keys.keys()
def items(self) -> Iterable[tuple[str, Any]]:
r"""Return an iterable of the ParameterDict key/value pairs."""
return ((k, self[k]) for k in self._keys)
def values(self) -> Iterable[Any]:
r"""Return an iterable of the ParameterDict values."""
return (self[k] for k in self._keys)
def update(self, parameters: Union[Mapping[str, Any], "ParameterDict"]) -> None:
r"""Update the :class:`~torch.nn.ParameterDict` with key-value pairs from ``parameters``, overwriting existing keys.
.. note::
If :attr:`parameters` is an ``OrderedDict``, a :class:`~torch.nn.ParameterDict`, or
an iterable of key-value pairs, the order of new elements in it is preserved.
Args:
parameters (iterable): a mapping (dictionary) from string to
:class:`~torch.nn.Parameter`, or an iterable of
key-value pairs of type (string, :class:`~torch.nn.Parameter`)
"""
if not isinstance(parameters, container_abcs.Iterable):
raise TypeError(
"ParametersDict.update should be called with an "
"iterable of key/value pairs, but got " + type(parameters).__name__
)
if isinstance(parameters, (OrderedDict, ParameterDict)):
for key, parameter in parameters.items():
self[key] = parameter
elif isinstance(parameters, container_abcs.Mapping):
for key, parameter in sorted(parameters.items()):
self[key] = parameter
else:
for j, p in enumerate(parameters):
if not isinstance(p, container_abcs.Iterable):
raise TypeError(
"ParameterDict update sequence element "
"#" + str(j) + " should be Iterable; is" + type(p).__name__
)
if not len(p) == 2:
raise ValueError(
"ParameterDict update sequence element "
"#" + str(j) + " has length " + str(len(p)) + "; 2 is required"
)
# parameters as length-2 list too cumbersome to type, see ModuleDict.update comment
self[p[0]] = p[1] # type: ignore[assignment]
def extra_repr(self) -> str:
child_lines = []
for k, p in self.items():
if isinstance(p, torch.Tensor):
size_str = "x".join(str(size) for size in p.size())
if p.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
device_str = f" ({p.device})"
else:
device_str = ""
parastr = "{} containing: [{} of size {}{}]".format(
"Parameter" if isinstance(p, Parameter) else "Tensor",
torch.typename(p),
size_str,
device_str,
)
child_lines.append(" (" + str(k) + "): " + parastr)
else:
child_lines.append(
" (" + str(k) + "): Object of type: " + type(p).__name__
)
tmpstr = "\n".join(child_lines)
return tmpstr
def __call__(self, input):
raise RuntimeError("ParameterDict should not be called.")
def __or__(self, other: "ParameterDict") -> "ParameterDict":
copy = self.copy()
copy.update(other)
return copy
def __ror__(self, other: "ParameterDict") -> "ParameterDict":
copy = other.copy()
copy.update(self)
return copy
def __ior__(self, other: "ParameterDict") -> Self:
self.update(other)
return self
```
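A short, self-contained sketch (not part of the file above) exercising the container behaviors documented in the docstrings: `Sequential` indexing, slicing and the documented `+`/`*` operators, dictionary-style lookup on `ModuleDict`, and the `|` merge on `ParameterDict`.
```py
import torch
import torch.nn as nn

# Sequential: indexing, slicing, and the documented + / * operators.
model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
head = model[:2]          # slicing returns a new Sequential
both = model + head       # __add__ concatenates two Sequentials
repeated = head * 2       # __mul__ repeats the registered modules
out = model(torch.randn(1, 4))

# ModuleDict: dictionary-style registration and lookup of submodules.
acts = nn.ModuleDict({"relu": nn.ReLU(), "gelu": nn.GELU()})
y = acts["gelu"](out)

# ParameterDict: plain tensors are wrapped into Parameters; | merges two dicts.
left = nn.ParameterDict({"w": torch.randn(2, 2)})
right = nn.ParameterDict({"b": torch.zeros(2)})
merged = left | right
print(list(merged.keys()))  # ['w', 'b']
```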
|
================================================================================================================
SOURCE CODE FILE: conv.py
LINES: 1
SIZE: 75.77 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\conv.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import math
from typing import Optional, Union
from typing_extensions import deprecated
import torch
from torch import Tensor
from torch._torch_docs import reproducibility_notes
from torch.nn import functional as F, init
from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t
from torch.nn.parameter import Parameter, UninitializedParameter
from .lazy import LazyModuleMixin
from .module import Module
from .utils import _pair, _reverse_repeat_tuple, _single, _triple
__all__ = [
"Conv1d",
"Conv2d",
"Conv3d",
"ConvTranspose1d",
"ConvTranspose2d",
"ConvTranspose3d",
"LazyConv1d",
"LazyConv2d",
"LazyConv3d",
"LazyConvTranspose1d",
"LazyConvTranspose2d",
"LazyConvTranspose3d",
]
convolution_notes = {
"groups_note": r"""* :attr:`groups` controls the connections between inputs and outputs.
:attr:`in_channels` and :attr:`out_channels` must both be divisible by
:attr:`groups`. For example,
* At groups=1, all inputs are convolved to all outputs.
* At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels
and producing half the output channels, and both subsequently
concatenated.
* At groups= :attr:`in_channels`, each input channel is convolved with
its own set of filters (of size
:math:`\frac{\text{out\_channels}}{\text{in\_channels}}`).""",
"depthwise_separable_note": r"""When `groups == in_channels` and `out_channels == K * in_channels`,
where `K` is a positive integer, this operation is also known as a "depthwise convolution".
In other words, for an input of size :math:`(N, C_{in}, L_{in})`,
a depthwise convolution with a depthwise multiplier `K` can be performed with the arguments
:math:`(C_\text{in}=C_\text{in}, C_\text{out}=C_\text{in} \times \text{K}, ..., \text{groups}=C_\text{in})`.""",
} # noqa: B950
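# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the depthwise special
# case described in `depthwise_separable_note` above. `_example_depthwise_conv`
# is a hypothetical helper added purely for documentation; nothing in this
# module calls it. With groups == in_channels and out_channels == K * in_channels,
# each input channel is convolved with its own set of K filters.
def _example_depthwise_conv(in_channels: int = 8, multiplier: int = 2) -> "Conv2d":
    return Conv2d(
        in_channels,
        multiplier * in_channels,
        kernel_size=3,
        padding=1,
        groups=in_channels,
    )
# ---------------------------------------------------------------------------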
class _ConvNd(Module):
__constants__ = [
"stride",
"padding",
"dilation",
"groups",
"padding_mode",
"output_padding",
"in_channels",
"out_channels",
"kernel_size",
]
__annotations__ = {"bias": Optional[torch.Tensor]}
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: # type: ignore[empty-body]
...
in_channels: int
_reversed_padding_repeated_twice: list[int]
out_channels: int
kernel_size: tuple[int, ...]
stride: tuple[int, ...]
padding: Union[str, tuple[int, ...]]
dilation: tuple[int, ...]
transposed: bool
output_padding: tuple[int, ...]
groups: int
padding_mode: str
weight: Tensor
bias: Optional[Tensor]
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: tuple[int, ...],
stride: tuple[int, ...],
padding: Union[str, tuple[int, ...]],
dilation: tuple[int, ...],
transposed: bool,
output_padding: tuple[int, ...],
groups: int,
bias: bool,
padding_mode: str,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
if groups <= 0:
raise ValueError("groups must be a positive integer")
if in_channels % groups != 0:
raise ValueError("in_channels must be divisible by groups")
if out_channels % groups != 0:
raise ValueError("out_channels must be divisible by groups")
valid_padding_strings = {"same", "valid"}
if isinstance(padding, str):
if padding not in valid_padding_strings:
raise ValueError(
f"Invalid padding string {padding!r}, should be one of {valid_padding_strings}"
)
if padding == "same" and any(s != 1 for s in stride):
raise ValueError(
"padding='same' is not supported for strided convolutions"
)
valid_padding_modes = {"zeros", "reflect", "replicate", "circular"}
if padding_mode not in valid_padding_modes:
raise ValueError(
f"padding_mode must be one of {valid_padding_modes}, but got padding_mode='{padding_mode}'"
)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
self.padding_mode = padding_mode
# `_reversed_padding_repeated_twice` is the padding to be passed to
# `F.pad` if needed (e.g., for non-zero padding types that are
# implemented as two ops: padding + conv). `F.pad` accepts paddings in
# reverse order than the dimension.
if isinstance(self.padding, str):
self._reversed_padding_repeated_twice = [0, 0] * len(kernel_size)
if padding == "same":
for d, k, i in zip(
dilation, kernel_size, range(len(kernel_size) - 1, -1, -1)
):
total_padding = d * (k - 1)
left_pad = total_padding // 2
self._reversed_padding_repeated_twice[2 * i] = left_pad
self._reversed_padding_repeated_twice[2 * i + 1] = (
total_padding - left_pad
)
else:
self._reversed_padding_repeated_twice = _reverse_repeat_tuple(
self.padding, 2
)
if transposed:
self.weight = Parameter(
torch.empty(
(in_channels, out_channels // groups, *kernel_size),
**factory_kwargs,
)
)
else:
self.weight = Parameter(
torch.empty(
(out_channels, in_channels // groups, *kernel_size),
**factory_kwargs,
)
)
if bias:
self.bias = Parameter(torch.empty(out_channels, **factory_kwargs))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self) -> None:
# Setting a=sqrt(5) in kaiming_uniform is the same as initializing with
# uniform(-1/sqrt(k), 1/sqrt(k)), where k = weight.size(1) * prod(*kernel_size)
# For more details see: https://github.com/pytorch/pytorch/issues/15314#issuecomment-477448573
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
if fan_in != 0:
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def extra_repr(self):
s = (
"{in_channels}, {out_channels}, kernel_size={kernel_size}"
", stride={stride}"
)
if self.padding != (0,) * len(self.padding):
s += ", padding={padding}"
if self.dilation != (1,) * len(self.dilation):
s += ", dilation={dilation}"
if self.output_padding != (0,) * len(self.output_padding):
s += ", output_padding={output_padding}"
if self.groups != 1:
s += ", groups={groups}"
if self.bias is None:
s += ", bias=False"
if self.padding_mode != "zeros":
s += ", padding_mode={padding_mode}"
return s.format(**self.__dict__)
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, "padding_mode"):
self.padding_mode = "zeros"
class Conv1d(_ConvNd):
__doc__ = (
r"""Applies a 1D convolution over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size
:math:`(N, C_{\text{in}}, L)` and output :math:`(N, C_{\text{out}}, L_{\text{out}})` can be
precisely described as:
.. math::
\text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
\sum_{k = 0}^{C_{in} - 1} \text{weight}(C_{\text{out}_j}, k)
\star \text{input}(N_i, k)
where :math:`\star` is the valid `cross-correlation`_ operator,
:math:`N` is a batch size, :math:`C` denotes a number of channels,
:math:`L` is a length of signal sequence.
"""
+ r"""
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation, a single
number or a one-element tuple.
* :attr:`padding` controls the amount of padding applied to the input. It
can be either a string {{'valid', 'same'}} or a tuple of ints giving the
amount of implicit padding applied on both sides.
"""
"""
* :attr:`dilation` controls the spacing between the kernel points; also
known as the \u00e0 trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
"""
r"""
{groups_note}
Note:
{depthwise_separable_note}
Note:
{cudnn_reproducibility_note}
Note:
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
    the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
Note:
This module supports complex data types i.e. ``complex32, complex64, complex128``.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int, tuple or str, optional): Padding added to both sides of
the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel
elements. Default: 1
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the
output. Default: ``True``
padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'zeros'``
""".format(
**reproducibility_notes, **convolution_notes
)
+ r"""
Shape:
- Input: :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`
- Output: :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, where
.. math::
L_{out} = \left\lfloor\frac{L_{in} + 2 \times \text{padding} - \text{dilation}
\times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{out\_channels},
\frac{\text{in\_channels}}{\text{groups}}, \text{kernel\_size})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`
bias (Tensor): the learnable bias of the module of shape
(out_channels). If :attr:`bias` is ``True``, then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`
Examples::
>>> m = nn.Conv1d(16, 33, 3, stride=2)
>>> input = torch.randn(20, 16, 50)
>>> output = m(input)
.. _cross-correlation:
https://en.wikipedia.org/wiki/Cross-correlation
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
)
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: Union[str, _size_1_t] = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros", # TODO: refine this type
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
# we create new variables below to make mypy happy since kernel_size has
# type Union[int, Tuple[int]] and kernel_size_ has type Tuple[int]
kernel_size_ = _single(kernel_size)
stride_ = _single(stride)
padding_ = padding if isinstance(padding, str) else _single(padding)
dilation_ = _single(dilation)
super().__init__(
in_channels,
out_channels,
kernel_size_,
stride_,
padding_,
dilation_,
False,
_single(0),
groups,
bias,
padding_mode,
**factory_kwargs,
)
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
if self.padding_mode != "zeros":
return F.conv1d(
F.pad(
input, self._reversed_padding_repeated_twice, mode=self.padding_mode
),
weight,
bias,
self.stride,
_single(0),
self.dilation,
self.groups,
)
return F.conv1d(
input, weight, bias, self.stride, self.padding, self.dilation, self.groups
)
def forward(self, input: Tensor) -> Tensor:
return self._conv_forward(input, self.weight, self.bias)
class Conv2d(_ConvNd):
__doc__ = (
r"""Applies a 2D convolution over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size
:math:`(N, C_{\text{in}}, H, W)` and output :math:`(N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})`
can be precisely described as:
.. math::
\text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
\sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)
where :math:`\star` is the valid 2D `cross-correlation`_ operator,
:math:`N` is a batch size, :math:`C` denotes a number of channels,
:math:`H` is a height of input planes in pixels, and :math:`W` is
width in pixels.
"""
+ r"""
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation, a single
number or a tuple.
* :attr:`padding` controls the amount of padding applied to the input. It
can be either a string {{'valid', 'same'}} or an int / a tuple of ints giving the
amount of implicit padding applied on both sides.
"""
"""
* :attr:`dilation` controls the spacing between the kernel points; also
known as the \u00e0 trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
"""
r"""
{groups_note}
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Note:
{depthwise_separable_note}
Note:
{cudnn_reproducibility_note}
Note:
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
    the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
Note:
This module supports complex data types i.e. ``complex32, complex64, complex128``.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int, tuple or str, optional): Padding added to all four sides of
the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the
output. Default: ``True``
padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'zeros'``
""".format(
**reproducibility_notes, **convolution_notes
)
+ r"""
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
\times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
\times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
:math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape
(out_channels). If :attr:`bias` is ``True``,
then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
Examples:
>>> # With square kernels and equal stride
>>> m = nn.Conv2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> # non-square kernels and unequal stride and with padding and dilation
>>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
>>> input = torch.randn(20, 16, 50, 100)
>>> output = m(input)
.. _cross-correlation:
https://en.wikipedia.org/wiki/Cross-correlation
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
)
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: Union[str, _size_2_t] = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros", # TODO: refine this type
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
kernel_size_ = _pair(kernel_size)
stride_ = _pair(stride)
padding_ = padding if isinstance(padding, str) else _pair(padding)
dilation_ = _pair(dilation)
super().__init__(
in_channels,
out_channels,
kernel_size_,
stride_,
padding_,
dilation_,
False,
_pair(0),
groups,
bias,
padding_mode,
**factory_kwargs,
)
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
if self.padding_mode != "zeros":
return F.conv2d(
F.pad(
input, self._reversed_padding_repeated_twice, mode=self.padding_mode
),
weight,
bias,
self.stride,
_pair(0),
self.dilation,
self.groups,
)
return F.conv2d(
input, weight, bias, self.stride, self.padding, self.dilation, self.groups
)
def forward(self, input: Tensor) -> Tensor:
return self._conv_forward(input, self.weight, self.bias)
class Conv3d(_ConvNd):
__doc__ = (
r"""Applies a 3D convolution over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C_{in}, D, H, W)`
and output :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` can be precisely described as:
.. math::
out(N_i, C_{out_j}) = bias(C_{out_j}) +
\sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \star input(N_i, k)
where :math:`\star` is the valid 3D `cross-correlation`_ operator
"""
+ r"""
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation.
* :attr:`padding` controls the amount of padding applied to the input. It
can be either a string {{'valid', 'same'}} or a tuple of ints giving the
amount of implicit padding applied on both sides.
"""
"""
* :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
"""
r"""
{groups_note}
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the depth, height and width dimension
- a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
the second `int` for the height dimension and the third `int` for the width dimension
Note:
{depthwise_separable_note}
Note:
{cudnn_reproducibility_note}
Note:
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
    the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
Note:
This module supports complex data types i.e. ``complex32, complex64, complex128``.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int, tuple or str, optional): Padding added to all six sides of
the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
padding_mode (str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
""".format(
**reproducibility_notes, **convolution_notes
)
+ r"""
Shape:
- Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`,
where
.. math::
D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
\times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
\times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2]
\times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
:math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
Examples::
>>> # With square kernels and equal stride
>>> m = nn.Conv3d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
>>> input = torch.randn(20, 16, 10, 50, 100)
>>> output = m(input)
.. _cross-correlation:
https://en.wikipedia.org/wiki/Cross-correlation
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
)
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: Union[str, _size_3_t] = 0,
dilation: _size_3_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
kernel_size_ = _triple(kernel_size)
stride_ = _triple(stride)
padding_ = padding if isinstance(padding, str) else _triple(padding)
dilation_ = _triple(dilation)
super().__init__(
in_channels,
out_channels,
kernel_size_,
stride_,
padding_,
dilation_,
False,
_triple(0),
groups,
bias,
padding_mode,
**factory_kwargs,
)
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
if self.padding_mode != "zeros":
return F.conv3d(
F.pad(
input, self._reversed_padding_repeated_twice, mode=self.padding_mode
),
weight,
bias,
self.stride,
_triple(0),
self.dilation,
self.groups,
)
return F.conv3d(
input, weight, bias, self.stride, self.padding, self.dilation, self.groups
)
def forward(self, input: Tensor) -> Tensor:
return self._conv_forward(input, self.weight, self.bias)
class _ConvTransposeNd(_ConvNd):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
bias,
padding_mode,
device=None,
dtype=None,
) -> None:
if padding_mode != "zeros":
raise ValueError(
f'Only "zeros" padding mode is supported for {self.__class__.__name__}'
)
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
bias,
padding_mode,
**factory_kwargs,
)
# dilation being an optional parameter is for backwards
# compatibility
def _output_padding(
self,
input: Tensor,
output_size: Optional[list[int]],
stride: list[int],
padding: list[int],
kernel_size: list[int],
num_spatial_dims: int,
dilation: Optional[list[int]] = None,
) -> list[int]:
if output_size is None:
ret = _single(self.output_padding) # converting to list if was not already
else:
has_batch_dim = input.dim() == num_spatial_dims + 2
num_non_spatial_dims = 2 if has_batch_dim else 1
if len(output_size) == num_non_spatial_dims + num_spatial_dims:
output_size = output_size[num_non_spatial_dims:]
if len(output_size) != num_spatial_dims:
raise ValueError(
f"ConvTranspose{num_spatial_dims}D: for {input.dim()}D input, output_size must have {num_spatial_dims} "
f"or {num_non_spatial_dims + num_spatial_dims} elements (got {len(output_size)})"
)
min_sizes = torch.jit.annotate(list[int], [])
max_sizes = torch.jit.annotate(list[int], [])
for d in range(num_spatial_dims):
dim_size = (
(input.size(d + num_non_spatial_dims) - 1) * stride[d]
- 2 * padding[d]
+ (dilation[d] if dilation is not None else 1)
* (kernel_size[d] - 1)
+ 1
)
min_sizes.append(dim_size)
max_sizes.append(min_sizes[d] + stride[d] - 1)
for i in range(len(output_size)):
size = output_size[i]
min_size = min_sizes[i]
max_size = max_sizes[i]
if size < min_size or size > max_size:
raise ValueError(
f"requested an output size of {output_size}, but valid sizes range "
f"from {min_sizes} to {max_sizes} (for an input of {input.size()[2:]})"
)
res = torch.jit.annotate(list[int], [])
for d in range(num_spatial_dims):
res.append(output_size[d] - min_sizes[d])
ret = res
return ret
class ConvTranspose1d(_ConvTransposeNd):
__doc__ = (
r"""Applies a 1D transposed convolution operator over an input image
composed of several input planes.
This module can be seen as the gradient of Conv1d with respect to its input.
It is also known as a fractionally-strided convolution or
a deconvolution (although it is not an actual deconvolution operation as it does
not compute a true inverse of convolution). For more information, see the visualizations
`here`_ and the `Deconvolutional Networks`_ paper.
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation.
* :attr:`padding` controls the amount of implicit zero padding on both
sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
below for details.
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
"""
"""
* :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
"""
r"""
{groups_note}
Note:
The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
    amount of zero padding to both sides of the input. This is set so that
when a :class:`~torch.nn.Conv1d` and a :class:`~torch.nn.ConvTranspose1d`
are initialized with same parameters, they are inverses of each other in
regard to the input and output shapes. However, when ``stride > 1``,
:class:`~torch.nn.Conv1d` maps multiple input shapes to the same output
shape. :attr:`output_padding` is provided to resolve this ambiguity by
effectively increasing the calculated output shape on one side. Note
that :attr:`output_padding` is only used to find output shape, but does
not actually add zero-padding to output.
Note:
In some circumstances when using the CUDA backend with CuDNN, this operator
may select a nondeterministic algorithm to increase performance. If this is
undesirable, you can try to make the operation deterministic (potentially at
a performance cost) by setting ``torch.backends.cudnn.deterministic =
True``.
Please see the notes on :doc:`/notes/randomness` for background.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
""".format(
**reproducibility_notes, **convolution_notes
)
+ r"""
Shape:
- Input: :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`
- Output: :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, where
.. math::
L_{out} = (L_{in} - 1) \times \text{stride} - 2 \times \text{padding} + \text{dilation}
\times (\text{kernel\_size} - 1) + \text{output\_padding} + 1
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
:math:`\text{kernel\_size})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \text{kernel\_size}}`
bias (Tensor): the learnable bias of the module of shape (out_channels).
If :attr:`bias` is ``True``, then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \text{kernel\_size}}`
.. _`here`:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
.. _`Deconvolutional Networks`:
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
"""
)
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: _size_1_t = 0,
output_padding: _size_1_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_1_t = 1,
padding_mode: str = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
output_padding = _single(output_padding)
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
True,
output_padding,
groups,
bias,
padding_mode,
**factory_kwargs,
)
def forward(self, input: Tensor, output_size: Optional[list[int]] = None) -> Tensor:
if self.padding_mode != "zeros":
raise ValueError(
"Only `zeros` padding mode is supported for ConvTranspose1d"
)
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
num_spatial_dims = 1
output_padding = self._output_padding(
input,
output_size,
self.stride, # type: ignore[arg-type]
self.padding, # type: ignore[arg-type]
self.kernel_size, # type: ignore[arg-type]
num_spatial_dims,
self.dilation, # type: ignore[arg-type]
)
return F.conv_transpose1d(
input,
self.weight,
self.bias,
self.stride,
self.padding,
output_padding,
self.groups,
self.dilation,
)
class ConvTranspose2d(_ConvTransposeNd):
__doc__ = (
r"""Applies a 2D transposed convolution operator over an input image
composed of several input planes.
This module can be seen as the gradient of Conv2d with respect to its input.
It is also known as a fractionally-strided convolution or
a deconvolution (although it is not an actual deconvolution operation as it does
not compute a true inverse of convolution). For more information, see the visualizations
`here`_ and the `Deconvolutional Networks`_ paper.
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation.
* :attr:`padding` controls the amount of implicit zero padding on both
sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
below for details.
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
"""
"""
* :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
"""
r"""
{groups_note}
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimensions
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Note:
The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
amount of zero padding to both sides of the input. This is set so that
when a :class:`~torch.nn.Conv2d` and a :class:`~torch.nn.ConvTranspose2d`
are initialized with the same parameters, they are inverses of each other in
regard to the input and output shapes. However, when ``stride > 1``,
:class:`~torch.nn.Conv2d` maps multiple input shapes to the same output
shape. :attr:`output_padding` is provided to resolve this ambiguity by
effectively increasing the calculated output shape on one side. Note
that :attr:`output_padding` is only used to find output shape, but does
not actually add zero-padding to output.
Note:
{cudnn_reproducibility_note}
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of each dimension in the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
""".format(
**reproducibility_notes, **convolution_notes
)
+ r"""
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where
.. math::
H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
\times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
.. math::
W_{out} = (W_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
\times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
:math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels)
If :attr:`bias` is ``True``, then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
Examples::
>>> # With square kernels and equal stride
>>> m = nn.ConvTranspose2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> input = torch.randn(20, 16, 50, 100)
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> input = torch.randn(1, 16, 12, 12)
>>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
>>> upsample = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12])
.. _`here`:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
.. _`Deconvolutional Networks`:
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
"""
)
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
output_padding: _size_2_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_2_t = 1,
padding_mode: str = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
output_padding = _pair(output_padding)
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
True,
output_padding,
groups,
bias,
padding_mode,
**factory_kwargs,
)
def forward(self, input: Tensor, output_size: Optional[list[int]] = None) -> Tensor:
if self.padding_mode != "zeros":
raise ValueError(
"Only `zeros` padding mode is supported for ConvTranspose2d"
)
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
num_spatial_dims = 2
output_padding = self._output_padding(
input,
output_size,
self.stride, # type: ignore[arg-type]
self.padding, # type: ignore[arg-type]
self.kernel_size, # type: ignore[arg-type]
num_spatial_dims,
self.dilation, # type: ignore[arg-type]
)
return F.conv_transpose2d(
input,
self.weight,
self.bias,
self.stride,
self.padding,
output_padding,
self.groups,
self.dilation,
)
class ConvTranspose3d(_ConvTransposeNd):
__doc__ = (
r"""Applies a 3D transposed convolution operator over an input image composed of several input
planes.
The transposed convolution operator multiplies each input value element-wise by a learnable kernel,
and sums over the outputs from all input feature planes.
This module can be seen as the gradient of Conv3d with respect to its input.
It is also known as a fractionally-strided convolution or
a deconvolution (although it is not an actual deconvolution operation as it does
not compute a true inverse of convolution). For more information, see the visualizations
`here`_ and the `Deconvolutional Networks`_ paper.
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation.
* :attr:`padding` controls the amount of implicit zero padding on both
sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
below for details.
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
"""
"""
* :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
"""
r"""
{groups_note}
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
can either be:
- a single ``int`` -- in which case the same value is used for the depth, height and width dimensions
- a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
the second `int` for the height dimension and the third `int` for the width dimension
Note:
The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
amount of zero padding to both sides of the input. This is set so that
when a :class:`~torch.nn.Conv3d` and a :class:`~torch.nn.ConvTranspose3d`
are initialized with the same parameters, they are inverses of each other in
regard to the input and output shapes. However, when ``stride > 1``,
:class:`~torch.nn.Conv3d` maps multiple input shapes to the same output
shape. :attr:`output_padding` is provided to resolve this ambiguity by
effectively increasing the calculated output shape on one side. Note
that :attr:`output_padding` is only used to find output shape, but does
not actually add zero-padding to output.
Note:
{cudnn_reproducibility_note}
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of each dimension in the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
""".format(
**reproducibility_notes, **convolution_notes
)
+ r"""
Shape:
- Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` or
:math:`(C_{out}, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = (D_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
\times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
.. math::
H_{out} = (H_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
\times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
.. math::
W_{out} = (W_{in} - 1) \times \text{stride}[2] - 2 \times \text{padding}[2] + \text{dilation}[2]
\times (\text{kernel\_size}[2] - 1) + \text{output\_padding}[2] + 1
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
:math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels)
If :attr:`bias` is ``True``, then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
Examples::
>>> # With square kernels and equal stride
>>> m = nn.ConvTranspose3d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(0, 4, 2))
>>> input = torch.randn(20, 16, 10, 50, 100)
>>> output = m(input)
.. _`here`:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
.. _`Deconvolutional Networks`:
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
"""
)
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: _size_3_t = 0,
output_padding: _size_3_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_3_t = 1,
padding_mode: str = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
output_padding = _triple(output_padding)
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
True,
output_padding,
groups,
bias,
padding_mode,
**factory_kwargs,
)
def forward(self, input: Tensor, output_size: Optional[list[int]] = None) -> Tensor:
if self.padding_mode != "zeros":
raise ValueError(
"Only `zeros` padding mode is supported for ConvTranspose3d"
)
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
num_spatial_dims = 3
output_padding = self._output_padding(
input,
output_size,
self.stride, # type: ignore[arg-type]
self.padding, # type: ignore[arg-type]
self.kernel_size, # type: ignore[arg-type]
num_spatial_dims,
self.dilation, # type: ignore[arg-type]
)
return F.conv_transpose3d(
input,
self.weight,
self.bias,
self.stride,
self.padding,
output_padding,
self.groups,
self.dilation,
)
# TODO: Deprecate and remove the following alias `_ConvTransposeMixin`.
#
# `_ConvTransposeMixin` was a mixin that was removed. It is meant to be used
# with `_ConvNd` to construct actual module classes that implement conv
# transpose ops:
#
# class MyConvTranspose(_ConvNd, _ConvTransposeMixin):
# ...
#
# In PyTorch, it has been replaced by `_ConvTransposeNd`, which is a proper
# subclass of `_ConvNd`. However, some user code in the wild still (incorrectly)
# uses the internal class `_ConvTransposeMixin`. Hence, we provide this alias
# for BC, because it is cheap and easy for us to do so, even though
# `_ConvTransposeNd` is not really a mixin anymore (but multiple inheritance as
# above would still work).
class _ConvTransposeMixin(_ConvTransposeNd):
@deprecated(
"`_ConvTransposeMixin` is a deprecated internal class. "
"Please consider using public APIs.",
category=FutureWarning,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO: Conv2dLocal
# TODO: Conv2dMap
# TODO: ConvTranspose2dMap
class _LazyConvXdMixin(LazyModuleMixin):
groups: int
transposed: bool
in_channels: int
out_channels: int
kernel_size: tuple[int, ...]
weight: UninitializedParameter
bias: UninitializedParameter
def reset_parameters(self) -> None:
# has_uninitialized_params is defined in parent class and it is using a protocol on self
if not self.has_uninitialized_params() and self.in_channels != 0: # type: ignore[misc]
# "type:ignore[..]" is required because mypy thinks that "reset_parameters" is undefined
# in the super class. It turns out that it is defined in _ConvNd, which is inherited by any class
# that also inherits _LazyConvXdMixin
super().reset_parameters() # type: ignore[misc]
# Signature of "initialize_parameters" is incompatible with the definition in supertype LazyModuleMixin
def initialize_parameters(self, input: Tensor, *args, **kwargs) -> None: # type: ignore[override]
# defined by parent class but using a protocol
if self.has_uninitialized_params(): # type: ignore[misc]
self.in_channels = self._get_in_channels(input)
if self.in_channels % self.groups != 0:
raise ValueError("in_channels must be divisible by groups")
assert isinstance(self.weight, UninitializedParameter)
if self.transposed:
self.weight.materialize(
(
self.in_channels,
self.out_channels // self.groups,
*self.kernel_size,
)
)
else:
self.weight.materialize(
(
self.out_channels,
self.in_channels // self.groups,
*self.kernel_size,
)
)
if self.bias is not None:
assert isinstance(self.bias, UninitializedParameter)
self.bias.materialize((self.out_channels,))
self.reset_parameters()
# Function to extract in_channels from first input.
def _get_in_channels(self, input: Tensor) -> int:
num_spatial_dims = self._get_num_spatial_dims()
num_dims_no_batch = num_spatial_dims + 1 # +1 for channels dim
num_dims_batch = num_dims_no_batch + 1
if input.dim() not in (num_dims_no_batch, num_dims_batch):
raise RuntimeError(
f"Expected {num_dims_no_batch}D (unbatched) or {num_dims_batch}D (batched) input "
f"to {self.__class__.__name__}, but "
f"got input of size: {input.shape}"
)
return input.shape[1] if input.dim() == num_dims_batch else input.shape[0]
# Function to return the number of spatial dims expected for inputs to the module.
# This is expected to be implemented by subclasses.
def _get_num_spatial_dims(self) -> int:
raise NotImplementedError
# The base class defines `weight` as a Tensor, but LazyConv1d redefines it as UninitializedParameter.
class LazyConv1d(_LazyConvXdMixin, Conv1d): # type: ignore[misc]
r"""A :class:`torch.nn.Conv1d` module with lazy initialization of the ``in_channels`` argument.
The ``in_channels`` argument of the :class:`Conv1d` is inferred from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel
elements. Default: 1
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the
output. Default: ``True``
padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'zeros'``
.. seealso:: :class:`torch.nn.Conv1d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = Conv1d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: _size_1_t = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
dilation,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
padding_mode,
**factory_kwargs,
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 1
# The base class defines `weight` as a Tensor, but LazyConv2d redefines it as UninitializedParameter.
class LazyConv2d(_LazyConvXdMixin, Conv2d): # type: ignore[misc]
r"""A :class:`torch.nn.Conv2d` module with lazy initialization of the ``in_channels`` argument.
The ``in_channels`` argument of the :class:`Conv2d` is inferred from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel
elements. Default: 1
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the
output. Default: ``True``
padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'zeros'``
.. seealso:: :class:`torch.nn.Conv2d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = Conv2d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros", # TODO: refine this type
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
dilation,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
padding_mode,
**factory_kwargs,
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 2
# The base class defines `weight` as a Tensor, but LazyConv3d redefines it as UninitializedParameter.
class LazyConv3d(_LazyConvXdMixin, Conv3d): # type: ignore[misc]
r"""A :class:`torch.nn.Conv3d` module with lazy initialization of the ``in_channels`` argument.
The ``in_channels`` argument of the :class:`Conv3d` is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel
elements. Default: 1
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the
output. Default: ``True``
padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'zeros'``
.. seealso:: :class:`torch.nn.Conv3d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = Conv3d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: _size_3_t = 0,
dilation: _size_3_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
dilation,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
padding_mode,
**factory_kwargs,
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 3
# The base class defines `weight` as a Tensor, but LazyConvTranspose1d redefines it as UninitializedParameter.
class LazyConvTranspose1d(_LazyConvXdMixin, ConvTranspose1d): # type: ignore[misc]
r"""A :class:`torch.nn.ConvTranspose1d` module with lazy initialization of the ``in_channels`` argument.
The ``in_channels`` argument of the :class:`ConvTranspose1d` is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
.. seealso:: :class:`torch.nn.ConvTranspose1d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = ConvTranspose1d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: _size_1_t = 0,
output_padding: _size_1_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_1_t = 1,
padding_mode: str = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
output_padding,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
dilation,
padding_mode,
**factory_kwargs,
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 1
# The base class defines `weight` as a Tensor, but LazyConvTranspose2d redefines it as UninitializedParameter.
class LazyConvTranspose2d(_LazyConvXdMixin, ConvTranspose2d): # type: ignore[misc]
r"""A :class:`torch.nn.ConvTranspose2d` module with lazy initialization of the ``in_channels`` argument.
The ``in_channels`` argument of the :class:`ConvTranspose2d` is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of each dimension in the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
.. seealso:: :class:`torch.nn.ConvTranspose2d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = ConvTranspose2d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
output_padding: _size_2_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_2_t = 1,
padding_mode: str = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
output_padding,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
dilation,
padding_mode,
**factory_kwargs,
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 2
# The base class defines `weight` as a Tensor, but LazyConvTranspose3d redefines it as UninitializedParameter.
class LazyConvTranspose3d(_LazyConvXdMixin, ConvTranspose3d): # type: ignore[misc]
r"""A :class:`torch.nn.ConvTranspose3d` module with lazy initialization of the ``in_channels`` argument.
The ``in_channels`` argument of the :class:`ConvTranspose3d` is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of each dimension in the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
.. seealso:: :class:`torch.nn.ConvTranspose3d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = ConvTranspose3d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: _size_3_t = 0,
output_padding: _size_3_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_3_t = 1,
padding_mode: str = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
output_padding,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
dilation,
padding_mode,
**factory_kwargs,
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 3
```
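The snippet below is a small illustrative sketch (not part of ``conv.py``) of two behaviors documented above: passing ``output_size`` to a transposed convolution to resolve the ambiguity that arises when ``stride > 1``, and a lazy convolution module inferring ``in_channels`` and materializing its parameters on the first forward call. Shapes in the comments follow the formulas in the docstrings.

```py
import torch
import torch.nn as nn

# ConvTranspose1d: with stride > 1, several input lengths collapse to the same
# downsampled length, so `output_size` selects which one to reconstruct.
down = nn.Conv1d(8, 8, kernel_size=3, stride=2, padding=1)
up = nn.ConvTranspose1d(8, 8, kernel_size=3, stride=2, padding=1)

x = torch.randn(4, 8, 12)
h = down(x)                             # torch.Size([4, 8, 6])
y_default = up(h)                       # torch.Size([4, 8, 11]) -- minimum valid length
y_exact = up(h, output_size=x.size())   # torch.Size([4, 8, 12]) -- matches the input

# LazyConv2d: `in_channels` and the weight shape are inferred from the first input.
lazy = nn.LazyConv2d(out_channels=16, kernel_size=3, padding=1)
print(type(lazy.weight).__name__)       # UninitializedParameter
_ = lazy(torch.randn(2, 5, 32, 32))     # first forward materializes the parameters
print(lazy.in_channels, tuple(lazy.weight.shape))  # 5 (16, 5, 3, 3)
print(type(lazy).__name__)              # Conv2d -- the module mutates via `cls_to_become`
```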
|
====================================================================================================================
SOURCE CODE FILE: distance.py
LINES: 1
SIZE: 3.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\distance.py
ENCODING: utf-8
```py
import torch.nn.functional as F
from torch import Tensor
from .module import Module
__all__ = ["PairwiseDistance", "CosineSimilarity"]
class PairwiseDistance(Module):
r"""
Computes the pairwise distance between input vectors, or between columns of input matrices.
Distances are computed using ``p``-norm, with constant ``eps`` added to avoid division by zero
if ``p`` is negative, i.e.:
.. math ::
\mathrm{dist}\left(x, y\right) = \left\Vert x-y + \epsilon e \right\Vert_p,
where :math:`e` is the vector of ones and the ``p``-norm is given by.
.. math ::
\Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}.
Args:
p (real, optional): the norm degree. Can be negative. Default: 2
eps (float, optional): Small value to avoid division by zero.
Default: 1e-6
keepdim (bool, optional): Determines whether or not to keep the vector dimension.
Default: False
Shape:
- Input1: :math:`(N, D)` or :math:`(D)` where `N = batch dimension` and `D = vector dimension`
- Input2: :math:`(N, D)` or :math:`(D)`, same shape as the Input1
- Output: :math:`(N)` or :math:`()` based on input dimension.
If :attr:`keepdim` is ``True``, then :math:`(N, 1)` or :math:`(1)` based on input dimension.
Examples::
>>> pdist = nn.PairwiseDistance(p=2)
>>> input1 = torch.randn(100, 128)
>>> input2 = torch.randn(100, 128)
>>> output = pdist(input1, input2)
"""
__constants__ = ["norm", "eps", "keepdim"]
norm: float
eps: float
keepdim: bool
def __init__(
self, p: float = 2.0, eps: float = 1e-6, keepdim: bool = False
) -> None:
super().__init__()
self.norm = p
self.eps = eps
self.keepdim = keepdim
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim)
class CosineSimilarity(Module):
r"""Returns cosine similarity between :math:`x_1` and :math:`x_2`, computed along `dim`.
.. math ::
\text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}.
Args:
dim (int, optional): Dimension where cosine similarity is computed. Default: 1
eps (float, optional): Small value to avoid division by zero.
Default: 1e-8
Shape:
- Input1: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`
- Input2: :math:`(\ast_1, D, \ast_2)`, same number of dimensions as x1, matching x1 size at dimension `dim`,
and broadcastable with x1 at other dimensions.
- Output: :math:`(\ast_1, \ast_2)`
Examples::
>>> input1 = torch.randn(100, 128)
>>> input2 = torch.randn(100, 128)
>>> cos = nn.CosineSimilarity(dim=1, eps=1e-6)
>>> output = cos(input1, input2)
"""
__constants__ = ["dim", "eps"]
dim: int
eps: float
def __init__(self, dim: int = 1, eps: float = 1e-8) -> None:
super().__init__()
self.dim = dim
self.eps = eps
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
return F.cosine_similarity(x1, x2, self.dim, self.eps)
```
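As a brief usage sketch of the two modules above (illustrative only, not part of the file): ``keepdim`` retains the reduced vector dimension of ``PairwiseDistance``, while ``CosineSimilarity`` reduces along ``dim`` and yields values bounded in [-1, 1].

```py
import torch
import torch.nn as nn

x1 = torch.randn(100, 128)
x2 = torch.randn(100, 128)

# PairwiseDistance reduces the vector dimension; keepdim=True keeps it as size 1.
pdist = nn.PairwiseDistance(p=2, keepdim=True)
print(pdist(x1, x2).shape)   # torch.Size([100, 1])

# CosineSimilarity reduces along `dim`; the result is bounded in [-1, 1].
cos = nn.CosineSimilarity(dim=1, eps=1e-6)
sim = cos(x1, x2)
print(sim.shape)             # torch.Size([100])
print(float(sim.min()) >= -1.0, float(sim.max()) <= 1.0)   # True True
```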
|
===================================================================================================================
SOURCE CODE FILE: dropout.py
LINES: 1
SIZE: 11.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\dropout.py
ENCODING: utf-8
```py
import torch.nn.functional as F
from torch import Tensor
from .module import Module
__all__ = [
"Dropout",
"Dropout1d",
"Dropout2d",
"Dropout3d",
"AlphaDropout",
"FeatureAlphaDropout",
]
class _DropoutNd(Module):
__constants__ = ["p", "inplace"]
p: float
inplace: bool
def __init__(self, p: float = 0.5, inplace: bool = False) -> None:
super().__init__()
if p < 0 or p > 1:
raise ValueError(
f"dropout probability has to be between 0 and 1, but got {p}"
)
self.p = p
self.inplace = inplace
def extra_repr(self) -> str:
return f"p={self.p}, inplace={self.inplace}"
class Dropout(_DropoutNd):
r"""During training, randomly zeroes some of the elements of the input tensor with probability :attr:`p`.
The zeroed elements are chosen independently for each forward call and are sampled from a Bernoulli distribution.
Each channel will be zeroed out independently on every forward call.
This has proven to be an effective technique for regularization and
preventing the co-adaptation of neurons as described in the paper
`Improving neural networks by preventing co-adaptation of feature
detectors`_ .
Furthermore, the outputs are scaled by a factor of :math:`\frac{1}{1-p}` during
training. This means that during evaluation the module simply computes an
identity function.
Args:
p: probability of an element to be zeroed. Default: 0.5
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`. Input can be of any shape
- Output: :math:`(*)`. Output is of the same shape as input
Examples::
>>> m = nn.Dropout(p=0.2)
>>> input = torch.randn(20, 16)
>>> output = m(input)
.. _Improving neural networks by preventing co-adaptation of feature
detectors: https://arxiv.org/abs/1207.0580
"""
def forward(self, input: Tensor) -> Tensor:
return F.dropout(input, self.p, self.training, self.inplace)
class Dropout1d(_DropoutNd):
r"""Randomly zero out entire channels.
A channel is a 1D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 1D tensor :math:`\text{input}[i, j]`.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv1d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout1d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zero-ed.
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, L)` or :math:`(C, L)`.
- Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input).
Examples::
>>> m = nn.Dropout1d(p=0.2)
>>> input = torch.randn(20, 16, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
https://arxiv.org/abs/1411.4280
"""
def forward(self, input: Tensor) -> Tensor:
return F.dropout1d(input, self.p, self.training, self.inplace)
class Dropout2d(_DropoutNd):
r"""Randomly zero out entire channels.
A channel is a 2D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 2D tensor :math:`\text{input}[i, j]`.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv2d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout2d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zero-ed.
inplace (bool, optional): If set to ``True``, will do this operation
in-place
.. warning ::
Due to historical reasons, this class will perform 1D channel-wise dropout
for 3D inputs (as done by :class:`nn.Dropout1d`). Thus, it currently does NOT
support inputs without a batch dimension of shape :math:`(C, H, W)`. This
behavior will change in a future release to interpret 3D inputs as no-batch-dim
inputs. To maintain the old behavior, switch to :class:`nn.Dropout1d`.
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(N, C, L)`.
- Output: :math:`(N, C, H, W)` or :math:`(N, C, L)` (same shape as input).
Examples::
>>> m = nn.Dropout2d(p=0.2)
>>> input = torch.randn(20, 16, 32, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
https://arxiv.org/abs/1411.4280
"""
def forward(self, input: Tensor) -> Tensor:
return F.dropout2d(input, self.p, self.training, self.inplace)
class Dropout3d(_DropoutNd):
r"""Randomly zero out entire channels.
A channel is a 3D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 3D tensor :math:`\text{input}[i, j]`.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv3d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout3d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zeroed.
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`.
- Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input).
Examples::
>>> m = nn.Dropout3d(p=0.2)
>>> input = torch.randn(20, 16, 4, 32, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
https://arxiv.org/abs/1411.4280
"""
def forward(self, input: Tensor) -> Tensor:
return F.dropout3d(input, self.p, self.training, self.inplace)
class AlphaDropout(_DropoutNd):
r"""Applies Alpha Dropout over the input.
Alpha Dropout is a type of Dropout that maintains the self-normalizing
property.
For an input with zero mean and unit standard deviation, the output of
Alpha Dropout maintains the original mean and standard deviation of the
input.
Alpha Dropout goes hand-in-hand with the SELU activation function, which ensures
that the outputs have zero mean and unit standard deviation.
During training, it randomly masks some of the elements of the input
tensor with probability *p* using samples from a Bernoulli distribution.
The elements to be masked are randomized on every forward call, and scaled
and shifted to maintain zero mean and unit standard deviation.
During evaluation the module simply computes an identity function.
More details can be found in the paper `Self-Normalizing Neural Networks`_ .
Args:
p (float): probability of an element to be dropped. Default: 0.5
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(*)`. Input can be of any shape
- Output: :math:`(*)`. Output is of the same shape as input
Examples::
>>> m = nn.AlphaDropout(p=0.2)
>>> input = torch.randn(20, 16)
>>> output = m(input)
.. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
"""
def forward(self, input: Tensor) -> Tensor:
return F.alpha_dropout(input, self.p, self.training)
class FeatureAlphaDropout(_DropoutNd):
r"""Randomly masks out entire channels.
A channel is a feature map,
e.g. the :math:`j`-th channel of the :math:`i`-th sample in the batched input
is a tensor :math:`\text{input}[i, j]` of the input tensor. Instead of
setting activations to zero, as in regular Dropout, the activations are set
to the negative saturation value of the SELU activation function. More details
can be found in the paper `Self-Normalizing Neural Networks`_ .
Each element will be masked independently for each sample on every forward
call with probability :attr:`p` using samples from a Bernoulli distribution.
The elements to be masked are randomized on every forward call, and scaled
and shifted to maintain zero mean and unit variance.
Usually the input comes from :class:`nn.AlphaDropout` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.FeatureAlphaDropout` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zeroed. Default: 0.5
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`.
- Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input).
Examples::
>>> m = nn.FeatureAlphaDropout(p=0.2)
>>> input = torch.randn(20, 16, 4, 32, 32)
>>> output = m(input)
.. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
.. _Efficient Object Localization Using Convolutional Networks:
https://arxiv.org/abs/1411.4280
"""
def forward(self, input: Tensor) -> Tensor:
return F.feature_alpha_dropout(input, self.p, self.training)
```
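A short sketch (not part of the module) of the behavior described in the docstrings above: during training the surviving elements are scaled by 1/(1-p) and the module acts as the identity in eval mode, while ``Dropout2d`` zeroes entire channels rather than individual elements.

```py
import torch
import torch.nn as nn

drop = nn.Dropout(p=0.5)
x = torch.ones(8)

drop.train()
print(drop(x))   # surviving entries are scaled to 1/(1-p) = 2.0, the rest are 0.0
drop.eval()
print(drop(x))   # identity in eval mode: all ones

# Dropout2d drops whole 2D feature maps of an (N, C, H, W) input.
drop2d = nn.Dropout2d(p=0.5).train()
feat = torch.ones(1, 6, 4, 4)
out = drop2d(feat)
# Per channel, the map is either all zeros or uniformly scaled to 2.0.
print(out.amax(dim=(2, 3)).squeeze(0))
print(out.amin(dim=(2, 3)).squeeze(0))
```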
|
===================================================================================================================
SOURCE CODE FILE: flatten.py
LINES: 1
SIZE: 5.56 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\flatten.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from typing import Union
from torch import Tensor
from torch.types import _size
from .module import Module
__all__ = ["Flatten", "Unflatten"]
class Flatten(Module):
r"""
Flattens a contiguous range of dims into a tensor.
For use with :class:`~nn.Sequential`, see :meth:`torch.flatten` for details.
Shape:
- Input: :math:`(*, S_{\text{start}},..., S_{i}, ..., S_{\text{end}}, *)`,
where :math:`S_{i}` is the size at dimension :math:`i` and :math:`*` means any
number of dimensions including none.
- Output: :math:`(*, \prod_{i=\text{start}}^{\text{end}} S_{i}, *)`.
Args:
start_dim: first dim to flatten (default = 1).
end_dim: last dim to flatten (default = -1).
Examples::
>>> input = torch.randn(32, 1, 5, 5)
>>> # With default parameters
>>> m = nn.Flatten()
>>> output = m(input)
>>> output.size()
torch.Size([32, 25])
>>> # With non-default parameters
>>> m = nn.Flatten(0, 2)
>>> output = m(input)
>>> output.size()
torch.Size([160, 5])
"""
__constants__ = ["start_dim", "end_dim"]
start_dim: int
end_dim: int
def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
super().__init__()
self.start_dim = start_dim
self.end_dim = end_dim
def forward(self, input: Tensor) -> Tensor:
return input.flatten(self.start_dim, self.end_dim)
def extra_repr(self) -> str:
return f"start_dim={self.start_dim}, end_dim={self.end_dim}"
class Unflatten(Module):
r"""
Unflattens a tensor dim, expanding it to a desired shape. For use with :class:`~nn.Sequential`.
* :attr:`dim` specifies the dimension of the input tensor to be unflattened, and it can
be either `int` or `str` when `Tensor` or `NamedTensor` is used, respectively.
* :attr:`unflattened_size` is the new shape of the unflattened dimension of the tensor and it can be
a `tuple` of ints or a `list` of ints or `torch.Size` for `Tensor` input; a `NamedShape`
(tuple of `(name, size)` tuples) for `NamedTensor` input.
Shape:
- Input: :math:`(*, S_{\text{dim}}, *)`, where :math:`S_{\text{dim}}` is the size at
dimension :attr:`dim` and :math:`*` means any number of dimensions including none.
- Output: :math:`(*, U_1, ..., U_n, *)`, where :math:`U` = :attr:`unflattened_size` and
:math:`\prod_{i=1}^n U_i = S_{\text{dim}}`.
Args:
dim (Union[int, str]): Dimension to be unflattened
unflattened_size (Union[torch.Size, Tuple, List, NamedShape]): New shape of the unflattened dimension
Examples:
>>> input = torch.randn(2, 50)
>>> # With tuple of ints
>>> m = nn.Sequential(
>>> nn.Linear(50, 50),
>>> nn.Unflatten(1, (2, 5, 5))
>>> )
>>> output = m(input)
>>> output.size()
torch.Size([2, 2, 5, 5])
>>> # With torch.Size
>>> m = nn.Sequential(
>>> nn.Linear(50, 50),
>>> nn.Unflatten(1, torch.Size([2, 5, 5]))
>>> )
>>> output = m(input)
>>> output.size()
torch.Size([2, 2, 5, 5])
>>> # With namedshape (tuple of tuples)
>>> input = torch.randn(2, 50, names=('N', 'features'))
>>> unflatten = nn.Unflatten('features', (('C', 2), ('H', 5), ('W', 5)))
>>> output = unflatten(input)
>>> output.size()
torch.Size([2, 2, 5, 5])
"""
NamedShape = tuple[tuple[str, int]]
__constants__ = ["dim", "unflattened_size"]
dim: Union[int, str]
unflattened_size: Union[_size, NamedShape]
def __init__(
self, dim: Union[int, str], unflattened_size: Union[_size, NamedShape]
) -> None:
super().__init__()
if isinstance(dim, int):
self._require_tuple_int(unflattened_size)
elif isinstance(dim, str):
self._require_tuple_tuple(unflattened_size)
else:
raise TypeError("invalid argument type for dim parameter")
self.dim = dim
self.unflattened_size = unflattened_size
def _require_tuple_tuple(self, input):
if isinstance(input, tuple):
for idx, elem in enumerate(input):
if not isinstance(elem, tuple):
raise TypeError(
"unflattened_size must be tuple of tuples, "
+ f"but found element of type {type(elem).__name__} at pos {idx}"
)
return
raise TypeError(
"unflattened_size must be a tuple of tuples, "
+ f"but found type {type(input).__name__}"
)
def _require_tuple_int(self, input):
if isinstance(input, (tuple, list)):
for idx, elem in enumerate(input):
if not isinstance(elem, int):
raise TypeError(
"unflattened_size must be tuple of ints, "
+ f"but found element of type {type(elem).__name__} at pos {idx}"
)
return
raise TypeError(
f"unflattened_size must be a tuple of ints, but found type {type(input).__name__}"
)
def forward(self, input: Tensor) -> Tensor:
return input.unflatten(self.dim, self.unflattened_size)
def extra_repr(self) -> str:
return f"dim={self.dim}, unflattened_size={self.unflattened_size}"
```
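A minimal round-trip sketch (illustrative, not part of the file): ``Flatten`` collapses dims 1..-1 by default, and an ``Unflatten`` configured with the original sizes restores the shape exactly, so the pair is lossless.

```py
import torch
import torch.nn as nn

x = torch.randn(32, 3, 5, 5)

flatten = nn.Flatten()                   # default: start_dim=1, end_dim=-1
unflatten = nn.Unflatten(1, (3, 5, 5))   # re-expand dim 1 to the original sizes

y = flatten(x)
print(y.shape)            # torch.Size([32, 75])
z = unflatten(y)
print(z.shape)            # torch.Size([32, 3, 5, 5])
print(torch.equal(x, z))  # True: the round-trip recovers the original tensor exactly
```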
|
================================================================================================================
SOURCE CODE FILE: fold.py
LINES: 1
SIZE: 12.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\nn\modules\fold.py
ENCODING: utf-8
```py
import torch.nn.functional as F
from torch import Tensor
from torch.nn.common_types import _size_any_t
from .module import Module
__all__ = ["Fold", "Unfold"]
class Fold(Module):
r"""Combines an array of sliding local blocks into a large containing tensor.
Consider a batched :attr:`input` tensor containing sliding local blocks,
e.g., patches of images, of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`,
where :math:`N` is batch dimension, :math:`C \times \prod(\text{kernel\_size})`
is the number of values within a block (a block has :math:`\prod(\text{kernel\_size})`
spatial locations each containing a :math:`C`-channeled vector), and
:math:`L` is the total number of blocks. (This is exactly the
same specification as the output shape of :class:`~torch.nn.Unfold`.) This
operation combines these local blocks into the large :attr:`output` tensor
of shape :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)`
by summing the overlapping values. Similar to :class:`~torch.nn.Unfold`, the
arguments must satisfy
.. math::
L = \prod_d \left\lfloor\frac{\text{output\_size}[d] + 2 \times \text{padding}[d] %
- \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
where :math:`d` is over all spatial dimensions.
* :attr:`output_size` describes the spatial shape of the large containing
tensor of the sliding local blocks. It is useful to resolve the ambiguity
when multiple input shapes map to the same number of sliding blocks, e.g.,
with ``stride > 0``.
The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
how the sliding blocks are retrieved.
* :attr:`stride` controls the stride for the sliding blocks.
* :attr:`padding` controls the amount of implicit zero-paddings on both
sides for :attr:`padding` number of points for each dimension before
reshaping.
""" """
* :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
""" r"""
Args:
output_size (int or tuple): the shape of the spatial dimensions of the
output (i.e., ``output.sizes()[2:]``)
kernel_size (int or tuple): the size of the sliding blocks
dilation (int or tuple, optional): a parameter that controls the
stride of elements within the
neighborhood. Default: 1
padding (int or tuple, optional): implicit zero padding to be added on
both sides of input. Default: 0
stride (int or tuple): the stride of the sliding blocks in the input
spatial dimensions. Default: 1
* If :attr:`output_size`, :attr:`kernel_size`, :attr:`dilation`,
:attr:`padding` or :attr:`stride` is an int or a tuple of length 1 then
their values will be replicated across all spatial dimensions.
* For the case of two output spatial dimensions this operation is sometimes
called ``col2im``.
.. note::
:class:`~torch.nn.Fold` calculates each combined value in the resulting
large tensor by summing all values from all containing blocks.
:class:`~torch.nn.Unfold` extracts the values in the local blocks by
copying from the large tensor. So, if the blocks overlap, they are not
inverses of each other.
In general, folding and unfolding operations are related as
follows. Consider :class:`~torch.nn.Fold` and
:class:`~torch.nn.Unfold` instances created with the same
parameters:
>>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...)
>>> fold = nn.Fold(output_size=..., **fold_params)
>>> unfold = nn.Unfold(**fold_params)
Then for any (supported) ``input`` tensor the following
equality holds:
::
fold(unfold(input)) == divisor * input
where ``divisor`` is a tensor that depends only on the shape
and dtype of the ``input``:
>>> # xdoctest: +SKIP
>>> input_ones = torch.ones(input.shape, dtype=input.dtype)
>>> divisor = fold(unfold(input_ones))
When the ``divisor`` tensor contains no zero elements, then
``fold`` and ``unfold`` operations are inverses of each
other (up to constant divisor).
.. warning::
Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported.
Shape:
- Input: :math:`(N, C \times \prod(\text{kernel\_size}), L)` or :math:`(C \times \prod(\text{kernel\_size}), L)`
- Output: :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)`
or :math:`(C, \text{output\_size}[0], \text{output\_size}[1], \dots)` as described above
Examples::
>>> fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2))
>>> input = torch.randn(1, 3 * 2 * 2, 12)
>>> output = fold(input)
>>> output.size()
torch.Size([1, 3, 4, 5])
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
__constants__ = ["output_size", "kernel_size", "dilation", "padding", "stride"]
output_size: _size_any_t
kernel_size: _size_any_t
dilation: _size_any_t
padding: _size_any_t
stride: _size_any_t
def __init__(
self,
output_size: _size_any_t,
kernel_size: _size_any_t,
dilation: _size_any_t = 1,
padding: _size_any_t = 0,
stride: _size_any_t = 1,
) -> None:
super().__init__()
self.output_size = output_size
self.kernel_size = kernel_size
self.dilation = dilation
self.padding = padding
self.stride = stride
def forward(self, input: Tensor) -> Tensor:
return F.fold(
input,
self.output_size,
self.kernel_size,
self.dilation,
self.padding,
self.stride,
)
def extra_repr(self) -> str:
return (
"output_size={output_size}, kernel_size={kernel_size}, "
"dilation={dilation}, padding={padding}, stride={stride}".format(
**self.__dict__
)
)
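

# Editor's note: an illustrative sketch, not part of the original module. The
# Fold docstring above states that ``fold(unfold(input)) == divisor * input``,
# where ``divisor`` counts how many sliding blocks cover each input position,
# and that ``output_size`` is needed because different input sizes can map to
# the same number of sliding blocks once ``stride > 1``. The hypothetical
# helper below checks both claims with the functional API (``F.fold`` /
# ``F.unfold``), which this module already uses.
def _fold_unfold_divisor_sketch() -> None:
    import torch  # local import so the sketch is self-contained

    inp = torch.randn(1, 3, 4, 5)
    kernel_size = (2, 2)
    output_size = inp.shape[2:]
    # Round-trip the input, and do the same for an all-ones tensor to obtain
    # the per-position block-coverage counts (the "divisor").
    roundtrip = F.fold(F.unfold(inp, kernel_size), output_size, kernel_size)
    divisor = F.fold(
        F.unfold(torch.ones_like(inp), kernel_size), output_size, kernel_size
    )
    assert torch.allclose(roundtrip, divisor * inp)
    # The ambiguity that output_size resolves: with kernel_size=2 and stride=2,
    # 4x4 and 5x5 inputs both yield L = 4 sliding blocks, so the block count
    # alone does not determine the output shape.
    blocks_4x4 = F.unfold(torch.ones(1, 1, 4, 4), 2, stride=2).shape[-1]
    blocks_5x5 = F.unfold(torch.ones(1, 1, 5, 5), 2, stride=2).shape[-1]
    assert blocks_4x4 == blocks_5x5 == 4
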
class Unfold(Module):
r"""Extracts sliding local blocks from a batched input tensor.
Consider a batched :attr:`input` tensor of shape :math:`(N, C, *)`,
where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
and :math:`*` represent arbitrary spatial dimensions. This operation flattens
each sliding :attr:`kernel_size`-sized block within the spatial dimensions
of :attr:`input` into a column (i.e., last dimension) of a 3-D :attr:`output`
tensor of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`, where
:math:`C \times \prod(\text{kernel\_size})` is the total number of values
within each block (a block has :math:`\prod(\text{kernel\_size})` spatial
locations each containing a :math:`C`-channeled vector), and :math:`L` is
the total number of such blocks:
.. math::
L = \prod_d \left\lfloor\frac{\text{spatial\_size}[d] + 2 \times \text{padding}[d] %
- \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
where :math:`\text{spatial\_size}` is formed by the spatial dimensions
of :attr:`input` (:math:`*` above), and :math:`d` is over all spatial
dimensions.
Therefore, indexing :attr:`output` at the last dimension (column dimension)
gives all values within a certain block.
The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
how the sliding blocks are retrieved.
* :attr:`stride` controls the stride for the sliding blocks.
* :attr:`padding` controls the amount of implicit zero-paddings on both
sides for :attr:`padding` number of points for each dimension before
reshaping.
""" """
    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
""" r"""
Args:
kernel_size (int or tuple): the size of the sliding blocks
dilation (int or tuple, optional): a parameter that controls the
stride of elements within the
neighborhood. Default: 1
padding (int or tuple, optional): implicit zero padding to be added on
both sides of input. Default: 0
stride (int or tuple, optional): the stride of the sliding blocks in the input
spatial dimensions. Default: 1
* If :attr:`kernel_size`, :attr:`dilation`, :attr:`padding` or
:attr:`stride` is an int or a tuple of length 1, their values will be
replicated across all spatial dimensions.
* For the case of two input spatial dimensions this operation is sometimes
called ``im2col``.
.. note::
:class:`~torch.nn.Fold` calculates each combined value in the resulting
large tensor by summing all values from all containing blocks.
:class:`~torch.nn.Unfold` extracts the values in the local blocks by
copying from the large tensor. So, if the blocks overlap, they are not
inverses of each other.
In general, folding and unfolding operations are related as
follows. Consider :class:`~torch.nn.Fold` and
:class:`~torch.nn.Unfold` instances created with the same
parameters:
>>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...)
>>> fold = nn.Fold(output_size=..., **fold_params)
>>> unfold = nn.Unfold(**fold_params)
Then for any (supported) ``input`` tensor the following
equality holds:
::
fold(unfold(input)) == divisor * input
where ``divisor`` is a tensor that depends only on the shape
and dtype of the ``input``:
>>> # xdoctest: +SKIP
>>> input_ones = torch.ones(input.shape, dtype=input.dtype)
>>> divisor = fold(unfold(input_ones))
When the ``divisor`` tensor contains no zero elements, then
``fold`` and ``unfold`` operations are inverses of each
        other (up to the elementwise ``divisor``).
.. warning::
Currently, only 4-D input tensors (batched image-like tensors) are
supported.
Shape:
- Input: :math:`(N, C, *)`
- Output: :math:`(N, C \times \prod(\text{kernel\_size}), L)` as described above
Examples::
>>> unfold = nn.Unfold(kernel_size=(2, 3))
>>> input = torch.randn(2, 5, 3, 4)
>>> output = unfold(input)
>>> # each patch contains 30 values (2x3=6 vectors, each of 5 channels)
>>> # 4 blocks (2x3 kernels) in total in the 3x4 input
>>> output.size()
torch.Size([2, 30, 4])
>>> # xdoctest: +IGNORE_WANT
>>> # Convolution is equivalent with Unfold + Matrix Multiplication + Fold (or view to output shape)
>>> inp = torch.randn(1, 3, 10, 12)
>>> w = torch.randn(2, 3, 4, 5)
>>> inp_unf = torch.nn.functional.unfold(inp, (4, 5))
>>> out_unf = inp_unf.transpose(1, 2).matmul(w.view(w.size(0), -1).t()).transpose(1, 2)
>>> out = torch.nn.functional.fold(out_unf, (7, 8), (1, 1))
>>> # or equivalently (and avoiding a copy),
>>> # out = out_unf.view(1, 2, 7, 8)
>>> (torch.nn.functional.conv2d(inp, w) - out).abs().max()
tensor(1.9073e-06)
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
__constants__ = ["kernel_size", "dilation", "padding", "stride"]
kernel_size: _size_any_t
dilation: _size_any_t
padding: _size_any_t
stride: _size_any_t
def __init__(
self,
kernel_size: _size_any_t,
dilation: _size_any_t = 1,
padding: _size_any_t = 0,
stride: _size_any_t = 1,
) -> None:
super().__init__()
self.kernel_size = kernel_size
self.dilation = dilation
self.padding = padding
self.stride = stride
def forward(self, input: Tensor) -> Tensor:
return F.unfold(
input, self.kernel_size, self.dilation, self.padding, self.stride
)
def extra_repr(self) -> str:
return (
"kernel_size={kernel_size}, dilation={dilation}, padding={padding},"
" stride={stride}".format(**self.__dict__)
)
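

# Editor's note: an illustrative sketch, not part of the original module. The
# Unfold docstring above defines the number of sliding blocks as
#     L = prod over d of (floor((spatial_size[d] + 2*padding[d]
#         - dilation[d]*(kernel_size[d] - 1) - 1) / stride[d]) + 1)
# The hypothetical helper below mirrors that formula in plain Python and checks
# it against the last dimension of ``F.unfold``'s output for the docstring's
# (2, 5, 3, 4) example.
def _num_blocks_sketch() -> None:
    import torch  # local import so the sketch is self-contained

    inp = torch.randn(2, 5, 3, 4)
    kernel_size, dilation, padding, stride = (2, 3), (1, 1), (0, 0), (1, 1)
    num_blocks = 1
    for d, size in enumerate(inp.shape[2:]):
        span = dilation[d] * (kernel_size[d] - 1) + 1  # effective kernel extent
        num_blocks *= (size + 2 * padding[d] - span) // stride[d] + 1
    out = F.unfold(inp, kernel_size, dilation=dilation, padding=padding, stride=stride)
    # out has shape (N, C * prod(kernel_size), L) == (2, 5 * 2 * 3, 4)
    assert out.shape == (2, 30, num_blocks) == (2, 30, 4)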
```