diff --git a/ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..b3d2ac3fd2cbbb1aa83c441ad4ce361d98eaba65 --- /dev/null +++ b/ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c54f008b0420ce9a6543fa4182978fce98614d47bacc66e120dc1cd698570c2e +size 50332828 diff --git a/ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..c78b1961aa2ff673722a207d8f7fa6d21a078463 --- /dev/null +++ b/ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa64a7e8c84a4aa24d5606e708ef41c553e667c8d9d564cf1c782ed0b68b00b8 +size 50332843 diff --git a/ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..9f6184f6c75c0cdb547ed7b8233aebe41b4a649e --- /dev/null +++ b/ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81dc8161845dd2f7ae322f702606ddce30ba8d1c358ee6db16a32a758a3a5917 +size 50332749 diff --git a/ckpts/universal/global_step120/zero/16.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/16.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..e213c0d003a67cf52e98852e90d91e2ecea70204 --- /dev/null +++ b/ckpts/universal/global_step120/zero/16.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab35ef16672e672908d6ea232361665000eeae86cdeea20c355c84dc5b2e0498 +size 16778396 diff --git a/venv/lib/python3.10/site-packages/torch/_refs/__init__.py b/venv/lib/python3.10/site-packages/torch/_refs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa571489bd7f3e752872eee4c7832704dfece39f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_refs/__init__.py @@ -0,0 +1,6443 @@ +import builtins +import collections +import inspect +import itertools +import math +import operator +import warnings + +from collections.abc import Iterable +from enum import Enum +from functools import partial, reduce, singledispatch, wraps +from typing import Any, Callable, Dict, List, Optional, overload, Sequence, Tuple, Union + +import torch + +import torch._prims as prims +import torch._prims_common as utils +from torch import sym_float, sym_int +from torch._prims_common import ( + DeviceLikeType, + Dim, + DimsSequenceType, + DimsType, + dtype_to_type, + ELEMENTWISE_TYPE_PROMOTION_KIND, + FloatLike, + FloatWithoutSymFloat, + IntLike, + is_weakly_lesser_type, + Number, + NumberType, + RealNumberType, + REDUCTION_OUTPUT_TYPE_KIND, + ShapeType, + StrideType, + TensorLike, + TensorLikeType, + TensorOrNumberLikeType, + TensorSequenceType, +) +from torch._prims_common.wrappers import ( + _maybe_convert_to_dtype, + _maybe_resize_out, + _safe_copy_out, + elementwise_type_promotion_wrapper, + elementwise_unary_scalar_wrapper, + out_wrapper, +) + +# 
Experimental module containing prototype Python references for existing +# PyTorch operations. + +__all__ = [ + # + # Elementwise Unary References + # + "abs", + "acos", + "acosh", + "asinh", + "asin", + "atan", + "atanh", + "bitwise_not", + # "cbrt", # No corresponding torch operation + "ceil", + "conj_physical", + "cos", + "cosh", + "count_nonzero", + "deg2rad", + "digamma", + "erf", + "erfinv", + "erfc", + "exp", + "expm1", + "exponential", + "exp2", + "fill", + "fill_", + "floor", + "frac", + "geometric", + "index_add", + "index_copy", + "index_copy_", + "index_select", + "index_fill", + "index_fill_", + "isfinite", + "isinf", + "isposinf", + "isneginf", + "isnan", + "isreal", + "i0", + "lerp", + "lgamma", + "log", + "log1p", + "log2", + "log10", + "log_normal", + "log_softmax", + "mvlgamma", + "norm", + "normal", + "nan_to_num", + "neg", + "positive", + "rad2deg", + "reciprocal", + "round", # TODO: model kwargs + "sigmoid", + "sgn", + "sign", + "signbit", + "sin", + "sinc", + "sinh", + "softmax", + "sqrt", + "square", + "tan", + "tanh", + "trace", + "trunc", + # + # Elementwise Binary References + # + "add", + "atan2", + "bitwise_and", + "bitwise_left_shift", + "bitwise_or", + "bitwise_right_shift", + "bitwise_xor", + "clamp_min", + "clamp_max", + "copysign", + "div", + "eq", + "float_power", + "floor_divide", + "fmax", + "fmin", + "fmod", + "gcd", + "ge", + "gt", + "heaviside", + "hypot", + "igamma", + "igammac", + "imag", + "isclose", + "lcm", + # 'ldexp', + "le", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logsumexp", + "lt", + # 'max', # implement with reductions + "maximum", + # 'min', # implement with reductions + "minimum", + "mul", + "ne", + "nextafter", + # 'polar', # abs, cos, sin + "pow", + "real", + "rpow", + "remainder", + "rsub", + "rtruediv", + "rfloordiv", + "sub", + "true_divide", + "trunc_divide", + "xlogy", + # + # Elementwise Ternary References + # + "addcdiv", + "addcmul", + "clamp", + # + # Conditional references + # + "masked_fill", + "masked_fill_", + "where", + # + # Data conversion and movement references + # + "clone", + "copy_to", # TODO: add OpInfo (or implement .to) + "item", + "to", + # + # Reduction ops + # + "all", + "amax", + "amin", + "any", + "cumsum", + "cumprod", + "mean", + "dot", + "vdot", + "std", + "std_mean", + "sum", + "sum_to_size", + "prod", + "var", + "var_mean", + # + # Linear algebra ops + # + "addr", + # + # View & Shape Ops + # + "alias", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "as_strided", + "as_strided_scatter", + "block_diag", + "broadcast_shapes", + "broadcast_tensors", + "broadcast_to", + "cat", + "chunk", + "column_stack", + "conj", + "constant_pad_nd", + "contiguous", + "diag_embed", + "diag", + "diagonal", + "diagonal_copy", + "diagonal_scatter", + "dsplit", + "dstack", + "expand", + "expand_as", + "flatten", + "flip", + "fliplr", + "flipud", + "hsplit", + "hstack", + "meshgrid", + "movedim", + "narrow", + "narrow_copy", + "native_group_norm", + "native_layer_norm", + "permute", + "ravel", + "repeat", + "reshape", + "reshape_as", + "roll", + "rot90", + "rsqrt", + "stack", + "swap_axes", # alias for transpose + "squeeze", + "t", + "T", + "take_along_dim", + "tensor_split", + "transpose", + "unfold", + "unfold_copy", + "unsqueeze", + "view", + "view_as", + "vsplit", + "vstack", + "view_as_complex", + "unflatten", + "unbind", + "triu", + "tril", + "triu_indices", + "tril_indices", + # + # Tensor Creation + # + "arange", + "cauchy", + "empty", + "empty_like", + 
"empty_permuted", + "empty_strided", + "eye", + "full", + "full_like", + "linspace", + "logspace", + "new_empty", + "new_empty_strided", + "new_full", + "new_ones", + "new_zeros", + "ones", + "ones_like", + "randn", + "scalar_tensor", + "zero", + "zeros", + "zeros_like", + # + # Test-related functions + # + "allclose", + "equal", + # + # Statistical operations + # + "bucketize", + # + # Misc + # + "is_complex", + "renorm", + "stft", + "istft", +] + +Tensor = torch.Tensor +DispatchKey = torch._C.DispatchKey # type: ignore[attr-defined] +aten = torch._ops.ops.aten + +# Note that the docstrings for the public methods from this file are in +# torch/_torch_docs.py + + +def is_noncontiguous_supported(device): + if device is not None and device.type == "hpu": + return False + return True + + +def handle_noncontiguous_outputs(input_tlist, output): + device = None + from torch._subclasses.fake_tensor import FakeTensor + + for t in input_tlist: + if isinstance(t, FakeTensor): + device = t.fake_device + break + + if not is_noncontiguous_supported(device): + output = output.contiguous() + + return output + + +def _broadcast_shapes(*_shapes): + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + shapes = tuple( + (x,) if isinstance(x, IntLike) else x + for x in filter(lambda x: x is not None, _shapes) + ) + + # Short-circuits on no input + if len(shapes) == 0: + return None + + # Type checking + # TODO: make common validations available as utils + for shape in shapes: + assert isinstance(shape, Sequence) + + # Computes common shape + common_shape = [ + 1, + ] * reduce(max, (len(shape) for shape in shapes)) + for arg_idx, shape in enumerate(shapes): + for idx in range(-1, -1 - len(shape), -1): + if guard_size_oblivious(common_shape[idx] == 1): + if shape[idx] < 0: + raise ValueError( + "Attempting to broadcast a dimension with negative length!" + ) + common_shape[idx] = shape[idx] + elif guard_size_oblivious(shape[idx] != 1): + if common_shape[idx] != shape[idx]: + raise RuntimeError( + f"Attempting to broadcast a dimension of length {shape[idx]} at {idx}! " + f"Mismatching argument at index {arg_idx} had {shape}; but expected shape " + f"should be broadcastable to {common_shape}" + ) + + return common_shape + + +def _maybe_broadcast(*args, preserve_cpu_scalar_tensors=True): + # Computes common shape + common_shape = _broadcast_shapes( + *(t.shape if isinstance(t, TensorLike) else None for t in args) + ) + + def __maybe_broadcast(x, shape): + if x is None: + return None + elif isinstance(x, Number): + return x + elif isinstance(x, TensorLike): + if preserve_cpu_scalar_tensors and utils.is_cpu_scalar_tensor(x): + return x + + if not utils.same_shape(x.shape, common_shape): + return x.expand(common_shape) + + return x + else: + raise RuntimeError( + "Unexpected type when broadcasting: " + str(type(x)) + "!" 
+ ) + + return tuple(__maybe_broadcast(x, common_shape) for x in args) + + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition + +# +# Elementwise unary references +# + +infer_aten_op = object() + + +# TODO: add type promotion support +def _make_elementwise_unary_reference( + type_promotion_kind, + *, + aten_op=infer_aten_op, + extra_meta=None, +) -> Callable: + def inner(prim: Callable): + nonlocal aten_op + + @wraps(prim) + @out_wrapper() + @elementwise_unary_scalar_wrapper + @elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=type_promotion_kind, + ) + def _ref(a: TensorLikeType) -> TensorLikeType: + if extra_meta is not None: + extra_meta(a) + + output = prim(a) + return handle_noncontiguous_outputs([a], output) + + if aten_op is infer_aten_op: + aten_op = utils.get_aten_op(prim, prim.__name__) + if aten_op is not None: + register_decomposition(aten_op)(_ref) + + return _ref + + return inner + + +def _make_alias(fn, name): + """ + This function defines an alias of another function and sets its __name__ argument. + It also sets its __module__ argument to the module of the caller. + Note that when naïvely doing `alias = fn`, we have that `alias.__name__ == "fn"`, and + `alias.__module__ == fn.__module__`. + """ + + def _fn(*args, **kwargs): + return fn(*args, **kwargs) + + _fn.__name__ = name + _fn.__module__ = inspect.currentframe().f_back.f_globals["__name__"] # type: ignore[union-attr] + return _fn + + +def _make_inplace(fn): + """ + Given a function with out variant (i.e. using `out_wrapper()), it returns its in-place variant + See https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-do-in-place-operations-work-in-pytorch + """ + + # nb. We use the name of the first argument used in the unary references + @wraps(fn) + def _fn(a, *args, **kwargs): + return fn(a, *args, out=a, **kwargs) + + inplace_name = f"{fn.__name__}_" + _fn.__name__ = inplace_name + _fn = register_decomposition(getattr(aten, inplace_name))(_fn) + + # We access the __all__ attribute of the module where fn is defined + # There may be a cleaner way of doing this... 
+ from inspect import getmodule + + _all = getmodule(fn).__all__ # type: ignore[union-attr] + if inplace_name not in _all: + _all.append(inplace_name) + return _fn + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT) +def abs(a): + return prims.abs(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def acos(a): + return prims.acos(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def acosh(a): + return prims.acosh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def asin(a): + return prims.asin(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def asinh(a): + return prims.asinh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def atan(a): + return prims.atan(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def atanh(a): + return prims.atanh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def bitwise_not(a): + return prims.bitwise_not(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def ceil(a): + return prims.ceil(a) + + +@register_decomposition(aten.is_complex) +def is_complex(input: TensorLikeType): + return utils.is_complex_dtype(input.dtype) + + +@register_decomposition(aten.conj_physical) +@out_wrapper() +def conj_physical(input: TensorLikeType): + if not utils.is_complex_dtype(input.dtype): + return input + return prims.conj_physical(input) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def cos(a): + return prims.cos(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def cosh(a): + return prims.cosh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def digamma(a): + return prims.digamma(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erf(a): + return prims.erf(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erfinv(a): + return prims.erf_inv(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erfc(a): + return prims.erfc(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def exp(a): + return prims.exp(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def expm1(a): + return prims.expm1(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def exp2(a): + return prims.exp2(a) + + +# Fill has its own implementation because it has a value parameter +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a,"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def fill(a: TensorLikeType, value: NumberType) -> TensorLikeType: + assert isinstance(a, TensorLike) + assert isinstance(value, Number) + + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(value), python_type): + msg = f"value argument of type {type(value)} cannot be safely cast to type {python_type}!" 
+ raise ValueError(msg) + + return prims.fill(a, value) + + +def fill_(a: TensorLikeType, value: NumberType) -> TensorLikeType: + r = prims.fill(a, value) + prims.copy_to(a, r) + return a + + +@register_decomposition(aten.zero) +@out_wrapper() +def zero(input: TensorLikeType) -> TensorLikeType: + return torch.zeros_like(input) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def floor(a): + return prims.floor(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def frac(x: TensorLikeType) -> TensorLikeType: + trunc_x = torch.mul(torch.floor(torch.abs(x)), torch.sign(x)) + return torch.sub(x, trunc_x) + + +# imag does not use _make_elementwise_unary_reference because it does not support out +def imag(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + torch._check( + utils.is_complex_dtype(a.dtype), lambda: "imag only supports complex tensors." + ) + return prims.imag(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + aten_op=None, # CompositeImplicitAutograd +) +def isfinite(a: TensorLikeType) -> TensorLikeType: + if utils.is_float_dtype(a.dtype) or utils.is_complex_dtype(a.dtype): + return prims.isfinite(a) + + return ones_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isinf(a: TensorLikeType) -> TensorLikeType: + if utils.is_complex_dtype(a.dtype): + return torch.logical_or(isinf(torch.real(a)), isinf(torch.imag(a))) + if utils.is_float_dtype(a.dtype): + return torch.abs(a) == float("inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isposinf(a: TensorLikeType) -> TensorLikeType: + torch._check( + not utils.is_complex_dtype(a.dtype), + lambda: f"Complex dtype is not supported for isposinf, got dtype {a.dtype}", + ) + if utils.is_float_dtype(a.dtype): + return a == float("inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isneginf(a: TensorLikeType) -> TensorLikeType: + torch._check( + not utils.is_complex_dtype(a.dtype), + lambda: f"Complex dtype is not supported for isneginf, got dtype {a.dtype}", + ) + if utils.is_float_dtype(a.dtype): + return a == float("-inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isnan(a: TensorLikeType) -> TensorLikeType: + return prims.ne(a, a) + + +# alias +mvlgamma = _make_alias(torch.special.multigammaln, "mvlgamma") # type: ignore[has-type] + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + aten_op=None, # CompositeImplicitAutograd +) +def isreal(a: TensorLikeType) -> TensorLikeType: + if utils.is_complex_dtype(a.dtype): + return torch.imag(a) == 0 + return torch.ones_like(a, dtype=torch.bool) + + +# TODO: if this is special maybe it should be defined there and imported here? 
+@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, aten_op=aten.i0 +) +def i0(a): + return prims.bessel_i0(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def lgamma(a): + return prims.lgamma(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log(a): + return prims.log(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log1p(a): + return prims.log1p(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log2(a): + return prims.log2(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log10(a): + return prims.log10(a) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def log_softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + result_dtype = dtype or a.dtype + computation_dtype = utils.get_computation_dtype(result_dtype) + a_ = _maybe_convert_to_dtype(a, computation_dtype) + return _maybe_convert_to_dtype(a_ - logsumexp(a_, dim, keepdim=True), result_dtype) # type: ignore[return-value] + + +@register_decomposition(aten.logsumexp) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def logsumexp( + self: TensorLikeType, dim: DimsType, keepdim: bool = False +) -> TensorLikeType: + if not isinstance(dim, Iterable): + dim = (dim,) + if self.numel() == 0: + return torch.sum(torch.exp(self), dim, keepdim).log() + maxes = torch.amax(self, dim, keepdim=True) + maxes = torch.masked_fill(maxes, maxes.abs() == float("inf"), 0) + maxes_squeezed = maxes if keepdim else torch.squeeze(maxes, dim) + result = torch.sum(torch.exp(self - maxes), dim, keepdim) + return result.log().add(maxes_squeezed) + + +@register_decomposition(aten.nan_to_num) +@out_wrapper() +def nan_to_num( + a: TensorLikeType, + nan: Optional[NumberType] = 0.0, + posinf: Optional[NumberType] = None, + neginf: Optional[NumberType] = None, +) -> TensorLikeType: + assert isinstance(a, TensorLike) + + if utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + return a.clone() + + if nan is None: + nan = 0.0 + + if posinf is None: + posinf = torch.finfo(a.dtype).max + + if neginf is None: + neginf = torch.finfo(a.dtype).min + + result = torch.where(torch.isnan(a), nan, a) # type: ignore[call-overload] + result = torch.where(torch.isneginf(a), neginf, result) # type: ignore[call-overload] + result = torch.where(torch.isposinf(a), posinf, result) # type: ignore[call-overload] + return result + + +def _neg_meta(a: TensorLikeType): + torch._check( + a.dtype is not torch.bool, + lambda: ( + "Negation, the `-` operator, on a bool tensor is not supported. " + "If you are trying to invert a mask, use the `~` or `logical_not()` " + "operator instead." + ), + ) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, extra_meta=_neg_meta +) +def neg(a): + return prims.neg(a) + + +# positive does not use _make_elementwise_unary_reference because it does not support out +# CompositeImplicitAutograd - don't register decomp +def positive(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + if a.dtype is torch.bool: + msg = "positive does not support bool tensors." 
+ raise RuntimeError(msg) + return a + + +# real does not use _make_elementwise_unary_reference because it does not support out +def real(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + if utils.is_complex_dtype(a.dtype): + return prims.real(a) + return a + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def reciprocal(a): + return prims.reciprocal(a) + + +@register_decomposition(aten.round) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def round(a: TensorLikeType, *, decimals: int = 0) -> TensorLikeType: + if decimals == 0: + return prims.round(a) + else: + ten_pow = 10**decimals + ten_neg_pow = 10 ** (-decimals) + return prims.mul(prims.round(prims.mul(a, ten_pow)), ten_neg_pow) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def rsqrt(a): + return prims.rsqrt(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sigmoid(a: TensorLikeType) -> TensorLikeType: + return true_divide(1, add(1, exp(neg(a)))) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def sgn(a): + if utils.is_complex_dtype(a.dtype): + a_abs = a.abs() + return torch.where(a_abs == 0, 0, a / a_abs) + else: + return a.sign() + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def sign(a): + return prims.sign(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def signbit(a): + return prims.signbit(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sin(a): + return prims.sin(a) + + +# Autograd note: This will give the right first derivative at zero (by chance), +# but not the right second derivative +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sinc(a): + a = math.pi * a + return torch.where(a == 0, 1, torch.sin(a) / a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sinh(a): + return prims.sinh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sqrt(a): + return prims.sqrt(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG, + aten_op=None, # CompositeImplicitAutograd, +) +def square(a: TensorLikeType) -> TensorLikeType: + return mul(a, a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def tan(a): + return prims.tan(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def tanh(a): + return prims.tanh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def trunc(a): + return prims.trunc(a) + + +# TODO: register this as a real ref/decomposition once TorchInductor supports complex! 
+def view_as_complex(self: TensorLikeType) -> TensorLikeType: + input_dtype = self.dtype + torch._check( + utils.is_float_dtype(input_dtype), + lambda: f"view_as_complex is only supported for floating point" + f"tensors, but got a tensor of scalar type: {input_dtype}", + ) + sizes = self.size() + torch._check( + len(sizes) != 0, + lambda: "Input tensor must have one or more dimensions", + ) + torch._check( + sizes[-1] == 2, + lambda: "Tensor must have a last dimension of size 2", + ) + + old_strides = self.stride() + torch._check( + old_strides[-1] == 1, + lambda: "Tensor must have a last dimension with stride 1", + ) + dims = old_strides[:-1] + torch._check( + py_all(stride % 2 == 0 for stride in dims), + lambda: "Tensor must have a stride divisible by 2 for all but last dimension", + ) + torch._check( + self.storage_offset() % 2 == 0, + lambda: "Tensor must have a storage_offset divisible by 2", + ) + return prims.view_element_type( + self, utils.corresponding_complex_dtype(input_dtype) + ).squeeze(-1) + + +def _make_elementwise_binary_reference( + type_promotion_kind, + aten_op=infer_aten_op, + name=None, + has_out=True, + supports_lhs_python_scalar=True, + supports_rhs_python_scalar=True, + supports_two_python_scalars=False, + should_register_decomposition=True, +) -> Callable: + def inner(prim: Callable): + nonlocal aten_op, name + if name is None: + name = prim.__name__ + + @wraps(prim) + @elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=type_promotion_kind, + ) + def _ref( + a: Union[Tensor, NumberType], + b: Union[Tensor, NumberType], + ) -> Tensor: + torch._check_value( + supports_lhs_python_scalar or not isinstance(a, Number), + lambda: f"{name}: Received a lhs Python scalar to an elementwise binary " + "operation that does not accept lhs scalars!", + ) + torch._check_value( + supports_rhs_python_scalar or not isinstance(b, Number), + lambda: f"{name}: Received a rhs Python scalar to an elementwise binary " + "operation that does not accept rhs scalars!", + ) + torch._check_value( + supports_two_python_scalars + or not (isinstance(a, Number) and isinstance(b, Number)), + lambda: f"{name}: Receive two Number inputs to an elementwise binary operation!", + ) + a, b = _maybe_broadcast(a, b) + output = prim(a, b) + return handle_noncontiguous_outputs([a, b], output) + + if has_out: + _ref = out_wrapper()(_ref) + + _ref.__name__ = name + if aten_op is infer_aten_op: + aten_op = utils.get_aten_op(prim, name) + if aten_op is not None and should_register_decomposition: + register_decomposition(aten_op)(_ref) + + return _ref + + return inner + + +# Add has its own implementation because it has an alpha argument +@register_decomposition(aten.add) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def add( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + alpha: Optional[NumberType] = None, +): + """ + Reference implementation of torch.add + """ + + a, b = _maybe_broadcast(a, b) + + if alpha is not None: + dtype = a.dtype if isinstance(a, TensorLike) else b.dtype # type: ignore[union-attr] + python_type = utils.dtype_to_type(dtype) + if python_type != bool and not utils.is_weakly_lesser_type( + type(alpha), python_type + ): + msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!" 
+ raise ValueError(msg) + if isinstance(b, TensorLike): + b = prims.mul(b, alpha) + else: + b = b * alpha + + output = prims.add(a, b) + return handle_noncontiguous_outputs([a, b], output) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def atan2(a, b): + return prims.atan2(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_and(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_and(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_left_shift(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.shift_left(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_or(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_or(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_right_shift(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.shift_right_arithmetic(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_xor(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_xor(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, +) +def copysign( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +): + if isinstance(b, Number) and isinstance(a, Tensor): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(a, Tensor) and isinstance(b, Tensor) and a.device != b.device: + msg = "Expected divisor (b) to be on the same device ({}) as dividend (a), but it is found on {}!".format( + a.device, b.device + ) + raise RuntimeError(msg) + return where(signbit(b), neg(abs(a)), abs(a)) + + +# complex = _make_elementwise_binary_reference(prims.complex, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) + + +@register_decomposition(aten.div) +@out_wrapper() +def div( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + rounding_mode: Optional[str] = None, +): + """ + Reference implementation of torch.div + """ + if rounding_mode is None: + return true_divide(a, b) + elif rounding_mode == "trunc": + return trunc_divide(a, b) + elif rounding_mode == "floor": + return floor_divide(a, b) + else: + msg = f"div expected rounding_mode to be one of None, 'trunc', or 'floor' but found {rounding_mode}." 
+ raise ValueError(msg) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def eq(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.eq(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG, +) +def pow( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], +) -> TensorLikeType: + assert isinstance(a, TensorLikeType) or isinstance(b, TensorLikeType) + + if isinstance(b, Number): + if b == 1.0: + return a.clone() # type: ignore[return-value,union-attr] + elif b == 2.0: + return a * a # type: ignore[return-value] + elif b == 0.5: + return torch.sqrt(a) # type: ignore[arg-type] + elif isinstance(a, Number): + if a == 1.0: + return torch.fill(b, True) + if a == 2.0 and ( + utils.is_float_dtype(b.dtype) or utils.is_complex_dtype(b.dtype) + ): + return torch.exp2(b) + + return prims.pow(a, b) + + +# Float power has its own implementation because it has unique type promotion. +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def float_power( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], +) -> Tensor: + if isinstance(a, Number) and isinstance(b, Number): + raise ValueError( + "Receive two Number inputs to an elementwise binary operation!" + ) + + # Handles type promotion + dtype = utils.get_higher_dtype(a, b) + assert dtype is not None + if utils.is_complex_dtype(dtype): + dtype = torch.complex128 + else: + dtype = torch.float64 + + # Float power has the following contiguous cast behavior to be + # consistent with its C++ impl + a = _maybe_convert_to_dtype(a, dtype) + b = _maybe_convert_to_dtype(b, dtype) + + a, b = _maybe_broadcast(a, b) + return pow(a, b) + + +# >>> a = torch.tensor(-0.2500, dtype=torch.float64) +# tensor(-0.250000000000000, dtype=torch.float64) +# +# >>> b = torch.tensor(-0.0010, dtype=torch.float64) +# tensor(-0.001000000000000, dtype=torch.float64) +# +# Note: In this case, casting float to double will expand the float mantissa with zeros, +# while creating a double generates a distinct mantissa. +# >>> torch.tensor(-0.001).to(dtype=torch.float64) +# tensor(-0.001000000047497, dtype=torch.float64) +# +# Floor Division +# The difference is caused because torch.remainder(a, b) = -0.001. +# +# >>> torch.floor(torch.true_divide(a, b)) +# tensor(250., dtype=torch.float64) +# +# >>> torch.div(a, b, rounding_mode='floor') +# tensor(249., dtype=torch.float64) +# +# Definition: a // b = (a - remainder(a, b)) / b +# >>> torch.true_divide(torch.sub(a, torch.remainder(a, b)), b) +# tensor(249., dtype=torch.float64) +# +# For reference, see CPython's implementation: +# https://github.com/python/cpython/blob/ace008c531dd685a30c1dd68f9b5ba35f20171cf/Objects/floatobject.c#L636 + + +@_make_elementwise_binary_reference( + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_two_python_scalars=True, + should_register_decomposition=False, +) +def floor_divide( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +): + # Wrap scalars because some references only accept tensor arguments. 
+ if isinstance(a, Number) and isinstance(b, Number): + a = scalar_tensor(a) + b = scalar_tensor(b) + elif isinstance(b, Number) and isinstance(a, Tensor): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(a, Number) and isinstance(b, Tensor): + a = scalar_tensor(a, dtype=b.dtype, device=b.device) + elif isinstance(a, Tensor) and isinstance(b, Tensor) and a.device != b.device: + if a.device == torch.device("cpu"): + msg = "Expected divisor (b) to be on the same device ({}) as dividend (a), but it is found on {}!".format( + a.device, b.device + ) + raise RuntimeError(msg) + else: + b = prims.device_put(b, device=a.device) + + assert isinstance(a, Tensor) and isinstance(b, Tensor) + dtype = a.dtype + if utils.is_float_dtype(dtype): + return _floor_divide_float(a, b) + elif utils.is_integer_dtype(dtype): + return _floor_divide_integer(a, b) + else: + torch._check(False, lambda: f"{dtype} not supported for floor_divide") + + +def _floor_divide_integer(a: Tensor, b: Tensor) -> Tensor: + a, b = _maybe_broadcast(a, b) + + if not a.dtype.is_signed: + return prims.div(a, b) + + # Convert truncation to flooring: + offset = (torch.signbit(a) != torch.signbit(b)).logical_and(torch.fmod(a, b) != 0) + return prims.div(a, b) - _maybe_convert_to_dtype(offset, a.dtype) + + +def _floor_divide_float(a: Tensor, b: Tensor) -> Tensor: + mod = fmod(a, b) + div = true_divide(sub(a, mod), b) + + # Ensure that the remainder has the same sign as denominator + different_signed_inputs = bitwise_xor(lt(a, 0), lt(b, 0)) + non_zero_remainder = ne(mod, 0) + mask = bitwise_and(non_zero_remainder, different_signed_inputs) + div = where(mask, sub(div, 1), div) + + # Map quotient to nearest integer value + floor_div = floor(div) + mask = gt(sub(div, floor_div), 0.5) + floor_div = where(mask, add(floor_div, 1), floor_div) + + basic_div = true_divide(a, b) + zero_tensor = scalar_tensor(0, dtype=basic_div.dtype, device=basic_div.device) + + # If quotient is zero, copy signbit from true_divide quotient + floor_div = where(ne(div, 0), floor_div, copysign(zero_tensor, basic_div)) + + # If denominator is zero, then follow true_divide behavior + return where(ne(b, 0), floor_div, basic_div) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def fmax(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmax(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def fmin(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmin(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=True, +) +def fmod(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmod(a, b) + + +@register_decomposition(aten.frexp) +@out_wrapper("mantissa", "exponent") +def frexp(self: TensorLikeType) -> Tuple[TensorLikeType, TensorLikeType]: + return torch.return_types.frexp(prims.frexp(self)) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def gcd(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.gcd(a, b) + + +@_make_elementwise_binary_reference( + 
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def ge(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.ge(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def gt(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.gt(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def heaviside(input: TensorLikeType, values: TensorLikeType) -> TensorLikeType: + input_eq_zero = torch.eq(input, 0) + input_lt_zero = torch.logical_or(torch.lt(input, 0), torch.isnan(input)) + zeros_and_ones = torch.where(input_lt_zero, 0, 1) + output = torch.where(input_eq_zero, values, zeros_and_ones) + return output + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def hypot(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.hypot(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def igamma(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.igamma(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def igammac(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.igammac(a, b) + + +def _check_close_args( + name: str, + a: TensorLikeType, + b: TensorLikeType, + rtol: float, + atol: float, +) -> None: + torch._check_value( + a.dtype == b.dtype, + lambda: f"{name}: Attempting to compare tensors of different dtypes {a.dtype} and {b.dtype}!", + ) + torch._check( + rtol >= 0, + lambda: f"{name}: rtol must be greater than or equal to zero, but got {rtol}!", + ) + torch._check( + atol >= 0, + lambda: f"{name}: atol must be greater than or equal to zero, but got {atol}!", + ) + + +# CompositeImplicitAutograd - don't register decomp +def isclose( + a: TensorLikeType, + b: TensorLikeType, + rtol: float = 1e-05, + atol: float = 1e-08, + equal_nan: bool = False, +) -> TensorLikeType: + _check_close_args(name="torch.isclose", a=a, b=b, rtol=rtol, atol=atol) + + close = eq(a, b) + if equal_nan and (utils.is_float_dtype(a.dtype) or utils.is_complex_dtype(a.dtype)): + close = logical_or(close, logical_and(isnan(a), isnan(b))) + + # Note: In case of zero tolerances the closeness inequality degenerates to an equality check. + # In this case, the short-circuit prevents false positives as detailed in the paragraph below. + if atol == 0 and rtol == 0: + return close + + # Note [closeness error computation] + # atol and rtol are provided as doubles, so the computation + # rtol * other will produce a float or complex tensor. + # When the difference (self - other) is compared to it then the + # tensor representing the difference will also be cast to float or complex. + # However, since (self - other) in uint8 is very likely to produce a + # negative value, this moves the cast forward so the difference is + # always computed in a float or complex type. 
+ # If the values of the integer tensors cannot be exactly represented + # by the default scalar type then this may cause an incorrect result. + if not utils.is_float_dtype(a.dtype) and not utils.is_complex_dtype(a.dtype): + a = prims.convert_element_type(a, torch.get_default_dtype()) + b = prims.convert_element_type(b, torch.get_default_dtype()) + + allowed_error = add(atol, abs(mul(b, rtol))) + actual_error = abs(sub(a, b)) + + # Computes finite closeness + result = logical_or( + close, logical_and(isfinite(actual_error), le(actual_error, allowed_error)) + ) + + return result + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def lcm(a: TensorLikeType, b: TensorLikeType): + dtype = a.dtype + # promoting to int32 to maintain 100% consistency with C++ and to + # prevent overflow in case of int8 and int16 + promote_to_int = dtype in (torch.int8, torch.int16) + if promote_to_int: + a = prims.convert_element_type(a, torch.int32) + b = prims.convert_element_type(b, torch.int32) + + g = torch.gcd(a, b) + # Avoid division by zero in case gcd(0, 0) == 0 + g = torch.where(g == 0, 1, g) + res = torch.abs(prims.div(a, g) * b) + return res if not promote_to_int else prims.convert_element_type(res, dtype) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def le(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.le(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def logaddexp(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + # Nb. this implementation does not distribute the gradients evenly when a == b + mask = torch.real(a) >= torch.real(b) + max_ = torch.where(mask, a, b) + min_ = torch.where(mask, b, a) + inf_mask = torch.logical_and( + torch.logical_not(torch.isfinite(torch.real(a))), torch.real(a) == torch.real(b) + ) + if utils.is_complex_dtype(a.dtype) or utils.is_complex_dtype(b.dtype): + # are you wondering what this bunch of codes are for? edge cases! + neg_min_mask = torch.real(min_) < 0 + inf_vals = torch.where( + neg_min_mask, min_, torch.log(torch.exp(min_) + torch.exp(max_)) + ) + non_nan_vals = torch.where( + inf_mask, inf_vals, max_ + torch.log1p(torch.exp(min_ - max_)) + ) + # the type for full_like does not include tensor yet + nan_mask = torch.isnan(min_) + return torch.where(nan_mask, complex(float("nan"), float("nan")), non_nan_vals) # type: ignore[call-overload] + else: + return torch.where(inf_mask, a, max_ + torch.log1p(torch.exp(min_ - max_))) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def logaddexp2(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + torch._check( + not (utils.is_complex_dtype(a.dtype) or utils.is_complex_dtype(b.dtype)), + lambda: "logaddexp2 doesn't support complex dtypes", + ) + # Nb. 
this implementation does not distribute the gradients evenly when a == b + mask = a >= b + max_ = torch.where(mask, a, b) + min_ = torch.where(mask, b, a) + inf_mask = torch.logical_and(torch.isinf(a), a == b) + inv_log_2 = 1.0 / math.log(2) + result = max_ + torch.log1p(torch.exp2(min_ - max_)) * inv_log_2 + return torch.where(inf_mask, a, result) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_and(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return a & b + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def logical_not(a: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + return a == 0 + return ~a + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_or(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return bitwise_or(a, b) + + +# TODO: skip unnecessary conversion of long to float +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_xor(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return a ^ b + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def lt(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.lt(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def maximum(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.maximum(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def minimum(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.minimum(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_two_python_scalars=True, +) +def mul(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.mul(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def ne(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.ne(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def nextafter(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.nextafter(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def remainder(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.remainder(a, b) + + +# reverse sub +@register_decomposition(aten.rsub) +@out_wrapper() +def rsub( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + alpha: NumberType = 1, +): + if isinstance(a, Number): + msg = "Received a Number for the first argument, but expected a Tensor" + raise ValueError(msg) + + return torch.sub(b, a, alpha=alpha) + + +# TODO: consider refactoring this with add impl +# sub has its own implementation 
because it has an alpha argument +@register_decomposition(aten.sub) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def sub( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + alpha: NumberType = 1, +): + """ + Reference implementation of torch.sub + """ + + a, b = _maybe_broadcast(a, b) + + if alpha != 1: + dtype = a.dtype if isinstance(a, TensorLike) else b.dtype # type: ignore[union-attr] + python_type = utils.dtype_to_type(dtype) + if not utils.is_weakly_lesser_type(type(alpha), python_type): + msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!" + raise ValueError(msg) + if isinstance(b, torch.Tensor): + b = prims.mul(b, alpha) + else: + # Carefully not to use prims.mul if b is a scalar / symint. + # prims.mul always returns a tensor, + # which will mess with type promotion. + b = b * alpha + + output = prims.sub(a, b) + return handle_noncontiguous_outputs([a, b], output) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + name="true_divide", + aten_op=None, # CompositeImplicitAutograd + supports_two_python_scalars=True, +) +def true_divide(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.div(a, b) + + +@register_decomposition(aten.xlogy) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def xlogy(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]): + torch._check( + isinstance(a, TensorLike) or isinstance(b, TensorLike), + lambda: 'Expected either argument a or b to be a Tensor"', + ) + + # Operations like eq and log do not handle scalar values, so we convert them to scalar_tensors. 
+ if isinstance(b, TensorLike) and isinstance(a, Number): + a = scalar_tensor(a, dtype=b.dtype, device=b.device) + elif isinstance(a, TensorLike) and isinstance(b, Number): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + + # mypy: expected "Tensor" + assert isinstance(a, TensorLike) + assert isinstance(b, TensorLike) + rhs = torch.where(torch.eq(a, 0), 0, torch.mul(a, torch.log(b))) + return torch.where(torch.isnan(b), float("nan"), rhs) + + +@_make_elementwise_binary_reference( + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + aten_op=None, # CompositeImplicitAutograd + supports_two_python_scalars=True, +) +def trunc_divide( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +): + dtype = utils.get_dtype(a) + if utils.is_integer_dtype(dtype): + return prims.div(a, b) + + return trunc(prims.div(a, b)) + + +# +# Elementwise Ternary References +# + + +@register_decomposition(aten.addcdiv) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "tensor1", "tensor2"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def addcdiv( + self: TensorLikeType, + tensor1: TensorLikeType, + tensor2: TensorLikeType, + *, + value: NumberType = 1, +) -> TensorLikeType: + """ + Reference implementation of torch.addcdiv + """ + if value is not None: + dtype = self.dtype # no scalars allowed, see add + python_type = utils.dtype_to_type(dtype) + torch._check_value( + utils.is_weakly_lesser_type(type(value), python_type), + lambda: f"value argument of type {type(value)} cannot be safely cast to type {python_type}!", + ) + + return self + value * tensor1 / tensor2 + + +@register_decomposition(aten.addcmul) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "tensor1", "tensor2"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def addcmul( + self: TensorLikeType, + tensor1: TensorLikeType, + tensor2: TensorLikeType, + *, + value: NumberType = 1, +) -> TensorLikeType: + """ + Reference implementation of torch.addcmul + """ + if value is not None: + dtype = self.dtype # no scalars allowed, see add + python_type = utils.dtype_to_type(dtype) + torch._check_value( + utils.is_weakly_lesser_type(type(value), python_type), + lambda: f"value argument of type {type(value)} cannot be safely cast to type {python_type}!", + ) + + return self + value * tensor1 * tensor2 + + +@register_decomposition(aten.clamp) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "min", "max"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def clamp( + a: TensorLikeType, + min: Optional[TensorOrNumberLikeType] = None, + max: Optional[TensorOrNumberLikeType] = None, +) -> TensorLikeType: + # NOTE: grad behavior with implementation `where` is not consistent on `nan` + if min is None and max is None: + msg = "clamp called but both min and max are none!" + raise ValueError(msg) + if min is not None: + a_isnan = torch.isnan(a) + condition = torch.bitwise_or(torch.ge(a, min), a_isnan) # type: ignore[arg-type] + # we should also propagate `nan` coming from boundaries. However, that's + # not necessary since `ge` would already `False` when either operands has + # a `nan`. 
So this line below is redundant + # `condition = bitwise_and(condition, bitwise_not(isnan(min)))` + a = torch.where(condition, a, min) # type: ignore[arg-type] + if max is not None: + a_isnan = torch.isnan(a) + # same as above, no need to adjust `nan` from `max` + condition = torch.bitwise_or(torch.le(a, max), a_isnan) # type: ignore[arg-type] + a = torch.where(condition, a, max) # type: ignore[arg-type] + + return a + + +@register_decomposition(aten.clamp_min) +@out_wrapper() +def clamp_min( + self: TensorLikeType, + min: Optional[TensorOrNumberLikeType] = None, +) -> TensorLikeType: + return torch.clamp(self, min=min) # type: ignore[arg-type] + + +@register_decomposition(aten.clamp_max) +@out_wrapper() +def clamp_max( + self: TensorLikeType, + max: Optional[TensorOrNumberLikeType] = None, +) -> TensorLikeType: + return torch.clamp(self, max=max) # type: ignore[arg-type] + + +# +# Conditional references +# + + +# https://pytorch.org/docs/stable/generated/torch.where.html +# TODO: implement alternate where +@register_decomposition(aten.where) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def where( + pred: Tensor, + a: Optional[TensorOrNumberLikeType] = None, + b: Optional[TensorOrNumberLikeType] = None, +): + """ """ + + if a is None or b is None: + raise NotImplementedError + + utils.check_same_device(pred, a, b, allow_cpu_scalar_tensors=True) + torch._check( + pred.dtype is torch.bool, + lambda: f"expected predicate to be bool, got {pred.dtype}", + ) + + pred, a, b = _maybe_broadcast(pred, a, b) + return prims.where(pred, a, b) + + +# +# Data Movement References +# +@register_decomposition(aten.clone) +@out_wrapper() +def clone( + a: TensorLikeType, *, memory_format: torch.memory_format = torch.preserve_format +) -> TensorLikeType: + result = prims.clone(a, memory_format=memory_format) + return result + + +def copy_to(a: Tensor, b: Tensor, *, allow_cross_device=True): + if not allow_cross_device and a.device != b.device: + msg = "Attempting to copy from device {} to device {}, but cross-device copies are not allowed!".format( + b.device, a.device + ) + raise RuntimeError(msg) + + return prims.copy_to(a, b) + + +@register_decomposition(aten.item) +def item(a: TensorLikeType) -> NumberType: + if a.numel() != 1: + msg = f"Can't convert a tensor with {a.numel()} elements to a number!" + raise ValueError(msg) + + # NOTE: explicit conversion is necessary for bool! + # See https://github.com/pytorch/pytorch/issues/78071 + number_type = utils.dtype_to_type(a.dtype) + return number_type(prims.item(a)) + + +# fast path when `to` returns an alias to input. 
This mimics the same function in aten +def _to_will_alias( + a: TensorLikeType, + device: Optional[DeviceLikeType] = None, + dtype: Optional[torch.dtype] = None, + copy: Optional[bool] = None, + layout: Optional[torch.layout] = None, + memory_format: Optional[torch.memory_format] = None, + pin_memory: Optional[bool] = False, + non_blocking: bool = False, # not using non_blocking +) -> bool: + return ( + not copy + and (device is None or a.device == device) + and (dtype is None or a.dtype == dtype) + and (layout is None or a.layout == layout) + # is_pinned issue #84925 + # and (pin_memory is None or pin_memory == a.is_pinned()) + and ( + memory_format is None + or memory_format == torch.preserve_format + or utils.is_contiguous_for_memory_format(a, memory_format=memory_format) + ) + ) + + +@singledispatch +def _to_dispatch(*args, **kwargs): + raise NotImplementedError + + +@_to_dispatch.register +def _to_device( + device: torch.device, + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "device": device, + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_device_str( + device: str, + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "device": torch.device(device), + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_dtype( + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_other( + other: Tensor, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + device = other.device + dtype = other.dtype + layout = other.layout + # is_pinned issue #84925 + # pin_memory = other.is_pinned() + kwargs = { + "device": device, + "dtype": dtype, + "layout": layout, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +# remove to_kwargs that is already present in `a` +def _canonicalize_to_arguments(a: Tensor, to_kwargs: dict): + options_to_check = ["dtype", "device", "layout", "memory_format"] + # "device" option could be passed a str instead torch.device + if "device" in to_kwargs and isinstance(to_kwargs["device"], str): + to_kwargs["device"] = torch.device(to_kwargs["device"]) + + for kw in options_to_check: + if kw in to_kwargs: + if ( + (kw == "memory_format" and to_kwargs[kw] is torch.preserve_format) + or ( + kw == "device" + and to_kwargs[kw].type == a.device.type + and ( + not to_kwargs[kw].index or to_kwargs[kw].index == a.device.index + ) + ) + or ( + getattr(a, kw, None) == to_kwargs[kw] + ) # this also handles {"memory_format": None} + ): + to_kwargs.pop(kw) + + +def to(a: TensorLikeType, *args, **kwargs) -> TensorLikeType: + # handled dispatch via positional arguments + if len(args) != 0: + kwargs = _to_dispatch(*args, **kwargs) + + # TODO: is_pinned is not currently supported in refs or fake_tensor + # https://github.com/pytorch/pytorch/issues/84925 + assert "pin_memory" not 
in kwargs + _canonicalize_to_arguments(a, kwargs) + + if _to_will_alias(a, **kwargs): + return a + + copy = kwargs.pop("copy") if "copy" in kwargs else False + non_blocking = kwargs.pop("non_blocking") if "non_blocking" in kwargs else False + + # short-circuit to `prims.convert_element_type` when `to` is just a dtype change + if ( + (copy or (kwargs.get("dtype", a.dtype) != a.dtype)) + and (not non_blocking) + and ("memory_format" not in kwargs) + and ("device" not in kwargs) + and ("layout" not in kwargs) + # is_pinned issue #84925 + # and ("pin_memory" not in kwargs) + ): + return prims.convert_element_type(a, kwargs.get("dtype", a.dtype)) + + result = torch.empty_like(a, **kwargs) + # TODO: non_blocking should be handled by `copy_to` + copy_to(result, a) + return result + + +# +# Reduction references +# + + +def _reduction( + a: TensorLikeType, + prim: Callable, + *, + has_identity: bool = True, + accepts_dim_tuple: bool = True, # to handle min/argmin that accept single dim only + dims: Optional[DimsType] = None, + keepdims: bool = False, + dtype: Optional[torch.dtype] = None, # should be specified for ops that support it + out: Optional[Tensor] = None, + output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND, +) -> TensorLikeType: # it is usually SAME, but I want + # ref writers to actually think about what to put here + assert isinstance(a, TensorLike) + if a.ndim > 64: + raise RuntimeError( + f"Received a tensor with {a.ndim} dimensions, but only tensors with up to 64 dims are supported!" + ) + + if out is not None: + assert isinstance(out, TensorLike) + if dtype is not None: + # TODO - this is true for eager mode currently, but it's wrong behavior for complex norms + if dtype != out.dtype: + raise RuntimeError( + "dtype argument and out dtype must match in reduction" + ) + if not accepts_dim_tuple: + assert dims is None or isinstance(dims, Dim) + if isinstance(dims, Dim): + dims = (dims,) # type: ignore[assignment] + dims = utils.reduction_dims(a.shape, dims) + if not has_identity: + valid_shape = a.ndim == 0 or py_all(a.shape[i] for i in dims) + if not valid_shape: + raise RuntimeError( + "reducing over zero-size dimension for reduction operation without identity" + ) + computation_dtype, result_dtype = utils.reduction_dtypes( + a, output_dtype_kind, dtype + ) + a = _maybe_convert_to_dtype(a, computation_dtype) # type: ignore[method-assign] + result = prim(a, dims) + if keepdims: + output_shape = [a.shape[i] if i not in dims else 1 for i in range(a.ndim)] + broadcast_dims = [i for i in range(a.ndim) if i not in dims] + result = prims.broadcast_in_dim(result, output_shape, broadcast_dims) + + if out is not None: + assert result_dtype is not None + if dtype is not None and result_dtype != out.dtype: + raise RuntimeError( + "Expected the dtype of reduction result and out to match" + ) + out = _maybe_resize_out(out, result.shape) + return _safe_copy_out(copy_from=result, copy_to=out) # type: ignore[arg-type] + + if result.dtype != result_dtype and result_dtype is not None: + result = prims.convert_element_type(result, result_dtype) + + return result + + +def _make_copy_from_view(fn): + """ + Given a view function (e.g. torch.diagonal) generates its copy variant (e.g. 
torch.diagonal_copy) + """ + name = fn.__name__ + fn = out_wrapper()(fn) + + def _fn(*args, out=None, **kwargs): + result = fn(*args, out=out, **kwargs) + if out is None: + return result.clone(memory_format=torch.contiguous_format) + return result + + copy_name = f"{name}_copy" + _fn.__name__ = copy_name + _fn = register_decomposition(getattr(aten, copy_name))(_fn) + return _fn + + +# Saves Python all +py_all = all + + +@register_decomposition(aten.all) +@out_wrapper() +def all( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, +) -> TensorLikeType: + result = torch.logical_not(torch.any(torch.logical_not(a), dim, keepdim=keepdim)) + + if a.dtype == torch.uint8: + result = result.to(dtype=torch.uint8) + + return result + + +# Saves Python any +py_any = any + + +@register_decomposition(aten.any) +@out_wrapper() +def any( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, +) -> TensorLikeType: + a_ = _maybe_convert_to_dtype(a, torch.bool) + if isinstance(dim, (list, tuple)) and len(dim) == 0: + result = a_.clone() + else: + result = a_.sum(dim=dim, keepdim=keepdim).ne(False) + + # Preserves uint8 -- probably a legacy mask thing + if a.dtype is torch.uint8: + return prims.convert_element_type(result, torch.uint8) + + return result + + +@register_decomposition([aten.sum.dim_IntList, aten.sum.IntList_out]) +def sum( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + if dtype is None: + if out is not None: + dtype = out.dtype + elif utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + dtype = torch.int64 + else: + dtype = a.dtype + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + return _reduction( + a, + prims.sum, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=out, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +def sum_to_size( + a: Tensor, + *shape, +) -> Tensor: + shape = utils.extract_shape_from_varargs(shape, validate=False) + torch._check( + utils.is_expandable_to(shape, a.shape), + lambda: f'sum_to_size: size "{shape}" is not expandable to size "{a.shape}"', + ) + # In ATen scalar tensors are sent through sum and the result is returned as + # type promoted + if utils.is_same_shape(shape, a.shape) and len(shape) > 0: + return prims.view_of(a) + leading_dims = a.ndim - len(shape) + reduce_dims = tuple(range(leading_dims)) + tuple( + i + for i in range(leading_dims, len(shape)) + if shape[i - leading_dims] == 1 and a.shape[i] != 1 + ) + return torch.sum(a, dim=reduce_dims, keepdim=True, dtype=None) + + +@register_decomposition(aten.prod) +def prod( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + keepdim: bool = False, + *, + dtype=None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + if dtype is None: + if out is not None: + dtype = out.dtype + elif utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + dtype = torch.int64 + else: + dtype = a.dtype + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + return _reduction( + a, + prims.prod, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=out, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +@register_decomposition(aten.amin) +def amin( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + out: 
Optional[Tensor] = None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + return _reduction( + a, + prims.amin, + dims=dim, + keepdims=keepdim, + dtype=None, + out=out, + has_identity=False, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +@register_decomposition(aten.amax) +def amax( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + out: Optional[Tensor] = None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + return _reduction( + a, + prims.amax, + dims=dim, + keepdims=keepdim, + dtype=None, + out=out, + has_identity=False, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +def _dim_var_dispatch(dim=None, unbiased=None): + # There's the following overload of torch.var: + # var(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + # We need to explicitly convert bool dims to unbiased arg + if unbiased is None and isinstance(dim, bool): + unbiased = dim + dim = None + return dim, unbiased + + +@register_decomposition(aten.var) +@out_wrapper() +def var( + a: TensorLikeType, + dim: Optional[DimsType] = None, + unbiased: Optional[bool] = None, + keepdim: bool = False, + *, + correction: Optional[NumberType] = None, +) -> TensorLikeType: + dim, unbiased = _dim_var_dispatch(dim, unbiased) + correction = utils.set_correction(unbiased, correction) + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + result = _reduction( + a, + partial(prims.var, correction=correction), + dims=dim, + keepdims=keepdim, + dtype=None, + out=None, + has_identity=True, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, + ) + return result + + +@register_decomposition(aten.std) +@out_wrapper() +def std( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + unbiased: Optional[bool] = None, + keepdim: bool = False, + *, + correction: Optional[NumberType] = None, +) -> TensorLikeType: + dim, unbiased = _dim_var_dispatch(dim, unbiased) + correction = utils.set_correction(unbiased, correction) + + opmath_dtype, dtype = utils.reduction_dtypes( + a, REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT + ) + a = _maybe_convert_to_dtype(a, opmath_dtype) + a_var = torch.var(a, dim, correction=correction, keepdim=keepdim) + a_std = torch.sqrt(a_var) + assert dtype is not None + return _maybe_convert_to_dtype(a_std, dtype) + + +@register_decomposition(aten.mean) +def mean( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype=None, + out=None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + orig_dtype = dtype + if dtype is None: + dtype = a.dtype + # can't use out wrapper because of this argument + torch._check( + out is None or out.dtype == dtype, + lambda: f"Expected out tensor to have dtype {dtype}, but got {out.dtype} instead", + ) + result = _reduction( + a, + prims.sum, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=None, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE, + ) + torch._check( + utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype), + lambda: ( + f"mean(): could not infer output dtype. " + f"{'Input' if orig_dtype is None else 'Optional'} dtype must be either " + f"a floating point or complex dtype. 
Got: {dtype}" + ), + ) + if isinstance(dim, Dim): + dim = (dim,) # type: ignore[assignment] + dims = utils.reduction_dims(a.shape, dim) # type: ignore[arg-type] + nelem = 1 if a.ndim == 0 else reduce(operator.mul, (a.shape[i] for i in dims), 1) + result = true_divide(result, nelem) + result_dtype = a.dtype if dtype is None else dtype + result = _maybe_convert_to_dtype(result, result_dtype) # type: ignore[method-assign] + if out is not None: + assert isinstance(out, TensorLike) + out = _maybe_resize_out(out, result.shape) + return _safe_copy_out(copy_from=result, copy_to=out) # type: ignore[arg-type] + return result + + +@register_decomposition(aten.std_mean) +@out_wrapper("out0", "out1") +def std_mean( + a: TensorLikeType, + dim: Optional[DimsType] = None, + *, + unbiased: Optional[bool] = None, + keepdim: bool = False, + correction: Optional[NumberType] = None, +): + dim, unbiased = _dim_var_dispatch(dim, unbiased) + correction = utils.set_correction(unbiased, correction) + opmath_dtype, dtype = utils.reduction_dtypes( + a, REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT + ) + original_dtype = a.dtype + a = _maybe_convert_to_dtype(a, opmath_dtype) + a_var, a_mean = torch.var_mean(a, dim, correction=correction, keepdim=keepdim) + a_std = torch.sqrt(a_var) + assert dtype is not None + return ( + _maybe_convert_to_dtype(a_std, dtype), + _maybe_convert_to_dtype(a_mean, original_dtype), + ) + + +@register_decomposition(aten.var_mean) +@out_wrapper("out0", "out1") +def var_mean( + a: TensorLikeType, + dim: Optional[DimsType] = None, + unbiased: Optional[bool] = None, + keepdim: bool = False, + *, + correction: Optional[NumberType] = None, +): + dim, unbiased = _dim_var_dispatch(dim, unbiased) + v = var(a, dim, unbiased, keepdim, correction=correction) + m = mean(a, dim, keepdim) + return v, m + + +@register_decomposition(aten.addr) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "vec1", "vec2"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def addr( + self: TensorLikeType, + vec1: TensorLikeType, + vec2: TensorLikeType, + *, + beta: NumberType = 1, + alpha: NumberType = 1, +) -> TensorLikeType: + torch._check( + vec1.ndim == 1, + lambda: f"addr: Expected 1-D argument vec1, but got {vec1.ndim}-D", + ) + torch._check( + vec2.ndim == 1, + lambda: f"addr: Expected 1-D argument vec2, but got {vec2.ndim}-D", + ) + self = self.expand(vec1.shape[0], vec2.shape[0]) + if utils.is_boolean_dtype(self.dtype): + # Integers are accepted for booleans + torch._check( + is_weakly_lesser_type(type(beta), int), + lambda: f"expected bool/int beta but got {type(beta)}", + ) + torch._check( + is_weakly_lesser_type(type(alpha), int), + lambda: f"expected bool/int alpha but got {type(beta)}", + ) + if not beta: + return torch.outer(vec1, vec2) if alpha else torch.full_like(self, False) + else: + return torch.logical_or( + self, + torch.outer(vec1, vec2) if alpha else torch.full_like(self, False), + ) + else: + torch._check( + is_weakly_lesser_type(type(beta), dtype_to_type(self.dtype)), + lambda: f"cannot safely convert {type(beta)} to {self.dtype}", + ) + torch._check( + is_weakly_lesser_type(type(alpha), dtype_to_type(self.dtype)), + lambda: f"cannot safely convert {type(alpha)} to {self.dtype}", + ) + if beta == 0: + # This means NaNs from self are dropped if beta is zero + return alpha * torch.outer(vec1, vec2) + else: + return beta * self + alpha * torch.outer(vec1, vec2) + + +# CompositeImplicitAutograd - don't register decomp +def atleast_1d( + arg: 
Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_1d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + res = tuple(a if a.ndim >= 1 else unsqueeze(a, 0) for a in args_) + return res if len(res) > 1 else res[0] + + +# Helper function with assert to avoid MyPy error +# of incompatible type passed to unsqueeze +def _unsqueeze_atleast( + at_least_fn: Callable, dim: int, arg: TensorLikeType +) -> TensorLikeType: + arg_ = at_least_fn(arg) + assert isinstance(arg_, TensorLike) + return unsqueeze(arg_, dim) + + +# CompositeImplicitAutograd - don't register decomp +def atleast_2d( + arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_2d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + unsqueeze_atleast_1d = partial(_unsqueeze_atleast, atleast_1d, 0) + res = tuple(a if a.ndim >= 2 else unsqueeze_atleast_1d(a) for a in args_) + return res if len(res) > 1 else res[0] + + +# CompositeImplicitAutograd - don't register decomp +def atleast_3d( + arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_3d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + unsqueeze_atleast_2d = partial(_unsqueeze_atleast, atleast_2d, -1) + res = tuple(a if a.ndim >= 3 else unsqueeze_atleast_2d(a) for a in args_) + return res if len(res) > 1 else res[0] + + +def as_strided( + a: TensorLikeType, + size: ShapeType, + stride: StrideType, + storage_offset: Optional[int] = None, +) -> TensorLikeType: + storage_offset_int = ( + storage_offset if storage_offset is not None else a.storage_offset() + ) + return prims.as_strided(a, size, stride, storage_offset_int) + + +@register_decomposition(aten.as_strided_scatter) +@out_wrapper() +def as_strided_scatter( + input: TensorLikeType, + src: TensorLikeType, + size: ShapeType, + stride: StrideType, + storage_offset: Optional[int] = None, +) -> TensorLikeType: + storage_offset_int = 0 if storage_offset is None else storage_offset + return prims.as_strided_scatter(input, src, size, stride, storage_offset_int) + + +def broadcast_shapes(*shapes) -> ShapeType: + return torch.Size(_broadcast_shapes(*shapes)) + + +@aten.broadcast_tensors.default.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.broadcast_tensors.default.py_impl(DispatchKey.Meta) +def broadcast_tensors(*tensors) -> List[TensorLikeType]: + if len(tensors) == 1 and not isinstance(tensors[0], Tensor): + tensors = tensors[0] + return list(_maybe_broadcast(*tensors, preserve_cpu_scalar_tensors=False)) + + +# CompositeImplicitAutograd - don't register decomp +def broadcast_to(a: TensorLikeType, size: ShapeType) -> TensorLikeType: + start = len(size) - len(a.shape) + dims = tuple(range(start, len(a.shape) + start)) + return prims.broadcast_in_dim(a, size, dims) + + +@register_decomposition(aten.cat) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("tensors",), + 
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def cat(tensors: TensorSequenceType, dim: int = 0) -> TensorLikeType: + def cat_compute_output_memory_format(inputs): + format = None + for t in inputs: + f = utils.suggest_memory_format(t) + if f == torch.contiguous_format: + return f + if format is not None and format != f: + return torch.contiguous_format + format = f + assert format is not None + return format + + if len(tensors) == 0: + msg = "cat expects at least one tensor, but received zero!" + raise ValueError(msg) + + for tensor in tensors: + assert isinstance(tensor, TensorLike) + + utils.check_same_device(*tensors, allow_cpu_scalar_tensors=False) + + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + # This is a bit tricky. Naively, you would expect to just pick one + # arbitrary tensor and check that all tensors match this tensor. However, + # there is legacy behavior which says that if you have a 1-D empty tensor + # (0,), this is permissible. So you can't assume that all the tensors + # have same dimensionality, and you can't assume that the first tensor is + # the correct stencil. + # + # We'll implement this in a few passes. First, we will try to infer the + # ndim of the cat output. If this ndim != 1, then we know that all ndim = + # 1 inputs must be empty, or are errors. If this ndim == 1, then life + # is easy (the legacy special case coincides with regular handling). + # + # NB: The regular implementation of cat just filters out empty inputs, + # but we do it slightly different here for better handling for unbacked + # SymInts + + example = None + for i, t in enumerate(tensors): + if example is None: + if t.ndim != 1: + example = t + else: + if t.ndim != 1: + torch._check( + t.ndim == example.ndim, + lambda: "Number of dimensions of tensors must match. " + f"Expected {example.ndim}-D tensors, but got {t.ndim}-D for " + f"tensor number {i} in the list", + ) + + if example is None: + # example is None if everything is 1-D. If so, just arbitrarily pick + # the first one + example = tensors[0] + + shape = example.shape + filtered = [] + for tensor_idx, tensor in enumerate(tensors): + if len(shape) != len(tensor.shape): + assert tensor.ndim == 1 # we've already checked this above + # Don't suggest the legacy behavior in the error message + torch._check( + tensor.shape[0] == 0, + lambda: f"Number of dimensions of tensors must match. 
" + f"Expected {example.ndim}-D tensors, but got 1-D for " + f"tensor number {tensor_idx} in the list", + ) + else: + # Remove inputs that are 1-D, zero size + if tensor.ndim == 1 and guard_size_oblivious(tensor.shape[0] == 0): + continue + # Don't bother checking size match, prims.cat will handle it + filtered.append(tensor) + + memory_format = cat_compute_output_memory_format(tensors) + + if len(filtered) == 0: + t = tensors[0] + + # TODO: fix this to work with meta tensors + try: + requires_grad = any(x.requires_grad for x in tensors) + except Exception: + requires_grad = False + + return empty( + (0,), + dtype=t.dtype, + device=t.device, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + dim = utils.canonicalize_dim(filtered[0].ndim, dim) + utils.validate_idx(filtered[0].ndim, dim) + + return prims.cat(filtered, dim).clone(memory_format=memory_format) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def column_stack(tensors: TensorSequenceType) -> TensorLikeType: + aligned_tensors = tuple( + x if x.ndim > 1 else x.reshape((x.numel(), 1)) for x in tensors + ) + return cat(aligned_tensors, 1) + + +def conj(input: TensorLikeType) -> TensorLikeType: + if not utils.is_complex_dtype(input.dtype): + return input + if input.is_sparse: + return torch.conj_physical(input) + return prims.conj(input) + + +# This replicates at::constant_pad_nd, defined in ATen/native/PadNd.cpp +@register_decomposition(aten.constant_pad_nd) +@out_wrapper() +def constant_pad_nd( + input: TensorLikeType, pad: List[int], value: NumberType = 0 +) -> TensorLikeType: + torch._check( + len(pad) % 2 == 0, + lambda: f"Length of pad must be even but instead it equals {len(pad)}", + ) + + input_sizes = input.shape + l_inp = len(input_sizes) + + l_pad = len(pad) // 2 + l_diff = l_inp - l_pad + + torch._check( + l_inp >= l_pad, + lambda: "Length of pad should be no more than twice the number of " + f"dimensions of the input. Pad length is {len(pad)} while the input has " + f"{l_inp} dimensions.", + ) + + c_input = input + for i in range(l_diff, l_inp): + pad_idx = 2 * (l_inp - i - 1) + if pad[pad_idx] < 0: + c_input = c_input.narrow(i, -pad[pad_idx], c_input.shape[i] + pad[pad_idx]) + + if pad[pad_idx + 1] < 0: + c_input = c_input.narrow(i, 0, c_input.shape[i] + pad[pad_idx + 1]) + + # if none of the pads are positive we can just return the result + if builtins.all(p <= 0 for p in pad): + return c_input.clone() + + new_shape = list(input_sizes[:l_diff]) + + for i in range(l_pad): + pad_idx = len(pad) - ((i + 1) * 2) + new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1] + torch._check( + new_dim > 0, + lambda: f"The input size {input_sizes[l_diff + i]}, plus negative padding " + f"{pad[pad_idx]} and {pad[pad_idx + 1]} resulted in a negative output size, " + f"which is invalid. 
Check dimension {l_diff + i} of your input.", + ) + new_shape.append(new_dim) + + memory_format = utils.suggest_memory_format(input) + output = torch.empty( + new_shape, + dtype=input.dtype, + device=input.device, + requires_grad=input.requires_grad, + memory_format=memory_format, + ) + + if value == 0 and input.dtype == torch.bool: + value = False + # torch.fill isn't typed to allow complex values + output = torch.fill(output, value) # type: ignore[arg-type] + + c_output = output + for i in range(l_diff, l_inp): + pad_idx = 2 * (l_inp - i - 1) + if pad[pad_idx] > 0: + c_output = c_output.narrow( + i, pad[pad_idx], c_output.shape[i] - pad[pad_idx] + ) + if pad[pad_idx + 1] > 0: + c_output = c_output.narrow(i, 0, c_output.shape[i] - pad[pad_idx + 1]) + + prims.copy_to(c_output, c_input) + return output + + +def contiguous( + a: Tensor, *, memory_format: torch.memory_format = torch.contiguous_format +) -> Tensor: + torch._check( + memory_format != torch.preserve_format, + lambda: "preserve memory format is unsupported by the contiguous operator", + ) + + if utils.is_contiguous_for_memory_format(a, memory_format=memory_format): + return a + + return torch.clone(a, memory_format=memory_format) + + +@out_wrapper() +def dstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "dstack expects a non-empty TensorList") + aligned_tensors = atleast_3d(*tensors) + return cat(aligned_tensors, 2) + + +@register_decomposition(aten.expand) +def expand(a: Tensor, *shape) -> Tensor: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + # NOTE: cannot use utils.extract_shape_from_varargs here + # because that also validates the shape, but the shape + # given to expand may be "invalid" + if len(shape) == 1 and isinstance(shape[0], Sequence): + shape = tuple(shape[0]) + + torch._check( + len(shape) >= len(a.shape), + lambda: "expand: the requested shape has too few dimensions!", + ) + + offset = len(shape) - len(a.shape) + shape_ = list(shape) + for idx, x in enumerate(a.shape): + offset_idx = idx + offset + requested_length = shape[offset_idx] + torch._check( + guard_size_oblivious(requested_length == x) + or guard_size_oblivious(x == 1) + or requested_length == -1, + lambda: f"expand: attempting to expand a dimension of length {x}!", + ) + + shape_[offset_idx] = requested_length if requested_length != -1 else x + + # At this point shape must be valid + utils.validate_shape(shape_) + + return prims.broadcast_in_dim( + a, shape_, tuple(range(offset, len(a.shape) + offset)) + ) + + +# CompositeImplicitAutograd - don't register decomp +def expand_as(a: Tensor, b: Tensor) -> Tensor: + return a.expand(b.shape) + + +def chunk(a: TensorLikeType, chunks: int, dim: int = 0) -> Tuple[TensorLikeType, ...]: + if chunks <= 0: + msg = f"Expected at least one chunk, but got {chunks}!" 
+ raise ValueError(msg) + + dim = utils.canonicalize_dim(a.ndim, dim) + length = a.shape[dim] + chunk_size = math.ceil(length / chunks) + full_chunks = math.floor(length / chunk_size) + tail_chunk_size = length % chunk_size + + result = [] + for i in range(full_chunks): + result.append(narrow(a, dim, i * chunk_size, chunk_size)) + + if tail_chunk_size != 0: + result.append(narrow(a, dim, full_chunks * chunk_size, tail_chunk_size)) + + return tuple(result) + + +# Note: flatten, unlike other shape operators, returns the input tensor on a no-op (unless +# a 0D tensor is flattened, in which case it's returned in 1D) +# CompositeImplicitAutograd - don't register decomp +def flatten(a: TensorLikeType, start_dim: int = 0, end_dim: int = -1) -> TensorLikeType: + start_dim = utils.canonicalize_dim(a.ndim, start_dim) + end_dim = utils.canonicalize_dim(a.ndim, end_dim) + + # Short-circuits on no-op + if start_dim == end_dim and a.ndim != 0: + return a + + # Tries to take a view + # TODO: we could look at directing collapse_view to skip its meta function here (unsafe_collapse_view) + new_shape, new_strides = prims._collapse_view_helper(a, start_dim, end_dim) + if new_shape is not None: + return prims.collapse_view(a, start_dim, end_dim) + + # Makes a copy if it can't make a view + return prims.collapse(a, start_dim, end_dim) + + +@register_decomposition(aten.flip) +@out_wrapper() +def flip(a: TensorLikeType, dims: DimsSequenceType) -> TensorLikeType: + if not isinstance(dims, tuple) and not isinstance(dims, list): + raise ValueError("dims has to be a sequence of ints") + dims = utils.canonicalize_dims(a.ndim, dims) # type: ignore[assignment] + utils.validate_no_repeating_dims(dims) + return prims.rev(a, dims) + + +# CompositeImplicitAutograd - don't register decomp +def fliplr(a: TensorLikeType) -> TensorLikeType: + if a.ndim < 2: + raise RuntimeError("Input must be >= 2-d.") + + return flip(a, (1,)) + + +# CompositeImplicitAutograd - don't register decomp +def flipud(a: TensorLikeType) -> TensorLikeType: + if a.ndim < 1: + raise RuntimeError("Input must be >= 1-d.") + + return flip(a, (0,)) + + +# CompositeImplicitAutograd - don't register decomp +def narrow( + a: TensorLikeType, dim: int, start: Union[int, TensorLikeType], length: int +) -> TensorLikeType: + # Supports Tensor overload that was added for XLA: + # https://github.com/pytorch/pytorch/issues/31558 + if isinstance(start, TensorLike): + torch._check( + start.dim() == 0 and utils.is_integer_dtype(start.dtype), + lambda: "start must be an 0-dim integral Tensor.", + ) + start = start.item() # type: ignore[assignment] + torch._check(a.dim() > 0, lambda: "narrow() cannot be applied to a 0-dim tensor.") + torch._check(length >= 0, lambda: "narrow(): length must be non-negative.") + dim = utils.canonicalize_dim(a.ndim, dim) + dim_length = a.size(dim) + torch._check_with( + IndexError, + -dim_length <= start and start <= dim_length, # type: ignore[arg-type] + lambda: f"start out of range (expected to be in range of [{-dim_length}, {dim_length}], but got {start})", + ) + if start < 0: + start = start + dim_length + torch._check( + start <= dim_length - length, # type: ignore[arg-type] + lambda: f"start ({start}) + length ({length}) exceeds dimension size ({dim_length}).", + ) + return prims.slice_in_dim(a, start, start + length, axis=dim) + + +# TODO: This must return a sparse tensor if the input is sparse, but refs have +# no sparse support. See narrow_copy_sparse in core. 
+narrow_copy = _make_copy_from_view(narrow) + + +def _normalize( + a: Tensor, norm_dims: DimsType, eps: float +) -> Tuple[Tensor, Tensor, Tensor]: + """Computes mean and 1/std of a tensor along norm_dims. + + Used as a helper function for normalization layers. + + Args: + a (Tensor): input tensor + norm_dims (DimsType): dimensions to normalize over + eps (float): epsilon for numerical stability + + Returns: + out (Tensor): normalized tensor. + mean (Tensor): mean of the tensor along norm_dims. + rstd (Tensor): 1/std of the tensor along norm_dims. + """ + norm_dims = utils.canonicalize_dims(a.ndim, norm_dims) + computation_dtype = utils.get_computation_dtype(a.dtype) + a_acc = _maybe_convert_to_dtype(a, computation_dtype) + assert isinstance(a_acc, TensorLike) # to avoid mypy error for var_mean + biased_var, mean = torch.var_mean( + a_acc, dim=norm_dims, unbiased=False, keepdim=True + ) + rstd = torch.rsqrt(biased_var + eps) + out = (a - mean) * rstd + return out, mean, rstd + + +# add all specified dimensions +def _unsqueeze_multiple(x: TensorLikeType, dimensions: List[int]) -> TensorLikeType: + for dim in sorted(dimensions): + x = torch.unsqueeze(x, dim) + return x + + +@register_decomposition(aten.native_group_norm.default) +def native_group_norm( + input: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + batch_size: int, + num_channels: int, + flattened_inner_size: int, + num_groups: int, + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + torch._check( + input.ndim >= 2, + lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}", + ) + torch._check( + num_channels % num_groups == 0, + lambda: "Expected number of channels in input to be divisible by num_groups, " + + f"but got input of shape {input.shape} and num_groups = {num_groups}", + ) + + # num_channels / num_groups and flattened inner dimension are the reduction axes + reduction_dims = [2, 3] + input_reshaped = torch.reshape( + input, + [batch_size, num_groups, num_channels // num_groups, flattened_inner_size], + ) + out, mean, rstd = _normalize(input_reshaped, reduction_dims, eps) + out = out.view(input.shape) + + broadcast_dims = [0] + list(range(2, input.ndim)) + unsqueeze_bias = None + if bias is not None: + unsqueeze_bias = _unsqueeze_multiple(bias, broadcast_dims) + unsqueeze_weight = None + if weight is not None: + unsqueeze_weight = _unsqueeze_multiple(weight, broadcast_dims) + + if unsqueeze_weight is not None: + out = out * unsqueeze_weight + if unsqueeze_bias is not None: + out = out + unsqueeze_bias + + out = _maybe_convert_to_dtype(out, input.dtype) # type: ignore[assignment] + mean = _maybe_convert_to_dtype(mean, input.dtype) # type: ignore[assignment] + rstd = _maybe_convert_to_dtype(rstd, input.dtype) # type: ignore[assignment] + + # remove broadcast dimensions from mean and rstd + mean = torch.squeeze(mean, reduction_dims) + rstd = torch.squeeze(rstd, reduction_dims) + return (out, mean, rstd) + + +@register_decomposition(aten.native_layer_norm) +@out_wrapper("out0", "out1", "out2") +def native_layer_norm( + input: Tensor, + normalized_shape: ShapeType, + weight: Optional[Tensor], + bias: Optional[Tensor], + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + normalized_ndim = len(normalized_shape) + torch._check( + normalized_ndim >= 1, + lambda: "Expected normalized_shape to be at least 1-dimensional, i.e., " + + "containing at least one element, but got normalized_shape = " + + str(normalized_shape), + ) + # torch.Size([1, 2, 3]) == [1, 2, 3] evaluates to False + # 
while torch.Size([1, 2, 3]) == (1, 2, 3) is True + # therefore we use tuple(normalized_shape) + torch._check( + weight is None or weight.shape == tuple(normalized_shape), + lambda: "Expected weight to be of same shape as normalized_shape, but got " + + "weight of shape " + + str(weight.shape) # type: ignore[union-attr] + + " and normalized_shape = " + + str(normalized_shape), + ) + torch._check( + bias is None or bias.shape == tuple(normalized_shape), + lambda: "Expected bias to be of same shape as normalized_shape, but got " + + "bias of shape " + + str(bias.shape) # type: ignore[union-attr] + + " and normalized_shape = " + + str(normalized_shape), + ) + torch._check( + input.ndim >= normalized_ndim + and input.shape[(input.ndim - normalized_ndim) :] == tuple(normalized_shape), + lambda: "Given normalized_shape=" + + str(normalized_shape) + + ", expected input with shape " + + str(normalized_shape) + + ", but got input of size " + + str(input.shape), + ) + + input = input.contiguous() + if weight is not None: + weight = weight.contiguous() + if bias is not None: + bias = bias.contiguous() + + axis = input.ndim - normalized_ndim + reduction_dims = list(range(axis, input.ndim)) + out, mean, rstd = _normalize(input, reduction_dims, eps) + + if weight is None and bias is not None: + out = out + bias + elif weight is not None and bias is None: + out = out * weight + elif weight is not None and bias is not None: + out = out * weight + bias + + out = _maybe_convert_to_dtype(out, input.dtype) # type: ignore[assignment] + if input.device.type == "cpu": + mean = _maybe_convert_to_dtype(mean, input.dtype) # type: ignore[assignment] + rstd = _maybe_convert_to_dtype(rstd, input.dtype) # type: ignore[assignment] + return (out, mean, rstd) + + +# TODO: Adding this as a meta function causes functorch tests to fail when compiled with debug mode. 
+# test/test_eager_transforms.py::TestFunctionalizeCPU::test_functionalize_fx_transpose_simple_cpu +@register_decomposition(aten.permute) +def permute(a: TensorLikeType, *dims) -> TensorLikeType: + _permutation = utils.canonicalize_dims( + a.ndim, utils.extract_dims_from_varargs(dims) + ) + return prims.transpose(a, _permutation) + + +@register_decomposition(aten.renorm) +@out_wrapper() +def renorm( + input: TensorLikeType, p: RealNumberType, dim: int, maxnorm: RealNumberType +) -> TensorLikeType: + torch._check(not isinstance(p, complex), lambda: "renorm: p must be real-valued") + torch._check(p > 0, lambda: "renorm: non-positive norm not supported") + torch._check( + not isinstance(maxnorm, complex), lambda: "renorm: maxnorm must be real-valued" + ) + torch._check( + maxnorm >= 0, lambda: f"renorm: expected maxnorm to be >= 0 but got {maxnorm}" + ) + ndim = input.ndim + torch._check( + ndim > 1, + lambda: f"renorm: input needs at least 2 dimensions, got {ndim} dimensions", + ) + + dim = utils.canonicalize_dim(ndim, dim) + reduce_dims = list(range(ndim)) + del reduce_dims[dim] + + # For half and bfloat16, calculate norm in float precision then cast + # normalization factor to half + acc_type = utils.get_computation_dtype(input.dtype) + if acc_type != input.dtype: + norm = torch.linalg.vector_norm( + input, p, reduce_dims, keepdim=True, dtype=acc_type + ) + else: + norm = torch.linalg.vector_norm(input, p, reduce_dims, keepdim=True) + + eps = 1e-7 + norm_factor = torch.where(norm > maxnorm, maxnorm / (norm + eps), 1.0) + if acc_type != input.dtype: + norm_factor = prims.convert_element_type(norm_factor, input.dtype) + return (input * norm_factor).contiguous() + + +# CompositeImplicitAutograd - don't register decomp +@aten.stft.center.py_impl(DispatchKey.CompositeImplicitAutograd) +def stft( + input: Tensor, + n_fft: int, + hop_length: Optional[int] = None, + win_length: Optional[int] = None, + window: Optional[Tensor] = None, + center: bool = True, + pad_mode: str = "reflect", + normalized: bool = False, + onesided: Optional[bool] = None, + return_complex: Optional[bool] = None, +) -> Tensor: + torch._check( + window is None or window.device == input.device, + lambda: ( + f"stft input and window must be on the same device but got self on {input.device}" + + f" and window on {window.device}" # type: ignore[union-attr] + ), + ) + + hop_length_ = hop_length if hop_length is not None else n_fft // 4 + win_length_ = win_length if win_length is not None else n_fft + + if return_complex is None: + return_complex_ = input.is_complex() or ( + window is not None and utils.is_complex_dtype(window.dtype) + ) + torch._check( + return_complex_, + ( + "stft requires the return_complex parameter be given for real inputs, " + + "and will further require that return_complex=True in a future PyTorch release." 
+ ), + ) + else: + return_complex_ = return_complex + + torch._check( + utils.is_float_dtype(input.dtype) or utils.is_complex_dtype(input.dtype), + lambda: "stft expected a tensor of floating point or complex values", + ) + torch._check(1 <= input.ndim <= 2, lambda: "stft expected a 1D or 2D tensor") + + original_ndim = input.ndim + if original_ndim == 1: + input = input.unsqueeze(0) + + if center: + extra_dims = 3 - input.ndim + pad_amount = n_fft // 2 + extended_shape = [*itertools.repeat(1, extra_dims), *input.shape] + input = aten.pad(input.view(extended_shape), [pad_amount, pad_amount], pad_mode) + input = input.view(input.size()[extra_dims:]) + + batch = input.size(0) + length = input.size(1) + torch._check( + 0 < n_fft <= length, + lambda: f"stft expected 0 < n_fft <= {length}, but got n_fft={n_fft}", + ) + torch._check( + hop_length_ > 0, + lambda: f"stft expected hop_length > 0 but got hop_length={hop_length_}", + ) + torch._check( + 0 < win_length_ <= n_fft, + lambda: f"stft expected 0 < win_length <= n_fft but got win_length={win_length_}", + ) + torch._check( + window is None or window.shape == (win_length_,), + lambda: ( + f"expected a 1D window tensor of size equal to win_length={win_length_}, " + + f"but got window with size {window.shape}" # type: ignore[union-attr] + ), + ) + + if win_length_ < n_fft: + if window is None: + window = torch.ones(win_length_, dtype=input.dtype, device=input.device) + left = (n_fft - win_length_) // 2 + window = aten.constant_pad_nd(window, [left, n_fft - win_length_ - left]) + + input = input.unfold(dimension=-1, size=n_fft, step=hop_length_) + if window is not None: + input = input * window + + complex_fft = utils.is_complex_dtype(input.dtype) + onesided = onesided if onesided is not None else not complex_fft + norm = "ortho" if normalized else None + if onesided: + torch._check( + not complex_fft, + lambda: "Cannot have onesided output if window or input is complex", + ) + out = torch.fft.rfft(input, dim=-1, norm=norm) + else: + out = torch.fft.fft(input, dim=-1, norm=norm) + + out.transpose_(1, 2) + + if original_ndim == 1: + out = out.squeeze_(0) + + return out if return_complex_ else torch.view_as_real(out) + + +# CompositeImplicitAutograd - don't register decomp +@aten.istft.default.py_impl(DispatchKey.CompositeImplicitAutograd) +def istft( + input: Tensor, + n_fft: int, + hop_length: Optional[int] = None, + win_length: Optional[int] = None, + window: Optional[Tensor] = None, + center: bool = True, + normalized: bool = False, + onesided: Optional[bool] = None, + length: Optional[int] = None, + return_complex=False, +) -> Tensor: + torch._check( + window is None or window.device == input.device, + lambda: ( + f"istft input and window must be on the same device but got self on {input.device}" + + f" and window on {window.device}" # type: ignore[union-attr] + ), + ) + + hop_length_ = hop_length if hop_length is not None else n_fft // 4 + win_length_ = win_length if win_length is not None else n_fft + + torch._check( + utils.is_complex_dtype(input.dtype), + lambda: ( + "istft input and window must be on the same device but got self on " + + f"{input.device} and window on {window.device}" # type: ignore[union-attr] + ), + ) + n_frames = input.size(-1) + fft_size = input.size(-2) + + expected_output_signal_len = n_fft + hop_length_ * (n_frames - 1) + torch._check(input.numel() > 0, lambda: "istft input tensor cannot be empty") + torch._check( + 2 <= input.ndim <= 3, + lambda: f"istft expected a tensor with 2 or 3 dimensions, but got 
{input.ndim}", + ) + onesided_ = onesided if onesided is not None else fft_size != n_fft + + if onesided_: + torch._check( + n_fft // 2 + 1 == fft_size, + lambda: ( + "istft expected the frequency dimension (3rd to the last) of the input tensor " + + "to match n_fft / 2 + 1 when onesided=True, but got {fft_size}" + ), + ) + else: + torch._check( + n_fft == fft_size, + lambda: ( + "istft expected the frequency dimension (3rd to the last) of the input tensor " + + "to match n_fft when onesided=False, but got {fft_size}", + ), + ) + + torch._check( + 0 < hop_length_ <= win_length_, + lambda: "istft expected 0 < hop_length <= win_length", + ) + torch._check( + 0 < win_length_ <= n_fft, lambda: "istft expected 0 < win_length <= n_fft" + ) + torch._check( + window is None or window.shape == (win_length_,), + lambda: "Invalid window shape. window has to be 1D and length of `win_length`", + ) + + if window is None: + real_dtype = utils.corresponding_real_dtype(input.dtype) + window_ = torch.ones(win_length_, dtype=real_dtype, device=input.device) + else: + window_ = window + + if win_length_ != n_fft: + left = (n_fft - win_length_) // 2 + window_ = aten.constant_pad_nd(window_, (left, n_fft - win_length_ - left), 0) + + original_ndim = input.ndim + if input.ndim == 2: + input = input.unsqueeze(0) + + input = input.transpose(1, 2) + norm = "ortho" if normalized else None + if return_complex: + torch._check( + not onesided_, + lambda: "cannot have onesided output if window or input is complex", + ) + input = torch.fft.ifft(input, dim=-1, norm=norm) + else: + torch._check( + window is None or not utils.is_complex_dtype(window.dtype), + lambda: "Complex windows are incompatible with return_complex=False", + ) + if not onesided_: + input = input.narrow(dim=-1, start=0, length=n_fft // 2 + 1) + input = torch.fft.irfft(input, dim=-1, norm=norm) + + assert input.size(2) == n_fft + + y_tmp = input * window_.view([1, 1, n_fft]) + y = aten.unfold_backward( + y_tmp, + input_sizes=(y_tmp.size(0), expected_output_signal_len), + dim=1, + size=n_fft, + step=hop_length_, + ) + window_envelop = aten.unfold_backward( + window_.pow(2).expand((1, n_frames, n_fft)), + input_sizes=(y_tmp.size(0), expected_output_signal_len), + dim=1, + size=n_fft, + step=hop_length_, + ) + + assert expected_output_signal_len == y.size(1) + assert expected_output_signal_len == window_envelop.size(1) + + start = n_fft // 2 if center else 0 + if length is not None: + end = start + length + elif center: + end = expected_output_signal_len - n_fft // 2 + else: + end = expected_output_signal_len + + length = max(0, end - start) + y = y.narrow(dim=1, start=start, length=length) + window_envelop = window_envelop.narrow(dim=1, start=start, length=length) + + window_envelop_lowest = window_envelop.abs().min().lt(1e-11) + torch._check( + not window_envelop_lowest.item(), + lambda: "window overlap add min less than 1e-11", + ) + + y = y / window_envelop + if original_ndim == 2: + y = y.squeeze(0) + + if end > expected_output_signal_len: + warnings.warn( + "The length of signal is shorter than the length parameter. Result is being " + + "padded with zeros in the tail. 
Please check your center and hop_length settings" + ) + y = aten.constant_pad_nd(y, (0, end - expected_output_signal_len), 0) + return y + + +# Get the new shape and stride after applying unfold to an input tensor +def _get_unfold_shape_stride( + a_shape: ShapeType, a_stride: StrideType, dimension: int, size: int, step: int +): + a_ndim = len(a_shape) + dim = utils.canonicalize_dim(a_ndim, dimension, wrap_scalar=True) + max_size = 1 if a_ndim == 0 else a_shape[dim] + last_stride = 1 if a_ndim == 0 else a_stride[dim] + + torch._check( + size <= max_size, + lambda: f"Maximum size for tensor at dimension {dim} is {max_size} but size is {size}", + ) + + torch._check( + step > 0, + lambda: f"Step is {step} but must be > 0", + ) + + shape = list(a_shape) + strides = list(a_stride) + shape.append(size) + strides.append(last_stride) + if dim < a_ndim: + shape[dim] = (shape[dim] - size) // step + 1 + strides[dim] *= step + return shape, strides + + +@register_decomposition(aten.repeat) +@out_wrapper() +def repeat(a: Tensor, *repeat_shape) -> Tensor: + repeat_shape = utils.extract_shape_from_varargs(repeat_shape, validate=False) + torch._check( + len(repeat_shape) >= len(a.shape), + lambda: "repeat: Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor", + ) + + if len(repeat_shape) == 0: + return torch.clone(a) + + num_new_dimensions = len(repeat_shape) - a.ndim + padded_shape = [1] * num_new_dimensions + for dim_size in a.shape: + padded_shape.append(dim_size) + + target_shape = tuple( + padded_size * repeat_size + for padded_size, repeat_size in zip(padded_shape, repeat_shape) + ) + + # return an empty tensor if one of the repeat_shape dimensions is zero + if 0 in repeat_shape: + return torch.empty( + target_shape, + dtype=a.dtype, + device=a.device, + requires_grad=a.requires_grad, + memory_format=utils.suggest_memory_format(a), + ) + + urtensor_shape = target_shape + urtensor_stride = utils.make_contiguous_strides_for(target_shape) + for dim, dim_size in enumerate(padded_shape): + # repeat each dimension by using unfold_copy operation + urtensor_shape, urtensor_stride = _get_unfold_shape_stride( + urtensor_shape, urtensor_stride, dim, dim_size, max(dim_size, 1) + ) + + # derive permute order by sorting urtensor strides + enumerated_stride = list(enumerate(urtensor_stride)) + enumerated_stride.sort(key=lambda item: item[1], reverse=True) + permute_order, sorted_stride = zip(*enumerated_stride) + + # add new and expand dimensions according to urtensor + repeat_xtensor = a.expand(urtensor_shape) + + # clone tensor to concretize expanded dimensions + cloned_result = torch.clone(repeat_xtensor) + + # transpose axis so strides are in sorted order + permuted_result = cloned_result.permute(permute_order) + + # reshape to get contiguous tensor with correct target shape + return permuted_result.reshape(target_shape) + + +def _reshape_view_helper(a: TensorLikeType, *shape, allow_copy: bool) -> TensorLikeType: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious, sym_eq + + # Creates a valid shape + shape = utils.extract_shape_from_varargs(shape, validate=False) + # Reshape may be given a shape with a -1 length + # This indicates that the dimension's length should be inferred + shape = utils.infer_size(shape, a.numel()) + + # Short-circuits if shape is the same + if guard_size_oblivious(sym_eq(tuple(a.shape), tuple(shape))): + return prims.view_of(a) + + # Special-cases tensors with no elements + if guard_size_oblivious(a.numel() == 0): + return 
as_strided(a, shape, utils.make_contiguous_strides_for(shape)) + + # Special-cases reshaping zero dim tensors + if a.ndim == 0: + _a = a + for length in shape: + assert length == 1 + _a = unsqueeze(_a, -1) + return _a + + # Special-cases reshaping to zero dim tensors + if len(shape) == 0: + _a = a + for length in a.shape: + assert length == 1 + _a = squeeze(_a, -1) + return _a + + # Handles general case: a 1+D tensor reshaped into a distinct 1+D shape + + # NOTE [Reshape Algorithm] + # This algorithm works by attempting to greedily construct the desired dimensions in + # the output shape, left to right. It does this by, conceptually, accumulating + # dimensions of the original tensor, also left to right, until the dimension + # can be constructed using prims.split_dim. + # The algorithm also has special handling for tail squeezes/unsqueezes, like + # if a reshape from (5, 5) to (5, 5, 1) or vice versa. + # + # This algorithm does not flatten the original tensor and then split dims as appropriate + # because that would create copies more often than this algorithm. flatten is the only + # operation below which can create a view or a copy, and while it prefers creating + # views it may sometimes create a copy if the tensor's strides do not permit a view. + # As a result, this algorithm tries to minimize flattening. + # + # Note that a better version of this algorithm may exist. Regions which could be + # flattened without creating a copy can be identified in advance, and that might + # allow fewer flatten calls or faster short-circuiting to make a copy. + idx = 0 + a_ = a + for length in shape: + # Handles tail unsqueezes + if idx >= a_.ndim: + assert length == 1 + last_dim = a_.ndim - 1 + # NOTE: using split_dim instead of unsqueeze may seem silly here, + # but it's necessary to get the strides correct + a_ = prims.split_dim(a_, last_dim, a_.shape[last_dim]) + idx = idx + 1 + continue + + # Skips dimensions that are already the correct length + if guard_size_oblivious(length == a_.shape[idx]): + idx = idx + 1 + continue + + # Gathers enough original dimensions such that this new dimension can be created + # Note that this accumulation will terminate because we've verified a and the shape + # specify the same number of elements above + accum = a_.shape[idx] + end = idx + while guard_size_oblivious(accum % length != 0): + end = end + 1 + accum = accum * a_.shape[end] + if end != idx: + # NOTE: in this case multiple dimensions must be flatten to create the desired dimension + # This flattening is why reshape sometimes creates a copy -- because flattening + # may return a view of a copy + + # Checks if collapse can be a view and short-circuits to copying reshape if it can't + new_shape, new_strides = prims._collapse_view_helper(a_, idx, end) + if new_shape is None: + if allow_copy: + return prims.reshape(a, shape) + + msg = "Cannot view a tensor with shape {} and strides {} as a tensor with shape {}!".format( + a.shape, a.stride(), shape + ) + raise ValueError(msg) + + a_ = flatten(a_, idx, end) + + # Splits the (possibly flattened) dimension to create the desired dim length + if guard_size_oblivious(accum != length): + a_ = prims.split_dim(a_, idx, length) + + idx = idx + 1 + + # Squeezes tail + while idx < a_.ndim: + assert a_.shape[idx] == 1 + a_ = squeeze(a_, idx) + + return a_ + + +# CompositeImplicitAutograd - don't register decomp +# NOTE: shape is a vararg because Tensor.reshape can be called with as +# Tensor.reshape(a, b, c) or Tensor.reshape((a, b, c)) Function call +# torch.reshape 
doesn't support unpacked shapes +def reshape(a: TensorLikeType, *shape: ShapeType) -> TensorLikeType: + return _reshape_view_helper(a, *shape, allow_copy=True) + + +# CompositeImplicitAutograd - don't register decomp +def reshape_as(self: TensorLikeType, other: TensorLikeType) -> TensorLikeType: + return self.reshape(other.size()) + + +@register_decomposition(aten.roll) +@out_wrapper() +def roll( + a: TensorLikeType, shifts: DimsType, dims: DimsType = tuple() +) -> TensorLikeType: + """Reference implementation of :func:`torch.roll`.""" + dims = utils.canonicalize_dims(a.ndim, dims) + # ATen specifies int[1] type for shifts and dims which expands integers to tuples of length 1 + if not isinstance(shifts, Iterable): + shifts = (shifts,) + if not isinstance(dims, Iterable): + dims = (dims,) + + # Avoid modulo by zero + if a.numel() == 0: + # Keeping this as ref for now as FakeTensor runs into some issues with complex tensors + return a.clone() + + if a.dim() == 0 and len(dims) > 0: + raise IndexError( + f"Dimension specified as {dims[0]} but tensor has no dimensions" + ) + + len_shifts = len(shifts) + len_dims = len(dims) + if len_shifts != 1 or len_dims != 1: + if len_shifts == 0: + raise RuntimeError("`shifts` required") + # Takes care of the case when dims is not specified (default) + # By default, the tensor is flattened before shifting, after which the original shape is restored + if len_dims == 0 and len_shifts == 1: + return torch.roll(torch.flatten(a), shifts, 0).view(a.shape) + if len_shifts != len_dims: + raise RuntimeError( + f"shifts and dimensions must align. shifts: {len_shifts}, dims: {len_dims}" + ) + assert len_dims > 1 + tail_shifts = shifts[1:] + tail_dims = dims[1:] + first_dim_rolled = torch.roll(a, (shifts[0],), dims[0]) + return torch.roll(first_dim_rolled, tail_shifts, tail_dims) + + # This path is taken when only one dimension is rolled + # For example to get `first_dim_rolled` above + dim = dims[0] + size = a.shape[dim] + start = (size - shifts[0]) % size + idx = torch.arange(size, device=a.device) + return a.index_select(dim, torch.fmod(start + idx, size)) + + +@register_decomposition(aten.rot90) +@out_wrapper() +def rot90( + a: TensorLikeType, k: int = 1, dims: DimsSequenceType = (0, 1) +) -> TensorLikeType: + """Reference implementation of :func:`torch.rot90`.""" + if len(dims) != 2: + raise RuntimeError( + f"expected total rotation dims == 2, but got dims = {len(dims)}" + ) + if a.ndim < 2: + raise RuntimeError(f"expected total dims >= 2, but got total dims = {a.ndim}") + + # Do this after the initial checks to be compatible with the behavior in + # core. 
+ dims = utils.canonicalize_dims(a.ndim, dims) + + if dims[0] == dims[1]: + raise RuntimeError( + f"expected rotation dims to be different, but got dim0 = {dims[0]} and dim1 = {dims[1]}" + ) + k = k % 4 # Rotation direction is from the second towards the first axis for k < 0 + if k == 1: + return torch.transpose(torch.flip(a, (dims[1],)), dims[0], dims[1]) + elif k == 2: + return torch.flip(a, dims) + elif k == 3: + return torch.transpose(torch.flip(a, (dims[0],)), dims[0], dims[1]) + else: + return clone(a, memory_format=torch.contiguous_format) + + +def _check_stack_inputs(tensors: TensorSequenceType) -> None: + entry_shape = tensors[0].shape + for i in range(1, len(tensors)): + assert tensors[i].shape == entry_shape, ( + f"stack expects each tensor to be equal size, but got {entry_shape} at entry 0" + f"and {tensors[i].shape} at entry {i}" + ) + + +@register_decomposition(aten.stack) +@out_wrapper() +def stack(tensors: TensorSequenceType, dim: int = 0) -> TensorLikeType: + assert len(tensors) > 0, "stack expects a non-empty TensorList" + wrapped_dim = utils.canonicalize_dim(tensors[0].ndim + 1, dim) + # Refs need sparse support to check other condition + if wrapped_dim < tensors[0].ndim: # and not tensors[0].is_sparse: + _check_stack_inputs(tensors) + result_sizes = list(tensors[0].shape) + result_sizes.insert(wrapped_dim, len(tensors)) + out = torch.cat(tensors, wrapped_dim) + return out.view(result_sizes) + + # If dim == tensors[0].ndim, view cannot efficiently handle it + return torch.cat([t.unsqueeze(wrapped_dim) for t in tensors], dim) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + result_dtype = dtype or a.dtype + computation_dtype = utils.get_computation_dtype(result_dtype) + a_ = _maybe_convert_to_dtype(a, computation_dtype) + if a.numel() == 0: + a_exp = exp(a_) + else: + a_max = amax(a_, dim, keepdim=True) + a_exp = exp(a_ - a_max) + return _maybe_convert_to_dtype( + true_divide(a_exp, sum(a_exp, dim, keepdim=True)), result_dtype + ) # type: ignore[return-value] + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def hstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "hstack expects a non-empty TensorList") + aligned_tensors = atleast_1d(*tensors) + if aligned_tensors[0].ndim == 1: + return cat(aligned_tensors, 0) + return cat(aligned_tensors, 1) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def vstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "vstack expects a non-empty TensorList") + aligned_tensors = atleast_2d(*tensors) + return cat(aligned_tensors, 0) + + +# CompositeImplicitAutograd - don't register decomp +def unflatten(a: TensorLikeType, dim: int, sizes: ShapeType) -> TensorLikeType: + dim = utils.canonicalize_dim(a.ndim, dim) + torch._check(len(sizes) != 0, lambda: "unflatten: sizes must be non-empty") + return a.view(tuple(a.shape[:dim]) + tuple(sizes) + tuple(a.shape[dim + 1 :])) + + +@register_decomposition(aten.unbind) +def unbind(t: TensorLikeType, dim: int = 0) -> TensorSequenceType: + dim = utils.canonicalize_dim(t.ndim, dim) + torch._check_index( + len(t.shape) > 0, + lambda: "Dimension specified as 0 but tensor has no dimensions", + ) + if t.shape[dim] == 0: + return tuple() + else: + return tuple( + torch.squeeze(s, dim) for s in torch.tensor_split(t, t.shape[dim], dim) + ) + + 
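+# Illustrative sketch (added here for clarity; not part of the upstream file): the
+# `unbind` reference above decomposes into `tensor_split` followed by `squeeze`,
+# which should match eager `torch.unbind`, e.g. for a hypothetical 2x3 input:
+#
+#   >>> t = torch.arange(6).reshape(2, 3)
+#   >>> torch.unbind(t, dim=0)
+#   (tensor([0, 1, 2]), tensor([3, 4, 5]))
+#   >>> tuple(torch.squeeze(s, 0) for s in torch.tensor_split(t, t.shape[0], 0))
+#   (tensor([0, 1, 2]), tensor([3, 4, 5]))
+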
+@out_wrapper() +def index_copy(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike): + return x.clone(memory_format=torch.contiguous_format).index_copy_( + dim, index, tensor + ) + + +def index_copy_(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike): + dim = utils.canonicalize_dims(x.ndim, dim) + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + # Treat scalars as elements of \R^1 + y = x.unsqueeze(0) if x.ndim == 0 else x + idx = (slice(None),) * dim + (index,) + y[idx] = tensor + return x + + +@register_decomposition(aten.index_fill) +@out_wrapper() +def index_fill( + x: TensorLike, dim: int, index: TensorLike, value: Union[NumberType, TensorLike] +): + return _index_fill(x, dim, index, value, inplace=False) + + +@register_decomposition(aten.index_fill_) +def index_fill_( + x: TensorLike, dim: int, index: TensorLike, value: Union[NumberType, TensorLike] +): + return _index_fill(x, dim, index, value, inplace=True) + + +def _index_fill( + x: TensorLike, + dim: int, + index: TensorLike, + value: Union[NumberType, TensorLike], + *, + inplace: bool, +): + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + if isinstance(value, TensorLike): + torch._check( + value.ndim == 0, + lambda: "Only supports 0-dimensional value tensor. " # type: ignore[union-attr] + f"Got a tensor with {value.ndim} dimensions.", + ) # type: ignore[arg-type] + else: + value = torch.scalar_tensor( + value, dtype=x.dtype, layout=x.layout, device=x.device # type: ignore[arg-type] + ) + + # index_copy has some unnecessary preconditions when x is a scalar. We do this to work through them + zero_dim = x.ndim == 0 + y = x.unsqueeze(0) if zero_dim else x + # index_copy does not broadcast on value so we have to do it manually + shape = list(y.shape) + shape[dim] = index.numel() + value = value.expand(shape) + index_copy = Tensor.index_copy_ if inplace else torch.index_copy + out = index_copy(y, dim, index, value) # type: ignore[operator] + if inplace: + return x + else: + if zero_dim: + # The clone is necessary so that it returns a fresh tensor rather than a view + out = out.squeeze(0).clone() + # index_fill preserves the strides. 
index_copy always returns contiguous tensors + if out.stride() != x.stride(): + new_out = torch.empty_like(x) + new_out.copy_(out) + out = new_out + return out + + +@out_wrapper() +def index_add( + x: TensorLike, + dim: int, + index: TensorLike, + tensor: TensorLike, + *, + alpha: NumberType = 1, +): + # index_add always returns a new contiguous tensor + return x.clone(memory_format=torch.contiguous_format).index_add_( + dim, index, tensor, alpha=alpha # type: ignore[arg-type] + ) + + +@register_decomposition(aten.index_select) +@out_wrapper() +def index_select(x: TensorLike, dim: int, index: TensorLike): + dim = utils.canonicalize_dims(x.ndim, dim) + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + if index.ndim == 0: + index = index.unsqueeze(0) + if x.ndim == 0: + # Treat scalars as elements of \R^1 + # We cannot use x[idx] here as it accesses item() (??), hence this awkward construction + return torch.empty_like(x).index_copy(0, index, x.expand_as(index)) + + idx = (slice(None),) * dim + (index,) + return x[idx] + + +@register_decomposition(aten.squeeze.dims) +def squeeze(a: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + if dim is None: + dims = tuple(idx for idx, size in enumerate(a.shape) if size == 1) + return prims.squeeze(a, dims) if dims else prims.view_of(a) + + ndim = a.ndim + dim = utils.canonicalize_dims(ndim, dim) + dims = (dim,) if isinstance(dim, Dim) else dim + # Short-circuits if the tensor has no dimensions + if ndim == 0: + assert len(dims) == 0 or dims == (0,) + return prims.view_of(a) + + # Note: squeeze does not modify tensors when the given dim is not a dimension of length 1 + dims = tuple(d for d in dims if guard_size_oblivious(a.shape[d] == 1)) + if len(dims) == 0: + return prims.view_of(a) + if len(dims) == 1: + return prims.squeeze(a, dims) + dims_list = list(dims) + dims_list = sorted(dims_list, reverse=True) + for i in dims_list: + a = squeeze(a, i) + return a + + +# Note: does not work with TensorMetas because of data-dependent control-flow +# CompositeImplicitAutograd - don't register decomp +def tensor_split( + a: TensorLikeType, + indices_or_sections: Union[Tensor, DimsType], + dim: int = 0, +) -> Tuple[TensorLikeType, ...]: + _dim = utils.canonicalize_dim(a.ndim, dim) + if a.ndim == 0: + msg = "tensor_split: received a rank zero tensor, but expected a tensor of rank one or greater!" 
+ raise ValueError(msg) + + # If indices_or_sections is a tensor, it must be a CPU Long tensor + if isinstance(indices_or_sections, TensorLike): + if not indices_or_sections.device.type == "cpu": + msg = "tensor_split: if indices_or_sections is a tensor it must be on the CPU, but received one on {}".format( + indices_or_sections.device + ) + raise ValueError(msg) + if indices_or_sections.dtype != torch.long: + msg = "tensor_split: if indices_or_sections is a tensor it must have long dtype, " + f" but received one with dtype {indices_or_sections.dtype}" + raise ValueError(msg) + + # Case 0 -- indices_or_sections is an integer or a scalar tensor n and a is split along dim into n parts of equal-ish length + if isinstance(indices_or_sections, IntLike) or ( + isinstance(indices_or_sections, TensorLike) and indices_or_sections.ndim == 0 + ): + sections: int = ( + indices_or_sections # type: ignore[assignment] + if isinstance(indices_or_sections, Number) + else indices_or_sections.item() + ) + + if sections <= 0: + msg = f"tensor_split: number of sections must be greater than 0, but was {sections}" + raise ValueError(msg) + + splits = [] + dim_size = a.shape[_dim] + min_split_size = math.floor(dim_size / sections) + num_splits_one_extra = dim_size % sections + start_idx = 0 + for split_idx in range(sections): + split_size = ( + min_split_size + 1 + if (split_idx < num_splits_one_extra) + else min_split_size + ) + s = prims.slice_in_dim(a, start_idx, start_idx + split_size, axis=_dim) + splits.append(s) + start_idx = start_idx + split_size + + return tuple(splits) + # Case 1 -- indices_or_sections is a sequence of integers or a 1D tensor describing the splits + else: + indices = indices_or_sections + if isinstance(indices_or_sections, TensorLike): + if indices_or_sections.ndim != 1: + msg = "tensor_split: non-scalar indices_or_sections tensors must have only one dimension, " + f"but received a tensor with {indices_or_sections.ndim} dimensions" + raise ValueError(msg) + + indices = indices_or_sections.tolist() + + splits = [] + start_idx = 0 + for x in indices: + splits.append(prims.slice_in_dim(a, start_idx, x, axis=_dim)) + start_idx = x + splits.append(prims.slice_in_dim(a, start_idx, a.shape[_dim], axis=_dim)) + return tuple(splits) + + +# CompositeImplicitAutograd - don't register decomp +def hsplit( + a: TensorLikeType, indices_or_sections: DimsType +) -> Tuple[TensorLikeType, ...]: + torch._check( + a.ndim >= 1, + lambda: ( + "torch.hsplit requires a tensor with at least 1 dimension, but got a tensor with " + + str(a.ndim) + + " dimensions!" + ), + ) + dim = 0 if a.ndim == 1 else 1 + if isinstance(indices_or_sections, IntLike): + split_size = indices_or_sections + torch._check( + (split_size != 0 and a.shape[dim] % split_size == 0), + lambda: ( + "torch.hsplit attempted to split along dimension " + + str(dim) + + ", but the size of the dimension " + + str(a.shape[dim]) + + " is not divisible by the split_size " + + str(split_size) + + "!" + ), + ) + return tensor_split(a, split_size, dim) + + torch._check_type( + isinstance(indices_or_sections, (list, tuple)), + lambda: ( + "hsplit(): received an invalid combination of arguments. 
" + "Expected indices_or_sections to be of type int, list of ints or tuple of ints " + f"but got type {type(indices_or_sections)}" + ), + ) + + split_sizes = indices_or_sections + return tensor_split(a, split_sizes, dim) + + +# CompositeImplicitAutograd - don't register decomp +def vsplit( + a: TensorLikeType, indices_or_sections: DimsType +) -> Tuple[TensorLikeType, ...]: + torch._check( + a.ndim >= 2, + lambda: ( + "torch.vsplit requires a tensor with at least 2 dimension, but got a tensor with " + + str(a.ndim) + + " dimensions!" + ), + ) + if isinstance(indices_or_sections, IntLike): + split_size = indices_or_sections + torch._check( + (split_size != 0 and a.shape[0] % split_size == 0), + lambda: ( + f"torch.vsplit attempted to split along dimension 0" + f", but the size of the dimension " + f"{a.shape[0]}" + f" is not divisible by the split_size " + f"{split_size}" + f"!" + ), + ) + return tensor_split(a, split_size, 0) + + torch._check_type( + isinstance(indices_or_sections, (list, tuple)), + lambda: ( + "vsplit(): received an invalid combination of arguments. " + "Expected indices_or_sections to be of type int, list of ints or tuple of ints " + f"but got type {type(indices_or_sections)}" + ), + ) + + split_sizes = indices_or_sections + return tensor_split(a, split_sizes, 0) + + +@register_decomposition(aten.diag.out) +@out_wrapper() +def diag( + self: TensorLikeType, + offset: int = 0, +) -> TensorLikeType: + ndim = self.dim() + torch._check( + ndim in (1, 2), lambda: f"diag(): Supports 1D or 2D tensors. Got {ndim}D" + ) + if ndim == 1: + return torch.diag_embed(self, offset) + else: + return torch.diagonal_copy(self, offset) + + +@register_decomposition(aten.diagonal_scatter) +@out_wrapper() +def diagonal_scatter( + input: TensorLikeType, + src: TensorLikeType, + offset: int = 0, + dim1: int = 0, + dim2: int = 1, +) -> TensorLikeType: + out = utils.clone_preserve_strides(input) + diag = out.diagonal(offset, dim1, dim2) + torch._check( + diag.shape == src.shape, + lambda: "expected src to have a size equal to the diagonal of the input." 
+ f"Got {src.shape} for a diagonal of shape {diag.shape}", + ) + copy_to(diag, src) + return out + + +@register_decomposition(aten.diagonal) +def diagonal( + self: TensorLikeType, + offset: int = 0, + dim1: int = 0, + dim2: int = 1, +) -> TensorLikeType: + """ + Reference implementation of torch.diagonal + """ + num_dims = self.dim() + dim1 = utils.canonicalize_dim(idx=dim1, rank=num_dims) + dim2 = utils.canonicalize_dim(idx=dim2, rank=num_dims) + + torch._check( + dim1 != dim2, lambda: f"diagonal dimensions cannot be identical {dim1}, {dim2}" + ) + + storage_offset = self.storage_offset() + + if offset >= 0: + diag_size = max(min(self.size()[dim1], self.size()[dim2] - offset), 0) + else: + diag_size = max(min(self.size()[dim1] + offset, self.size()[dim2]), 0) + + if diag_size > 0: + if offset >= 0: + storage_offset += offset * self.stride()[dim2] + else: + storage_offset -= offset * self.stride()[dim1] + + sizes = [s for i, s in enumerate(self.size()) if i not in (dim1, dim2)] + sizes.append(diag_size) + + strides = [s for i, s in enumerate(self.stride()) if i not in (dim1, dim2)] + strides.append(self.stride()[dim1] + self.stride()[dim2]) + + result = self.as_strided(size=sizes, stride=strides, storage_offset=storage_offset) + + return result + + +diagonal_copy = _make_copy_from_view(diagonal) + + +@register_decomposition(aten.diag_embed) +@out_wrapper() +def diag_embed( + t: TensorLikeType, + offset: int = 0, + dim1: int = -2, + dim2: int = -1, +) -> TensorLikeType: + """ + Reference implementation of torch.diag_embed + """ + # convert from negative dims + rank = t.ndim + 1 + dim1 = utils.canonicalize_dim(rank=rank, idx=dim1) + dim2 = utils.canonicalize_dim(rank=rank, idx=dim2) + + # as per the docs, exchanging dims is equivalent to changing the sign of + # offset + if dim1 > dim2: + dim1, dim2 = dim2, dim1 + offset = -offset + + torch._check( + dim1 != dim2, lambda: f"diagonal dimensions cannot be identical {dim1}, {dim2}" + ) + + # as per the docs, the size of last dim is placed at dim1 and dim2 + last_dim = t.size(-1) + + if offset != 0: + # add padding to match the new size + t_shape = list(t.shape) + t_shape[-1] = builtins.abs(offset) + z = torch.zeros(t_shape, dtype=t.dtype, device=t.device, requires_grad=False) + pair = (z, t) if offset > 0 else (t, z) + t = torch.cat(pair, dim=-1) + # make sure the diagonal always has the same size + last_dim += builtins.abs(offset) + + # preserve original data, but place 1 at dim1 and move last dim to dim2 + t = t.unsqueeze(dim1).movedim(-1, dim2) + + # generate ranges shifting indices based on offset + a_range = torch.arange(last_dim, device=t.device, dtype=torch.int64) + b_range = torch.arange( + offset, last_dim + offset, device=t.device, dtype=torch.int64 + ) + + # broadcast + cond = a_range == b_range.unsqueeze(-1) + cond_shape = [last_dim if i in (dim1, dim2) else 1 for i in range(len(t.shape))] + cond = cond.reshape(cond_shape) + + # aten.diag_embed always returns a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return utils.mask_tensor(cond, t).contiguous() + + +@register_decomposition(aten.block_diag) +@out_wrapper() +def _block_diag_iterable(tensors: List[TensorLikeType]) -> TensorLikeType: + """ + Reference implementation of torch.block_diag + """ + tensors_2d = [ + tensor.view(1, -1) if tensor.dim() <= 1 else tensor for tensor in tensors + ] + + ncols = builtins.sum(tensor.shape[1] for tensor in tensors_2d) + device = tensors_2d[0].device + + result = [] + + col_start = 0 + for i, tensor in 
enumerate(tensors_2d): + torch._check( + tensor.dim() == 2, + lambda: "Input tensors must have 2 or fewer dimensions. " + f"Input {i} has {tensor.dim()} dimensions", + ) + torch._check( + tensor.device == device, + lambda: "Input tensors must all be on the same device. " + f"Input 0 is on device {device} and input {i} is on device {tensor.device}.", + ) + row, col = tensor.shape + left = torch.zeros((row, col_start), device=device, dtype=tensor.dtype) + right = torch.zeros( + (row, ncols - col_start - col), device=device, dtype=tensor.dtype + ) + result += [torch.cat((left, tensor, right), dim=1)] + col_start += col + + return torch.cat(result, dim=0) + + +def block_diag(*tensors: List[TensorLikeType]) -> TensorLikeType: + """ + This is used as an input to PythonRefInfo. `torch.block_diag` + expects arguments splatted, but `aten.block_diag` expects only + one argument that is a list of Tensors. + """ + return _block_diag_iterable(tensors) + + +# CompositeImplicitAutograd - don't register decomp +def dsplit(a: TensorLikeType, sections: DimsType) -> TensorSequenceType: + if a.ndim < 3: + raise RuntimeError( + f"torch.dsplit requires a tensor with at least 3 dimension, but got a tensor with {a.ndim} dimensions!" + ) + if isinstance(sections, IntLike) and (sections == 0 or a.shape[2] % sections != 0): + raise RuntimeError( + "torch.dsplit attempted to split along dimension 2, " + + f"but the size of the dimension {a.shape[2]} is not divisible by the split_size {sections}!" + ) + return tensor_split(a, sections, 2) + + +@register_decomposition(aten.t.default) +def t(a: TensorLikeType): + # TODO: Add sparse support + # if a.is_sparse: + # sparse_dim = a.sparse_dim() + # dense_dim = a.dense_dim() + # if not (sparse_dim <= 2 and dense_dim == 0): + # raise RuntimeError( + # f"t() expects a tensor with <= 2 sparse and 0 dense dimensions, but got {sparse_dim} sparse and" + # f"{dense_dim} dense dimensions" + # ) + if a.ndim > 2: + raise RuntimeError( + f"t() expects a tensor with <= 2 dimensions, but self is {a.ndim}D" + ) + return torch.transpose(a, 0, 0 if a.ndim < 2 else 1) + + +# CompositeImplicitAutograd - don't register decomp +def T(a: TensorLikeType) -> TensorLikeType: + # n != 2 && n != 0 is deprecated in regular PyTorch. + torch._check( + a.ndim in (0, 2), + lambda: ( + "The use of `x.T` on tensors of dimension other than 0 or 2 " + "to reverse their shape is not supported." 
+ ), + ) + return a.t() + + +@register_decomposition(aten.alias) +def alias(a: TensorLikeType) -> TensorLikeType: + return prims.view_of(a) + + +@register_decomposition(aten.transpose) +def transpose(a: TensorLikeType, dim0: int, dim1: int) -> TensorLikeType: + _dim0, _dim1 = utils.canonicalize_dims(a.ndim, (dim0, dim1)) # type: ignore[misc] + + if a.ndim <= 1 or dim0 == dim1: + return aten.alias.default(a) + + _permutation = list(range(0, a.ndim)) + _permutation[_dim0] = _dim1 + _permutation[_dim1] = _dim0 + return torch.permute(a, _permutation) + + +# Aliases for transpose +swap_axes = transpose + + +@register_decomposition(aten.unfold) +def unfold( + self: TensorLikeType, dimension: int, size: int, step: int +) -> TensorLikeType: + shape, strides = _get_unfold_shape_stride( + self.shape, self.stride(), dimension, size, step + ) + return self.as_strided(shape, strides) + + +@register_decomposition(aten.unfold_copy) +@out_wrapper() +def unfold_copy(self: TensorLikeType, dimension: int, size: int, step: int): + return self.unfold(dimension, size, step).clone( + memory_format=torch.contiguous_format + ) + + +def _cumsumprod_common( + func, + init, + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + # We implement all the kwargs of a reduction. ATen just handles dtype + # nb. This decomposition may not be as efficient as a backend-specific implementation + ndim = a.ndim + dim = utils.canonicalize_dim(ndim, dim) + if ndim == 0: + return func(a.unsqueeze(0), dim=0, dtype=dtype, out=out) + a = a.unsqueeze(dim + 1) + rg = torch.arange(a.shape[dim], device=a.device) + mask = rg.unsqueeze(1) <= rg + for _ in range(ndim - dim - 1): + mask = mask.unsqueeze(-1) + masked_a = torch.where(mask, a, init) + return func(masked_a, dim=dim, dtype=dtype, out=out) + + +@register_decomposition(aten.cumsum) +def cumsum( + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + return _cumsumprod_common(func=sum, init=0, a=a, dim=dim, dtype=dtype, out=out) + + +@register_decomposition(aten.cumprod) +def cumprod( + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + return _cumsumprod_common(func=prod, init=1, a=a, dim=dim, dtype=dtype, out=out) + + +# Note: although squeeze is documented as having the out= kwarg it doesn't +@register_decomposition(aten.unsqueeze) +def unsqueeze(a: TensorLikeType, dim: int) -> TensorLikeType: + # Note that unsqueeze canonicalizes with rank + 1 because it allows + # a new innermost dimension to be specified + ndim = a.ndim + 1 + dim = utils.canonicalize_dim(ndim, dim) + return prims.expand_dims(a, (dim,), ndim=ndim) + + +# NOTE: shape is a vararg because Tensor.reshape can be called with as +# Tensor.view(a, b, c) or Tensor.view((a, b, c)) Function call torch.view +# doesn't support unpacked shapes +# TODO: Turn this into a decomposition (currently fails on reshape meta tests) +@register_decomposition(aten.view.default) +def view(a: TensorLikeType, *shape: ShapeType) -> TensorLikeType: + return _reshape_view_helper(a, *shape, allow_copy=False) + + +# CompositeImplicitAutograd - don't register decomp +def view_as(self: TensorLikeType, other: TensorLikeType) -> TensorLikeType: + return self.view(other.size()) + + +# CompositeImplicitAutograd - don't register decomp +def ravel(a: TensorLikeType) -> TensorLikeType: + return reshape(a, (-1,)) + + +# 
CompositeImplicitAutograd - don't register decomp +# missing ref impl. for aten.gather +@out_wrapper() +def take_along_dim( + a: torch.Tensor, indices: torch.Tensor, dim: Optional[int] = None +) -> torch.Tensor: + torch._check( + a.ndim == indices.ndim, + lambda: ( + "torch.take_along_dim(): input and indices should have the same " + f"number of dimensions, but got {a.ndim} dimensions for input, and " + f"{indices.ndim} dimensions for indices" + ), + ) + + torch._check( + utils.is_integer_dtype(indices.dtype), + lambda: ( + "torch.take_along_dim(): dtype of indices should be int but got " + f"{indices.dtype} instead" + ), + ) + + if dim is None: + return torch.gather(a.view(-1), 0, indices.view(-1)) + else: + self_sizes = list(a.shape) + self_sizes[dim] = indices.size(dim) + broadcast_shape = utils.infer_size_shapes(self_sizes, indices.size()) + indices_broadcast = broadcast_to(indices, broadcast_shape) + + indices_sizes = list(indices.shape) + indices_sizes[dim] = a.size(dim) + broadcast_shape = utils.infer_size_shapes(indices_sizes, a.size()) + self_broadcast = broadcast_to(a, broadcast_shape) + + return torch.gather(self_broadcast, dim, indices_broadcast) + + +@out_wrapper() +def empty( + *shape, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + requires_grad: bool = False, + pin_memory: bool = False, + memory_format: torch.memory_format = torch.contiguous_format, +) -> TensorLikeType: + torch._check( + memory_format != torch.preserve_format, + lambda: "torch.empty: the Preserve memory format is not supported", + ) + + shape = utils.extract_shape_from_varargs(shape) + + if memory_format == torch.contiguous_format: + strides = utils.make_contiguous_strides_for(shape) + elif memory_format == torch.channels_last_3d: + strides = utils.make_channels_last_3d_strides_for(shape) + else: # memory_format == torch.channels_last + torch._check( + memory_format == torch.channels_last, + lambda: f"torch.empty: received an unknown memory format {memory_format}!", + ) + strides = utils.make_channels_last_2d_strides_for(shape) + + return torch.empty_strided( + shape, + strides, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@out_wrapper() +def empty_permuted( + shape, + physical_layout, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + return prims.empty_permuted( + shape, + physical_layout, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_empty) +@out_wrapper() +def new_empty( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.empty( + size, + dtype=dtype, + device=device, + pin_memory=pin_memory, + layout=layout, + ) + + +@register_decomposition(aten.new_empty_strided) +@out_wrapper() +def new_empty_strided( + a: TensorLikeType, + size: ShapeType, + stride: StrideType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> 
TensorLikeType: + """ + Reference implementation of torch.Tensor.new_empty_strided + """ + + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.empty_strided( + size, + stride, + dtype=dtype, + device=device, + pin_memory=pin_memory, + layout=layout, + ) + + +@register_decomposition(aten.zeros.default) +@out_wrapper() +def zeros( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + size = utils.extract_shape_from_varargs(size) + + if dtype is None: + dtype = torch.get_default_dtype() + + return torch.full( + size, + False if dtype == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_zeros) +@out_wrapper() +def new_zeros( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + False if (dtype or a.dtype) == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.ones.default) +@out_wrapper() +def ones( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + size = utils.extract_shape_from_varargs(size) + + if dtype is None: + dtype = torch.get_default_dtype() + + return torch.full( + size, + True if dtype == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_ones) +@out_wrapper() +def new_ones( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + True if (dtype or a.dtype) == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_full) +@out_wrapper() +def new_full( + a: TensorLikeType, + size: ShapeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + fill_value, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + ) + + +@register_decomposition(aten.empty_like) +@out_wrapper() +def empty_like( + a: TensorLikeType, + *, + dtype: 
Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: Optional[torch.layout] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + if memory_format != torch.preserve_format: + return torch.empty( + a.shape, + dtype=dtype, + layout=layout, + device=device, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + ) + + # memory_format == torch.preserve_format + logical_to_physical_perm = ( + utils.compute_elementwise_output_logical_to_physical_perm(a) + ) + # identity perm is [2, 1, 0] + return torch.empty_permuted( + a.shape, + logical_to_physical_perm, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition([aten.arange.start_step, aten.arange.start_out]) +@out_wrapper() +def arange( + start: NumberType = 0, + end: Optional[NumberType] = None, + step: NumberType = 1, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + device = torch.device(utils.device_or_default(device)) + + assert not isinstance(start, complex) + assert not isinstance(end, complex) + assert not isinstance(step, complex) + + # Case: torch.arange(5) + if end is None: + end = start + start = 0 + torch._check(step != 0, lambda: "step must be nonzero") + if step > 0: + torch._check( + end >= start, + lambda: "upper bound and lower bound inconsistent with step sign", + ) + elif step < 0: + torch._check( + end <= start, + lambda: "upper bound and lower bound inconsistent with step sign", + ) + + def is_finite(x): + return not isinstance(x, FloatWithoutSymFloat) or math.isfinite(x) + + torch._check( + is_finite(start) and is_finite(end), + lambda: f"unsupported range: {start} -> {end}", + ) + torch._check( + is_finite(step), + lambda: f"step must be finite but got {step}", + ) + + if dtype is None: + args = (start, end, step) + integer_args = builtins.all(isinstance(arg, IntLike) for arg in args) + dtype = torch.int64 if integer_args else torch.get_default_dtype() + + is_integer = utils.is_integer_dtype(dtype) + if is_integer: + xstart = sym_int(start) + xend = sym_int(end) + xstep = sym_int(step) + + # For int64 we truncate arguments to int before calculating length, but + # other integral dtypes we don't. Weird... but needed to match ATen shapes. + if dtype == torch.int64: + # Uses floordiv to avoid ceil in inductor. 
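+            # Illustrative note (editorial addition, not from the upstream
+            # source): for a positive integer step this is the usual
+            # ceil-division rewrite, e.g. start=0, end=5, step=2 gives
+            # (5 - 0 + 2 - 1) // 2 == 3 elements (0, 2, 4).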
+ sgn = bool(xstep > 0) - bool(xstep < 0) # type: ignore[possibly-undefined] + length = (xend - xstart + xstep - sgn) // xstep # type: ignore[possibly-undefined] + else: + length = math.ceil((end - start) / step) + + if is_integer: + return prims.iota( + length, + start=xstart, # type: ignore[possibly-undefined] + step=xstep, # type: ignore[possibly-undefined] + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + computation_dtype = utils.get_acc_type(dtype, device) + index = prims.iota( + length, + start=0, + step=1, + dtype=torch.int64, + device=device, + requires_grad=False, + ) + index = _maybe_convert_to_dtype(index, computation_dtype) + result = start + step * index + result = _maybe_convert_to_dtype(result, dtype) + + if requires_grad: + result.requires_grad_(True) + return result + + +@register_decomposition(aten.lerp) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("start", "end", "weight"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def lerp(start: Tensor, end: Tensor, weight: Union[Tensor, NumberType]): + inputs = [start, end] + if isinstance(weight, Number): + weight = start.new_full((), weight) # type: ignore[arg-type] + else: + inputs.append(weight) + assert isinstance(weight, Tensor) # mypy + # We implement it this way for numerical stability. We assume (in the stability optimisation) + # that 0 <= weight <= 1. We take the abs to deal with complex numbers + # We want to perform operations near zero, which is where floating points are most precise + # thus, we perform the following optimisation: + # If weight.abs() >= 0.5: + # return (1 - weight) * (start - end) + end + mask = weight.abs() >= 0.5 + coeff = torch.where(mask, weight - 1, weight) + base = torch.where(mask, end, start) + output = coeff * (end - start) + base + # make sure the decomposition output's stride is same as non-decomposition path. 
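+    # Illustrative note (editorial addition, not from the upstream source):
+    # e.g. if `start` and `end` are transposed (non-contiguous) views, eager
+    # lerp derives the output layout from those inputs, so the decomposition
+    # re-strides its result below to match.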
+ stride = utils.compute_elementwise_output_strides(*_maybe_broadcast(*inputs)) + if output.stride() != stride: + output = prims.copy_strided(output, stride) + + return handle_noncontiguous_outputs(inputs, output) + + +@register_decomposition(aten.linspace) +@out_wrapper() +def linspace( + start: Union[NumberType, TensorLikeType], + end: Union[NumberType, TensorLikeType], + steps: NumberType, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + if isinstance(start, TensorLikeType): + torch._check( + start.dim() == 0, + lambda: "linspace only supports 0-dimensional start and end tensors", + ) + start = _maybe_convert_to_dtype(start, torch.float64) + if isinstance(end, TensorLikeType): + torch._check( + end.dim() == 0, + lambda: "linspace only supports 0-dimensional start and end tensors", + ) + end = _maybe_convert_to_dtype(end, torch.float64) + + if py_any(isinstance(arg, complex) for arg in (start, end, steps)): + default_complex_dtype = utils.corresponding_complex_dtype( + torch.get_default_dtype() + ) + if dtype is None: + dtype = default_complex_dtype + else: + torch._check( + utils.is_complex_dtype(dtype), + lambda: f"linspace(): inferred dtype {default_complex_dtype} can't be safely cast to passed dtype {dtype}", + ) + else: + dtype = dtype or torch.get_default_dtype() + assert isinstance(dtype, torch.dtype) + + # steps does not participate in the computation of the dtype + torch._check_type( + isinstance(steps, IntLike), + lambda: f"received an invalid combination of arguments - got \ +({type(start).__name__}, {type(end).__name__}, {type(steps).__name__})", + ) + assert isinstance(steps, IntLike) # for mypy + torch._check(steps >= 0, lambda: "number of steps must be non-negative") + + factory_kwargs = { + "layout": layout, + "device": device, + "pin_memory": pin_memory, + "requires_grad": requires_grad, + } + if steps == 0: + return torch.full((0,), 0, dtype=dtype, **factory_kwargs) # type: ignore[arg-type] + if steps == 1: + if isinstance(start, TensorLikeType): + return torch.empty((steps,), dtype=dtype, **factory_kwargs).copy_(start) # type: ignore[arg-type] + else: + return torch.full((steps,), start, dtype=dtype, **factory_kwargs) # type: ignore[arg-type] + + # Perform in arange in int because some backends like ATen or Triton do not support all the dtypes + rg = torch.arange(0, steps, **factory_kwargs) # type: ignore[arg-type] + + # Small types need to be computed in higher precision as this is, at heart, an associative scan + dtype_red = ( + torch.int64 + if (utils.is_boolean_dtype(dtype) or utils.is_integer_dtype(dtype)) + else dtype + ) + computation_dtype, _ = utils.reduction_dtypes( + rg, REDUCTION_OUTPUT_TYPE_KIND.SAME, dtype_red + ) + cast_rg = partial(_maybe_convert_to_dtype, dtype=computation_dtype) + + # We implement torch.lerp without performing rg / (steps - 1) explicitly + # With this we get out[0] == start, out[-1] == end + step = (end - start) / (steps - 1) + out = torch.where( + rg < steps / 2, + start + step * cast_rg(rg), # type: ignore[arg-type,operator] + end - step * cast_rg((steps - 1) - rg), # type: ignore[arg-type,operator] + ) + return _maybe_convert_to_dtype(out, dtype) # type: ignore[return-value] + + +@register_decomposition(aten.logspace) +@out_wrapper() +def logspace( + start: Union[NumberType, TensorLikeType], + end: Union[NumberType, TensorLikeType], + steps: NumberType, + base: NumberType = 
10, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + if dtype is None: + dtype = torch.get_default_dtype() + + # NB: NumPy doesn't have this cast + if prims.utils.is_integer_dtype(dtype): + if isinstance(start, FloatLike): + start = sym_int(start) + elif isinstance(start, TensorLikeType): + torch._check( + start.dim() == 0, + lambda: "logspace only supports 0-dimensional start and end tensors", + ) + start = _maybe_convert_to_dtype(start, dtype) + if isinstance(end, FloatLike): + end = sym_int(end) + elif isinstance(end, TensorLikeType): + torch._check( + end.dim() == 0, + lambda: "logspace only supports 0-dimensional start and end tensors", + ) + end = _maybe_convert_to_dtype(end, dtype) + + if py_any(isinstance(arg, complex) for arg in (start, end, steps)): + default_complex_dtype = utils.corresponding_complex_dtype( + torch.get_default_dtype() + ) + dtype = default_complex_dtype + _dtype = None # torch.linspace will update the correct dtype + else: + _dtype = torch.float64 + + assert not isinstance(base, complex) # for mypy + if base < 0: + raise NotImplementedError + ret = torch.linspace( # type: ignore[misc] + start, # type: ignore[arg-type] + end, # type: ignore[arg-type] + steps, # type: ignore[arg-type] + dtype=_dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + return _maybe_convert_to_dtype(torch.pow(base, ret), dtype) # type: ignore[arg-type,return-value] + + +@overload +def meshgrid(tensors: Sequence[TensorLikeType], indexing: str): + pass + + +@overload +def meshgrid(*tensors: TensorLikeType, indexing: str): + pass + + +@register_decomposition(aten.meshgrid) +def meshgrid( + *tensors: Union[TensorLikeType, List[TensorLikeType], Tuple[TensorLikeType]], + indexing: str, +) -> List[TensorLikeType]: + # This ref simultaneously handles two overloads (see stubs above) + # The `indexing` argument is currently optional for torch.meshgrid, but we + # plan to make the argument required: https://github.com/pytorch/pytorch/issues/50276 + if isinstance(tensors[0], (list, tuple)): + assert len(tensors) == 1 + tensors = tuple(tensors[0]) + + torch._check( + py_all(isinstance(a, TensorLike) for a in tensors), + lambda: "meshgrid expects its inputs to be tensors", + ) + + torch._check(len(tensors) > 0, lambda: "meshgrid expects a non-empty TensorList") + + for i in range(len(tensors) - 1): + torch._check( + tensors[i].dtype == tensors[i + 1].dtype, # type: ignore[union-attr] + lambda: "meshgrid expects all tensors to have the same dtype", + ) + torch._check( + tensors[i].device == tensors[i + 1].device, # type: ignore[union-attr] + lambda: "meshgrid expects all tensors to have the same device", + ) + + swap_first_and_second_tensors = False + if indexing == "xy": + swap_first_and_second_tensors = len(tensors) >= 2 + if swap_first_and_second_tensors: + tensors = (tensors[1], tensors[0], *tensors[2:]) + else: + torch._check( + indexing == "ij", + lambda: ( + 'torch.meshgrid: indexing must be one of "xy" or "ij", ' + f"but received: {indexing}" + ), + ) + + result_shape: List[int] = [] + for t in tensors: + assert isinstance(t, TensorLike) # mypy + torch._check( + t.ndim == 0 or t.ndim == 1, + lambda: f"torch.meshgrid: Expected 0D or 1D tensor in the tensor list but got: {t}", + ) + result_shape.append(t.numel()) + + grids: List[TensorLikeType] = [] + for i, t in enumerate(tensors): + 
assert isinstance(t, TensorLike) # mypy + if t.ndim == 0: + t = t.view((1,)) + grids.append(prims.broadcast_in_dim(t, result_shape, (i,))) + + if swap_first_and_second_tensors: + # Swap outputs if we originally swapped at the beginning + grids[0], grids[1] = grids[1], grids[0] + + return grids + + +# CompositeImplicitAutograd - don't register decomp +def movedim( + input: TensorLikeType, + source: Union[int, DimsSequenceType], + destination: Union[int, DimsSequenceType], +) -> TensorLikeType: + """ + Reference implementation of torch.movedim + """ + if type(source) is int: + source = (source,) + if type(destination) is int: + destination = (destination,) + + # Converts to list to produce a compatible error message with core PyTorch, + # which prints sequences in square brackets. + torch._check( + len(source) == len(destination), # type: ignore[arg-type] + lambda: ( + "movedim: Invalid source or destination dims: source " # type: ignore[arg-type] + f"({list(source)} dims) should contain the same number " # type: ignore[arg-type] + f"of dims as destination ({list(destination)} dims)" # type: ignore[arg-type] + ), + ) + + rank = input.ndim + ss = tuple(utils.canonicalize_dims(rank=rank, indices=source)) # type: ignore[arg-type] + ds = tuple(utils.canonicalize_dims(rank=rank, indices=destination)) # type: ignore[arg-type] + + sss = set(ss) + dss = set(ds) + + # See above on why this converts to list in error messages. + torch._check( + len(ss) == len(sss), + lambda: f"movedim: repeated dim in `source` ({list(source)})", # type: ignore[arg-type] + ) + torch._check( + len(ds) == len(dss), + lambda: f"movedim: repeated dim in `destination` ({list(destination)})", # type: ignore[arg-type] + ) + + m = dict(zip(ds, ss)) + dims = [] + si = 0 # source index + for di in range(rank): + # check if the destination index is in the mapping + s = m.get(di) + if s is not None: + # insert source index if found + dims.append(s) + else: + # insert source index sequentially, skipping indices from the mapping + while si in sss: + si += 1 + dims.append(si) + si += 1 + + result = torch.permute(input, tuple(dims)) + + return result + + +# NOTE: for convenience, shape can be a tuple of ints or a tuple containing a tuple of ints +@register_decomposition(aten.empty_strided) +@out_wrapper() +def empty_strided( + shape: Union[ShapeType, Tuple[ShapeType]], + strides: StrideType, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + # Layout == strided, pin_memory is False + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + + shape = utils.extract_shape_from_varargs(shape) + dtype = torch.get_default_dtype() if dtype is None else dtype + device = torch.device("cpu") if device is None else device + + return prims.empty_strided( + shape, + strides, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.eye) +@out_wrapper() +def eye( + n: int, + m: Optional[int] = None, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, # TODO: unused +) -> TensorLikeType: + """ + Reference implementation of torch.eye + """ + if m is None: + m = n + + torch._check(n >= 0, lambda: f"n must be greater or equal to 0, got {n}") + torch._check(m >= 0, lambda: f"m must be greater or equal to 0, got 
{m}") + + range_n = torch.arange(n, dtype=torch.int64, device=device, requires_grad=False) + range_m = torch.arange(m, dtype=torch.int64, device=device, requires_grad=False) + + cond = range_n.unsqueeze(-1) == range_m + if dtype is torch.bool: + return cond + else: + one = torch.ones( + (1,), + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=False, + ) + return torch.where(cond, one, 0) + # TODO: Use requires_grad. All refs taking the requires_grad kwarg must + # return a leaf tensor. + # result.requires_grad_(requires_grad) + + +@register_decomposition([aten.full.default, aten.full.out]) +@out_wrapper() +def full( + shape: ShapeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + + dtype = dtype if dtype is not None else utils.type_to_dtype(type(fill_value)) + device = device if device is not None else torch.device("cpu") + + e = empty( + shape, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + return torch.fill(e, fill_value) # type: ignore[arg-type] + + +def full_like( + a: TensorLikeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + e = torch.empty_like( + a, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + return fill(e, fill_value) + + +@register_decomposition(aten.zeros_like) +@out_wrapper() +def zeros_like( + a: TensorLikeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + return torch.full_like( + a, + False if (dtype or a.dtype) == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + +@register_decomposition(aten.ones_like) +@out_wrapper() +def ones_like( + a: TensorLikeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + return torch.full_like( + a, + True if (dtype or a.dtype) == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + +@register_decomposition(aten.randn.default) +@out_wrapper() +def randn( + *shape, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: Optional[torch.layout] = None, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + utils.check_pin_memory(pin_memory) + + shape_ = utils.extract_shape_from_varargs(shape) + + dtype = utils.dtype_or_default(dtype) + device = utils.device_or_default(device) + + return prims.normal( + shape_, + 
mean=0.0, + std=1.0, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +def scalar_tensor( + a: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + dtype = dtype if dtype is not None else utils.type_to_dtype(type(a)) + device = device if device is not None else torch.device("cpu") + return prims.scalar_tensor(a, dtype=dtype, device=device) + + +# +# Randomness References +# + + +def _uniform_helper( + shape: ShapeType, + low: Union[bool, int, float] = 0.0, + high: Union[bool, int, float] = 1.0, + *, + dtype: torch.dtype, + device: DeviceLikeType, +) -> TensorLikeType: + utils.validate_shape(shape) + + assert isinstance(low, Number) + assert isinstance(high, Number) + low = sym_float(low) + high = sym_float(high) + + assert isinstance(dtype, torch.dtype) + device = utils.canonicalize_device(device) + + return prims._uniform_helper(shape, low=low, high=high, dtype=dtype, device=device) + + +@register_decomposition(aten.masked_fill) +@out_wrapper() +def masked_fill(a: TensorLikeType, mask: TensorLikeType, value: TensorOrNumberLikeType): + python_type = utils.dtype_to_type(a.dtype) + if isinstance(value, Number): + value_type = type(value) + else: + # NOTE: Could not use value = item(value) as it resulted in + # RuntimeError: Cannot cast FakeTensor(cpu) to number + value_ndim = value.ndim + torch._check( + value_ndim == 0, + lambda: f"only supports a 0-dimensional value tensor, but got tensor with {value_ndim} dimension", + ) + # `masked_fill` allows cpu scalar to be moved to cuda and xpu but not otherwise. + is_cpu_scalar = a.device.type in ["cuda", "xpu"] and value.device.type == "cpu" + torch._check( + is_cpu_scalar or value.device == a.device, + lambda: "Expected `value` to be on same device as `a`", + ) + value_type = utils.dtype_to_type(value.dtype) + + if value_type is complex: + # only downcasting from complex to lower type is not allowed. + # We allow casting `value` to lower type for other case + # Eg. float -> int. 
+ # Ref: https://github.com/pytorch/pytorch/issues/79195 + torch._check( + utils.is_weakly_lesser_type(value_type, python_type), + lambda: f"could not convert to type {python_type} without overflow", + ) + + # Since `where` allows type-promotion, + # cast value to correct type before passing to `where` + value = _maybe_convert_to_dtype(value, a.dtype) + r = torch.where(mask, value, a) # type: ignore[arg-type] + + # aten.mask_fill always return a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return r.contiguous() + + +@register_decomposition(aten.masked_fill_) +def masked_fill_( + a: TensorLikeType, mask: TensorLikeType, value: TensorOrNumberLikeType +) -> TensorLikeType: + b = torch.masked_fill(a, mask, value) # type: ignore[arg-type] + a.copy_(b) + return a + + +# CompositeImplicitAutograd - don't register decomp +def allclose( + a: TensorLikeType, + b: TensorLikeType, + rtol: float = 1e-05, + atol: float = 1e-08, + equal_nan: bool = False, +) -> bool: + """ + Reference implementation of torch.allclose + """ + _check_close_args(name="torch.allclose", a=a, b=b, rtol=rtol, atol=atol) + + return bool( + torch.all(torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)).item() + ) + + +def equal(a: TensorLikeType, b: TensorLikeType) -> bool: + utils.check_same_device(a, b, allow_cpu_scalar_tensors=False) + utils.check_same_dtype(a, b) + + # Shape check + if a.ndim != b.ndim: + return False + + for x, y in zip(a.shape, b.shape): + if x != y: + return False + + # Short-circuits if there are no elements to validate + if a.numel() == 0: + return True + + return item(all(eq(a, b))) # type: ignore[return-value] + + +@register_decomposition(aten.norm) +@out_wrapper(exact_dtype=True) +def norm( + input: TensorLikeType, + p: Optional[Union[float, str]] = "fro", + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # In these cases we compute the "Frobenius norm" + if ( + p == "fro" and (dim is None or isinstance(dim, Dim) or len(dim) <= 2) + ) or p is None: + p = 2 + if isinstance(dim, Dim): + dim = [dim] + if isinstance(p, str): + # Here we either call the nuclear norm, or we call matrix_norm with some arguments + # that will throw an error + if dim is None: + dim = tuple(range(input.ndim)) + return torch.linalg.matrix_norm(input, p, dim, keepdim, dtype=dtype) + else: + return torch.linalg.vector_norm(input, p, dim, keepdim, dtype=dtype) + + +@register_decomposition(aten.trace) +@out_wrapper() +def trace(self: TensorLikeType) -> TensorLikeType: + torch._check( + self.ndim == 2, lambda: "expected a matrix, but got tensor with dim {self.ndim}" + ) + return torch.sum(torch.diag(self, 0)) + + +def _make_r_binary_op(base_op): + def rop( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + ) -> TensorLikeType: + return base_op(b, a) + + return rop + + +rtruediv = _make_r_binary_op(true_divide) +rfloordiv = _make_r_binary_op(floor_divide) +rpow = _make_r_binary_op(pow) + + +@register_decomposition(aten.triu) +@out_wrapper() +def triu(a: TensorLikeType, diagonal: int = 0) -> TensorLikeType: + torch._check( + a.ndim >= 2, lambda: "triu: input tensor must have at least 2 dimensions" + ) + h, w = a.shape[-2:] + mask = ( + torch.arange(w, device=a.device).unsqueeze(-2) + - torch.arange(h, device=a.device).unsqueeze(-1) + ) >= diagonal + + # aten.triu always returns a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return 
utils.mask_tensor(mask, a).contiguous() + + +@register_decomposition(aten.tril) +@out_wrapper() +def tril(a: TensorLikeType, diagonal: int = 0) -> TensorLikeType: + torch._check( + a.ndim >= 2, lambda: "tril: input tensor must have at least 2 dimensions" + ) + h, w = a.shape[-2:] + mask = ( + torch.arange(w, device=a.device).unsqueeze(-2) + - torch.arange(h, device=a.device).unsqueeze(-1) + ) <= diagonal + + # aten.tril always returns a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return utils.mask_tensor(mask, a).contiguous() + + +# This is based on get_tril_size in aten/src/ATen/native/TensorFactories.h +# The components of the matrix that belong to the lower triangle with offset +# form a pentagon that can be broken down into a top trapezoid and a bottom +# rectangle. For the implementation of tril_indices, we need the sizes of +# both of these, as well as the length of the top side of the trapezoid. +def _get_tril_sizes(row: int, col: int, offset: int) -> Tuple[int, int, int]: + if row == 0 or col == 0: + return 0, 0, 0 + + m_first_row = min(col, 1 + offset) if offset > 0 else int(row + offset > 0) + m_last_row = max(0, min(col, row + offset)) + n_row_all = max(0, min(row, row + offset)) + n_row_trapezoid = m_last_row - m_first_row + 1 + + # Number of elements in top trapezoid + trapezoid_size = (m_first_row + m_last_row) * n_row_trapezoid // 2 + # Number of elements in bottom rectangle + diff_row = n_row_all - n_row_trapezoid + rectangle_size = max(0, diff_row * col) + + return trapezoid_size, rectangle_size, m_first_row + + +def _trilu_checks( + name: str, + row: int, + col: int, + dtype: torch.dtype, + layout: torch.layout, + pin_memory: bool, +): + torch._check(row >= 0, lambda: f"row must be non-negative, got {row}") + torch._check(col >= 0, lambda: f"col must be non-negative, got {col}") + torch._check( + dtype in (torch.int32, torch.int64), + lambda: f"\"{name}\" not implemented for '{dtype}'", + ) + + +# This is based on tril_indices_cuda in aten/src/ATen/native/cuda/TensorFactories.cu +@register_decomposition(aten.tril_indices) +@out_wrapper() +def tril_indices( + row: int, + col: int, + offset: int = 0, + *, + dtype: torch.dtype = torch.long, + layout: torch.layout = torch.strided, + device: DeviceLikeType = "cpu", + pin_memory: bool = False, +) -> TensorLikeType: + _trilu_checks("tril_indices", row, col, dtype, layout, pin_memory) + + trapezoid_size, rectangle_size, m_first_row = _get_tril_sizes(row, col, offset) + row_offset = max(0, -offset) + + arange_kw = partial( + torch.arange, layout=layout, device=device, pin_memory=pin_memory + ) + + # first we do the indices for top trapezoid + xs1 = arange_kw(0, trapezoid_size, dtype=torch.float64) + b = m_first_row - 0.5 + row_inds1 = torch.floor(-b + torch.sqrt(b * b + 2 * xs1)) + col_inds1 = torch.floor(xs1 - (2 * m_first_row - 1 + row_inds1) * row_inds1 * 0.5) + row_inds1 = _maybe_convert_to_dtype(row_inds1 + row_offset, dtype) + col_inds1 = _maybe_convert_to_dtype(col_inds1, dtype) + + # then bottom rectangle + xs2 = arange_kw(0, rectangle_size, dtype=dtype) + row_inds2 = xs2 // col + (col - m_first_row + 1 + row_offset) + col_inds2 = xs2 % col + + return torch.stack( + (torch.cat((row_inds1, row_inds2)), torch.cat((col_inds1, col_inds2))) + ) + + +# Similar to _get_tril_sizes above, but here there is a top trapezoid and +# a bottom rectangle instead. 
Note that you can't reduce this to +# _get_tril_sizes(col, row, -offset) because that would correspond to +# decomposing into a left trapezoid and right rectangle. +def _get_triu_sizes(row: int, col: int, offset: int) -> Tuple[int, int, int]: + if row == 0 or col == 0: + return 0, 0, 0 + + m_first_row = max(0, col - offset) if offset > 0 else col + + # Number of elements in top rectangle + rectangle_size = max(0, min(row, -offset) * col) + + # Number of elements in bottom trapezoid + trapezoid_size_tril, rectangle_size_tril, _ = _get_tril_sizes(row, col, offset - 1) + triu_size = row * col - (trapezoid_size_tril + rectangle_size_tril) + trapezoid_size = triu_size - rectangle_size + + return trapezoid_size, rectangle_size, m_first_row + + +@register_decomposition(aten.triu_indices) +@out_wrapper() +def triu_indices( + row: int, + col: int, + offset: int = 0, + *, + dtype: torch.dtype = torch.long, + layout: torch.layout = torch.strided, + device: DeviceLikeType = "cpu", + pin_memory: bool = False, +) -> TensorLikeType: + _trilu_checks("triu_indices", row, col, dtype, layout, pin_memory) + + trapezoid_size, rectangle_size, m_first_row = _get_triu_sizes(row, col, offset) + col_offset = max(0, offset) + + arange_kw = partial( + torch.arange, layout=layout, device=device, pin_memory=pin_memory + ) + + # indices for top rectangle + xs2 = arange_kw(0, rectangle_size, dtype=dtype) + row_inds2 = xs2 // col + col_inds2 = xs2 % col + + # bottom trapezoid + xs1 = arange_kw(0, trapezoid_size, dtype=torch.float64) + b = -0.5 - m_first_row + row_inds1 = torch.floor(-b - torch.sqrt(b * b - 2 * xs1)) + col_inds1 = torch.floor(xs1 - ((2 * m_first_row - 1 - row_inds1) * row_inds1) * 0.5) + row_inds1 = _maybe_convert_to_dtype(row_inds1, dtype) + col_inds1 = _maybe_convert_to_dtype(col_inds1, dtype) + + if col: + row_inds1 = row_inds1 + (rectangle_size // col) + col_inds1 = col_inds1 + col_offset + + return torch.stack( + (torch.cat((row_inds2, row_inds1)), torch.cat((col_inds2, col_inds1))) + ) + + +@register_decomposition(aten.bucketize) +@out_wrapper(exact_dtype=True) +def bucketize( + a: TensorLikeType, + boundaries: TensorLikeType, + *, + out_int32: bool = False, + right: bool = False, +): + torch._check( + boundaries.dim() == 1, + lambda: f"boundaries tensor must be 1 dimension but got dim({boundaries.dim()})", + ) + + out_dtype = torch.int32 if out_int32 else torch.int64 + n_boundaries = boundaries.shape[-1] + if n_boundaries == 0: + return torch.zeros_like(a) + # We are trying to find the bucket (defined by pairs of consecutive elements of `boundaries`) + # each element of `a` belongs to. 
We use binary search to achieve logarithimic complexity, + # but each step of the search is done "in parallel" over all elements of `a` + # can't use int32 as indexes, so we have to do all computations with int64 and convert at the end + start = torch.zeros(a.shape, device=a.device, dtype=torch.int64) + end = start + n_boundaries + # Max depth of the binary search + # Since we can't break out of the loop at different points for different elements of a, + # we just do the max amount of iterations that binary search requires and add condition + # tensor (cond_update below) to stop updating once the search terminates + + # For first iteration through loop we can skip some checks, we have separate implementation + mid = start + (end - start) // 2 + mid_val = boundaries[mid] + if right: + cond_mid = mid_val > a + else: + cond_mid = mid_val >= a + start = torch.where(cond_mid, start, mid + 1) + + if n_boundaries > 1: + cond_update = torch.ones_like(a, dtype=torch.bool) + niters = int(math.log2(n_boundaries)) + for _ in range(niters): + end = torch.where(cond_mid & cond_update, mid, end) + cond_update = start < end + # start might end up pointing to 1 past the end, we guard against that + mid = torch.where(cond_update, start + (end - start) // 2, 0) + mid_val = boundaries[mid] + # If right is true, the buckets are closed on the *left* + # (i.e., we are doing the equivalent of std::upper_bound in C++) + # Otherwise they are closed on the right (std::lower_bound) + if right: + cond_mid = mid_val > a + else: + cond_mid = mid_val >= a + start = torch.where((~cond_mid) & cond_update, mid + 1, start) + + return start.to(dtype=out_dtype) + + +@register_decomposition(aten.cauchy) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def cauchy(self, median=0, sigma=1, generator=None): + assert generator is None + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_integer_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"Cauchy distribution is a continuous probability distribution. \ + dtype must be a floating point but you specified {self.dtype}", + ) + torch._check( + sigma > 0.0, + lambda: f"cauchy_ expects sigma > 0.0, but found sigma={sigma}", + ) + return median + sigma * torch.tan(math.pi * (torch.rand_like(self) - 0.5)) + + +@register_decomposition(aten.exponential) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def exponential(self, rate=1, generator=None): + assert generator is None + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_integer_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"Exponential distribution is a continuous probability distribution. 
\ + dtype must be a floating point but you specified {self.dtype}", + ) + torch._check( + rate > 0.0, + lambda: f"exponential_ expects lambda > 0.0, but found lambda={rate}", + ) + return -1 / rate * torch.log1p(-torch.rand_like(self)) + + +@register_decomposition(aten.geometric) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def geometric(self, p, generator=None): + assert generator is None + # TODO: fix inductor rand_like for integer, bool dtypes + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"geometric not implemented for {self.dtype}", + ) + torch._check( + 0 < p and p < 1, + lambda: f"geometric_ expects p to be in (0, 1), but got p={p}", + ) + return torch.floor(torch.log1p(-torch.rand_like(self)) / math.log1p(-p)) + 1 + + +@register_decomposition(aten.log_normal) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def log_normal(self, mean=1, std=2, generator=None): + assert generator is None + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_integer_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"log_normal not implemented for {self.dtype}", + ) + torch._check( + 0 < std, + lambda: f"log_normal_ expects std > 0.0, but found std={std}", + ) + return torch.exp(std * torch.randn_like(self) + mean) + + +# TODO: add support for functionalization aten.normal_functional +# NOTE: the device and dtype will be ignored when shape is None +@register_decomposition(aten.normal) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=( + "mean", + "std", + ), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def normal( + mean=0, + std=1, + size=None, + *, + generator=None, + dtype=None, + layout=None, + device=None, + pin_memory=None, +): + assert layout is None or layout == torch.strided + + if not isinstance(std, TensorLike): + torch._check( + std >= 0, lambda: f"normal expects std >= 0.0, but found std {std}" + ) + + if size is None: + tensors = tuple(t for t in (mean, std) if isinstance(t, TensorLike)) + torch._check( + len(tensors) > 0, + lambda: "normal expects that either mean or std is a tensor, or size is defined", + ) + torch._check( + layout is None and pin_memory is None, + lambda: "Cannot pass layout, or pin_memory without size", + ) + + size = _broadcast_shapes(*(t.shape for t in tensors)) + dtype = tensors[0].dtype + device = tensors[0].device + else: + torch._check( + not isinstance(mean, TensorLike) and not isinstance(std, TensorLike), + lambda: "normal expects mean and std to be scalars when size is defined", + ) + dtype = torch.get_default_dtype() if dtype is None else dtype + device = torch.device("cpu") if device is None else device + + normal_samples = prims.normal( + size, + mean=0.0, + std=1.0, + dtype=dtype, + device=device, + requires_grad=False, + generator=generator, + ) + return std * normal_samples + mean + + +@register_decomposition(aten.normal_) +def normal_(self, mean=0, std=1, *, generator=None): + return normal(mean, std, self.shape, out=self, generator=generator) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def rad2deg(self: TensorLikeType): + torch._check( + not utils.is_complex_dtype(self.dtype), + lambda: "rad2deg is not supported for complex tensors.", + ) 
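+    # 180 / pi (~57.29578): multiplying by this constant converts radians to degrees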
+ M_180_PI = 57.295779513082320876798154814105170332405472466564 + return self * M_180_PI + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def deg2rad(self: TensorLikeType): + torch._check( + not utils.is_complex_dtype(self.dtype), + lambda: "deg2rad is not supported for complex tensors.", + ) + M_PI_180 = 0.017453292519943295769236907684886127134428718885417 + return self * M_PI_180 + + +@register_decomposition(aten.count_nonzero) +@out_wrapper() +def count_nonzero(self, dim: Optional[DimsType] = None): + return (self != 0).sum(dim) + + +def _dot_check(self, other): + torch._check( + self.dim() == 1 and other.dim() == 1, + lambda: f"1D tensors expected, but got {self.dim()}D and {other.dim()}D tensors", + ) + + def numel_error(): + return ( + f"inconsistent tensor size, expected tensor [{self.numel()}] and src [{other.numel()}] to have the" + f"same number of elements, but got {self.numel()} and {other.numel()} elements respectively" + ) + + torch._check(self.numel() == other.numel(), numel_error) + + +@register_decomposition(aten.dot) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "other"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def dot(self, other): + if self.is_complex(): + if self.is_conj(): + if other.is_conj(): + return torch.dot(self.conj(), other.conj()).conj() + else: + return torch.vdot(self.conj(), other) + elif other.is_conj(): + return torch.vdot(other.conj(), self) + + _dot_check(self, other) + return (self * other).sum() + + +@register_decomposition(aten.vdot) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "other"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def vdot(self, other): + if not self.is_complex(): + return torch.dot(self, other) + + if self.is_conj(): + if other.is_conj(): + return torch.vdot(other.conj(), self.conj()) + else: + return torch.dot(self.conj(), other) + elif other.is_conj(): + return torch.dot(self, other.conj()).conj() + + _dot_check(self, other) + # The decomposition fails if you do self.conj()... 
not sure why + return (self.conj_physical() * other).sum() + + +# inplace +abs_ = _make_inplace(abs) +acos_ = _make_inplace(acos) +acosh_ = _make_inplace(acosh) +add_ = _make_inplace(add) +addcmul_ = _make_inplace(addcmul) +addcdiv_ = _make_inplace(addcdiv) +asin_ = _make_inplace(asin) +asinh_ = _make_inplace(asinh) +atan_ = _make_inplace(atan) +atanh_ = _make_inplace(atanh) +atan2_ = _make_inplace(atan2) +bitwise_and_ = _make_inplace(bitwise_and) +bitwise_left_shift_ = _make_inplace(bitwise_left_shift) +bitwise_not_ = _make_inplace(bitwise_not) +bitwise_or_ = _make_inplace(bitwise_or) +bitwise_right_shift_ = _make_inplace(bitwise_right_shift) +bitwise_xor_ = _make_inplace(bitwise_xor) +ceil_ = _make_inplace(ceil) +clamp_ = _make_inplace(clamp) +clamp_min_ = _make_inplace(clamp_min) +clamp_max_ = _make_inplace(clamp_max) +conj_physical_ = _make_inplace(conj_physical) +copysign_ = _make_inplace(copysign) +cos_ = _make_inplace(cos) +cosh_ = _make_inplace(cosh) +cumsum_ = _make_inplace(cumsum) +cumprod_ = _make_inplace(cumprod) +deg2rad_ = _make_inplace(deg2rad) +digamma_ = _make_inplace(digamma) +div_ = _make_inplace(div) +eq_ = _make_inplace(eq) +erf_ = _make_inplace(erf) +erfc_ = _make_inplace(erfc) +erfinv_ = _make_inplace(erfinv) +exp_ = _make_inplace(exp) +exp2_ = _make_inplace(exp2) +expm1_ = _make_inplace(expm1) +float_power_ = _make_inplace(float_power) +floor_ = _make_inplace(floor) +floor_divide_ = _make_inplace(floor_divide) +fmod_ = _make_inplace(fmod) +frac_ = _make_inplace(frac) +gcd_ = _make_inplace(gcd) +ge_ = _make_inplace(ge) +gt_ = _make_inplace(gt) +heaviside_ = _make_inplace(heaviside) +hypot_ = _make_inplace(hypot) +igamma_ = _make_inplace(igamma) +igammac_ = _make_inplace(igammac) +i0_ = _make_inplace(i0) +lcm_ = _make_inplace(lcm) +le_ = _make_inplace(le) +lerp_ = _make_inplace(lerp) +lgamma_ = _make_inplace(lgamma) +log10_ = _make_inplace(log10) +log1p_ = _make_inplace(log1p) +log2_ = _make_inplace(log2) +log_ = _make_inplace(log) +logical_and_ = _make_inplace(logical_and) +logical_not_ = _make_inplace(logical_not) +logical_or_ = _make_inplace(logical_or) +logical_xor_ = _make_inplace(logical_xor) +lt_ = _make_inplace(lt) +mul_ = _make_inplace(mul) +mvlgamma_ = _make_inplace(mvlgamma) +nan_to_num_ = _make_inplace(nan_to_num) +ne_ = _make_inplace(ne) +neg_ = _make_inplace(neg) +nextafter_ = _make_inplace(nextafter) +pow_ = _make_inplace(pow) +rad2deg_ = _make_inplace(rad2deg) +reciprocal_ = _make_inplace(reciprocal) +remainder_ = _make_inplace(remainder) +rsqrt_ = _make_inplace(rsqrt) +sgn_ = _make_inplace(sgn) +sigmoid_ = _make_inplace(sigmoid) +sign_ = _make_inplace(sign) +sin_ = _make_inplace(sin) +sinc_ = _make_inplace(sinc) +sinh_ = _make_inplace(sinh) +sqrt_ = _make_inplace(sqrt) +square_ = _make_inplace(square) +sub_ = _make_inplace(sub) +tan_ = _make_inplace(tan) +tanh_ = _make_inplace(tanh) +tril_ = _make_inplace(tril) +triu_ = _make_inplace(triu) +true_divide_ = _make_inplace(true_divide) +trunc_ = _make_inplace(trunc) +xlogy_ = _make_inplace(xlogy) +cauchy_ = _make_inplace(cauchy) +exponential_ = _make_inplace(exponential) +geometric_ = _make_inplace(geometric) +log_normal_ = _make_inplace(log_normal) +zero_ = _make_inplace(zero) + + +# xref: isStorage in torch/csrc/DynamicTypes.cpp +def _isStorage(obj): + return isinstance(obj, (torch.TypedStorage, torch.UntypedStorage)) + + +# xref: compute_sizes in torch/csrc/utils/tensor_new.cpp +def _compute_sizes(seq, scalar_type): + MAX_DIMS = 128 + is_storage = _isStorage(seq) + sizes = [] + # TODO: this is 
inaccurate, we actually test PySequence_Check + while isinstance(seq, (list, tuple)): + length = len(seq) + if is_storage: + length //= scalar_type.itemsize + sizes.append(length) + if len(sizes) > MAX_DIMS: + raise ValueError(f"too many dimensions '{type(seq).__name__}'") + if length == 0: + break + try: + handle = seq[0] + except Exception: + raise ValueError( # noqa: TRY200 + f"could not determine the shape of object type '{type(seq).__name__}'" + ) + seq = handle + + return sizes + + +# xref: infer_scalar_type in torch/csrc/utils/tensor_new.cpp +def _infer_scalar_type(obj): + if isinstance(obj, FloatLike): + return torch.get_default_dtype() + if isinstance(obj, IntLike) and not isinstance(obj, bool): # careful! + return torch.int64 + if isinstance(obj, bool): + return torch.bool + if isinstance(obj, complex): + default_dtype = torch.get_default_dtype() + if default_dtype is torch.float: + return torch.cfloat + elif default_dtype is torch.double: + return torch.cdouble + else: + raise RuntimeError("invalid default scalar type for complex") + if isinstance(obj, torch.Tensor): + return obj.dtype + if isinstance(obj, str): + raise TypeError(f"new(): invalid data type '{type(obj).__name__}'") + # TODO: this is inaccurate, we actually test PySequence_Check + if isinstance(obj, (list, tuple)): + scalarType = None + length = len(obj) + # match NumPy semantics, except use default tensor type instead of + # double. + if length == 0: + return torch.get_default_dtype() + for i in range(length): + cur_item = obj[i] + # TODO: test this + """ + if cur_item is obj: + raise TypeError("new(): self-referential lists are incompatible") + """ + item_scalarType = _infer_scalar_type(cur_item) # recurse! + if scalarType is not None: + scalarType = torch.promote_types(scalarType, item_scalarType) + else: + scalarType = item_scalarType + if scalarType is torch.cdouble: + # this won't change (unless we hit undefined, but that will + # fail later) + return scalarType + return scalarType + raise RuntimeError(f"Could not infer dtype of {type(obj).__name__}") + + +# Analogous to recursive_store +# xref: recursive_store in torch/csrc/utils/tensor_new.cpp +def _recursive_build(scalarType: torch.dtype, obj: TensorOrNumberLikeType): + if isinstance(obj, Tensor) and obj.ndim <= 1: + obj = obj.item() + # fall through into next case + if isinstance(obj, Number): + return torch.scalar_tensor(obj, dtype=scalarType) + + seq = obj + return torch.stack([_recursive_build(scalarType, item) for item in seq]) + + +# xref: internal_new_from_data in torch/csrc/utils/tensor_new.cpp +def _internal_new_from_data( + options, + scalar_type, + device_opt, + data, + copy_variables, + copy_numpy, + type_inference, + pin_memory=False, +): + if isinstance(data, torch.Tensor): + torch._check( + not pin_memory, lambda: "Can't pin tensor constructed from a variable" + ) + var = data + if copy_variables: + var = var.detach() + inferred_scalar_type = var.dtype if type_inference else scalar_type + device = device_opt if device_opt is not None else var.device + return var.to( + device=device, + dtype=inferred_scalar_type, + non_blocking=False, + copy=copy_variables, + ) + + # TODO + if hasattr(data, "__cuda_array_interface__"): + return NotImplemented + + # TODO: test for numpy input with PyArray_Check + + device = device_opt if device_opt is not None else options["device"] + inferred_scalar_type = _infer_scalar_type(data) if type_inference else scalar_type + + # NB: Don't need to avoid tracing, as we aren't going to do any manual + # pointer filling 
tricks + if _isStorage(data): + return NotImplemented + else: + if torch.device(device).type == "meta": + return NotImplemented + + # In the C implementation, we would directly start poking the memory + # of a freshly allocated CPU tensor. Here, we're going to do an + # alternate, heinously slow implementation: turn each individual + # scalar into a tensor, and then repeatedly cat them together + tensor = _recursive_build(inferred_scalar_type, data) + + tensor = tensor.to(device, inferred_scalar_type, non_blocking=False, copy=False) + + # NB: lift_fresh is not needed, because we built the tensor from scalars + # guaranteeing a fresh tensor in this case + return tensor + + +# xref: tensor_ctor in torch/csrc/utils/tensor_new.cpp +def tensor(data, *, dtype=None, device=None, pin_memory=False, requires_grad=False): + # TODO (or not): support names kwarg + if isinstance(data, torch.Tensor): + warnings.warn( + "To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() " + "or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor)" + ) + type_inference = dtype is None + new_tensor = _internal_new_from_data( + # device="cpu" because that's what you get with torch.tensor(2) no + # device by default + {"device": "cpu"}, # TODO: use torch.get_default_tensor_type + dtype if dtype is not None else torch.get_default_dtype(), + device, + data, + copy_variables=True, + copy_numpy=True, + type_inference=type_inference, + pin_memory=pin_memory, + ) + new_tensor.detach_() + new_tensor.requires_grad_(requires_grad) + return new_tensor + + +# Views +# We can't model these as above, as the pattern of doing `op(a, out=a)` does not work for a view function +# given that it does not reshape the input (it just copies the result into it) + +# squeeze_ = _make_inplace(squeeze) +# t_ = _make_inplace(t) +# transpose_ = _make_inplace(transpose) +# unsqueeze_ = _make_inplace(unsqueeze) + + +import torch._refs._conversions +import torch._refs.fft +import torch._refs.linalg +import torch._refs.nn.functional +import torch._refs.special diff --git a/venv/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa794758ab9076a1857c3f412d78fe1bcb550e9a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..377ce8dbe5e42a9cb2b98f84c3124bdd1c769245 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b6d05784792a7776446c532d41cc47c00d95bdd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_refs/_conversions.py b/venv/lib/python3.10/site-packages/torch/_refs/_conversions.py new file mode 100644 index 
0000000000000000000000000000000000000000..fa1ca2428255aa9fe3892328f6ab95cc5f5b7568 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_refs/_conversions.py @@ -0,0 +1,118 @@ +import torch +import torch._prims_common as utils + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition + +from torch._prims_common import TensorLikeType +from torch._prims_common.wrappers import out_wrapper +from torch._refs import _broadcast_shapes + +# Data conversion references. +# +# Note: this module breaks the usual _refs to torch naming scheme where +# _refs.foo.bar is a ref for torch.foo.bar. The following definitions are not +# part of _refs/__init__.py to avoid name clashes with Python builtin types +# (like int). + +__all__ = [ + # dtypes + "bfloat16", + "bool", + "byte", + "cdouble", + "cfloat", + "chalf", + "char", + "double", + "float", + "half", + "int", + "long", + "short", + # misc + "complex", + "polar", +] + + +def _make_conversion_method(name: str, dtype: torch.dtype): + def fn( + self: TensorLikeType, memory_format: torch.memory_format = torch.preserve_format + ) -> TensorLikeType: + return self.to(dtype, memory_format=memory_format) # type: ignore[call-overload] + + fn.__name__ = name + return fn + + +bfloat16 = _make_conversion_method("bfloat16", torch.bfloat16) + +bool = _make_conversion_method("bool", torch.bool) + +byte = _make_conversion_method("byte", torch.uint8) + +cdouble = _make_conversion_method("cdouble", torch.cdouble) + +cfloat = _make_conversion_method("cfloat", torch.cfloat) + +chalf = _make_conversion_method("chalf", torch.complex32) + +char = _make_conversion_method("char", torch.int8) + +double = _make_conversion_method("double", torch.double) + +float = _make_conversion_method("float", torch.float) + +half = _make_conversion_method("half", torch.half) + +int = _make_conversion_method("int", torch.int) + +long = _make_conversion_method("long", torch.long) + +short = _make_conversion_method("short", torch.short) + + +@register_decomposition(torch._ops.ops.aten.complex) +# Note: complex has type promotion tests disabled due to different semantics. +# exact_dtype is for compat with complex_check_dtype from core. +@out_wrapper(exact_dtype=True) +def complex(real: TensorLikeType, imag: TensorLikeType) -> TensorLikeType: + allowed_dtypes = (torch.float32, torch.float64, torch.float16) + torch._check( + real.dtype in allowed_dtypes and imag.dtype in allowed_dtypes, + lambda: ( + f"Expected both inputs to be Half, Float or Double tensors but got " + f"{real.dtype} and {imag.dtype}" + ), + ) + torch._check( + real.dtype == imag.dtype, + lambda: ( + f"Expected object of scalar type {real.dtype} but got " + f"scalar type {imag.dtype} for second argument" + ), + ) + result_dtype = utils.corresponding_complex_dtype(real.dtype) # type: ignore[arg-type] + common_shape = _broadcast_shapes(real.shape, imag.shape) + result = real.new_empty( + common_shape, + dtype=result_dtype, + layout=real.layout, + device=real.device, + # pin_memory=real.is_pinned(), # NYI + ) + result.real = real + result.imag = imag + return result + + +@register_decomposition(torch._ops.ops.aten.polar) +# Note: polar has type promotion tests disabled due to different semantics. +# exact_dtype is for compat with complex_check_dtype from core. 
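+# polar(abs, angle) builds a complex tensor from polar coordinates: real = abs*cos(angle), imag = abs*sin(angle)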
+@out_wrapper(exact_dtype=True) +def polar(abs: TensorLikeType, angle: TensorLikeType) -> TensorLikeType: + result = torch.complex(abs, angle) + result.real = abs * torch.cos(angle) + result.imag = abs * torch.sin(angle) + return result diff --git a/venv/lib/python3.10/site-packages/torch/_refs/fft.py b/venv/lib/python3.10/site-packages/torch/_refs/fft.py new file mode 100644 index 0000000000000000000000000000000000000000..06dda2d3accb766cec40ee3a2e84a6cafe2e37c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_refs/fft.py @@ -0,0 +1,590 @@ +import math + +from typing import Iterable, List, Literal, NamedTuple, Optional, Sequence, Tuple, Union + +import torch +import torch._prims as prims +import torch._prims_common as utils +from torch._decomp import register_decomposition +from torch._prims_common import DimsType, ShapeType, TensorLikeType +from torch._prims_common.wrappers import _maybe_convert_to_dtype, out_wrapper + +__all__ = [ + # Transforms + "fft", + "fft2", + "fftn", + "hfft", + "hfft2", + "hfftn", + "rfft", + "rfft2", + "rfftn", + "ifft", + "ifft2", + "ifftn", + "ihfft", + "ihfft2", + "ihfftn", + "irfft", + "irfft2", + "irfftn", + # Helpers + "fftshift", + "ifftshift", +] + +NormType = Union[None, Literal["forward", "backward", "ortho"]] +_NORM_VALUES = {None, "forward", "backward", "ortho"} +aten = torch._ops.ops.aten + + +def _apply_norm( + x: TensorLikeType, norm: NormType, signal_numel: int, forward: bool +) -> TensorLikeType: + """Apply normalization to the un-normalized FFT result""" + torch._check(norm in _NORM_VALUES, lambda: f"Invalid normalization mode: {norm}") + + if norm == "ortho": + return x * (1 / math.sqrt(signal_numel)) + + normalize = (not forward and (norm is None or norm == "backward")) or ( + forward and norm == "forward" + ) + return x * (1 / signal_numel) if normalize else x + + +def _promote_type_fft( + dtype: torch.dtype, require_complex: bool, device: torch.device +) -> torch.dtype: + """Helper to promote a dtype to one supported by the FFT primitives""" + if dtype.is_complex: + return dtype + + # Promote integral to default float type + if not dtype.is_floating_point: + dtype = torch.get_default_dtype() + + allowed_types = [torch.float32, torch.float64] + maybe_support_half = device.type in ["cuda", "meta"] + + if maybe_support_half: + allowed_types.append(torch.float16) + torch._check(dtype in allowed_types, lambda: f"Unsupported dtype {dtype}") + + if require_complex: + dtype = utils.corresponding_complex_dtype(dtype) + + return dtype + + +def _maybe_promote_tensor_fft( + t: TensorLikeType, require_complex: bool = False +) -> TensorLikeType: + """Helper to promote a tensor to a dtype supported by the FFT primitives""" + cur_type = t.dtype + new_type = _promote_type_fft(cur_type, require_complex, t.device) + return _maybe_convert_to_dtype(t, new_type) # type: ignore[return-value] + + +def _resize_fft_input( + x: TensorLikeType, dims: Tuple[int, ...], sizes: Tuple[int, ...] +) -> TensorLikeType: + """ + Fixes the shape of x such that x.size(dims[i]) == sizes[i], + either by zero-padding, or by slicing x starting from 0. 
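+    A size of -1 leaves the corresponding dimension unchanged.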
+ """ + assert len(dims) == len(sizes) + must_copy = False + x_sizes = x.shape + pad_amount = [0] * len(x_sizes) * 2 + for i in range(len(dims)): + if sizes[i] == -1: + continue + + if x_sizes[dims[i]] < sizes[i]: + must_copy = True + pad_idx = len(pad_amount) - 2 * dims[i] - 1 + pad_amount[pad_idx] = sizes[i] - x_sizes[dims[i]] + + if x_sizes[dims[i]] > sizes[i]: + x = x.narrow(dims[i], 0, sizes[i]) + + return torch.constant_pad_nd(x, pad_amount) if must_copy else x + + +def _fft_c2r( + func_name: str, + input: TensorLikeType, + n: Optional[int], + dim: int, + norm: NormType, + forward: bool, +) -> TensorLikeType: + """Common code for performing any complex to real FFT (irfft or hfft)""" + input = _maybe_promote_tensor_fft(input, require_complex=True) + dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),) + last_dim_size = n if n is not None else 2 * (input.shape[dim] - 1) + torch._check( + last_dim_size >= 1, + lambda: f"Invalid number of data points ({last_dim_size}) specified", + ) + + if n is not None: + input = _resize_fft_input(input, dims=dims, sizes=(last_dim_size // 2 + 1,)) + + if forward: + input = torch.conj(input) + + output = prims.fft_c2r(input, dim=dims, last_dim_size=last_dim_size) + return _apply_norm(output, norm=norm, signal_numel=last_dim_size, forward=forward) + + +def _fft_r2c( + func_name: str, + input: TensorLikeType, + n: Optional[int], + dim: int, + norm: NormType, + forward: bool, + onesided: bool, +) -> TensorLikeType: + """Common code for performing any real to complex FFT (rfft or ihfft)""" + torch._check( + not input.dtype.is_complex, + lambda: f"{func_name} expects a floating point input tensor, but got {input.dtype}", + ) + input = _maybe_promote_tensor_fft(input) + dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),) + dim_size = n if n is not None else input.shape[dim] + torch._check( + dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified" + ) + + if n is not None: + input = _resize_fft_input(input, dims, (n,)) + + ret = prims.fft_r2c(input, dim=dims, onesided=onesided) + ret = _apply_norm(ret, norm, dim_size, forward) + return ret if forward else torch.conj(ret) + + +def _fft_c2c( + func_name: str, + input: TensorLikeType, + n: Optional[int], + dim: int, + norm: NormType, + forward: bool, +) -> TensorLikeType: + """Common code for performing any complex to complex FFT (fft or ifft)""" + torch._check( + input.dtype.is_complex, + lambda: f"{func_name} expects a complex input tensor, but got {input.dtype}", + ) + dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),) + dim_size = n if n is not None else input.shape[dim] + torch._check( + dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified" + ) + + if n is not None: + input = _resize_fft_input(input, dims, (n,)) + + ret = prims.fft_c2c(input, dim=dims, forward=forward) + return _apply_norm(ret, norm, dim_size, forward) + + +@register_decomposition(aten.fft_fft) +@out_wrapper() +def fft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + if input.dtype.is_complex: + return _fft_c2c("fft", input, n, dim, norm, forward=True) + else: + return _fft_r2c("fft", input, n, dim, norm, forward=True, onesided=False) + + +@register_decomposition(aten.fft_ifft) +@out_wrapper() +def ifft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + if input.dtype.is_complex: + return _fft_c2c("ifft", 
input, n, dim, norm, forward=False) + else: + return _fft_r2c("ifft", input, n, dim, norm, forward=False, onesided=False) + + +@register_decomposition(aten.fft_rfft) +@out_wrapper() +def rfft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + return _fft_r2c("rfft", input, n, dim, norm, forward=True, onesided=True) + + +@register_decomposition(aten.fft_irfft) +@out_wrapper() +def irfft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + return _fft_c2r("irfft", input, n, dim, norm, forward=False) + + +@register_decomposition(aten.fft_hfft) +@out_wrapper() +def hfft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + return _fft_c2r("hfft", input, n, dim, norm, forward=True) + + +@register_decomposition(aten.fft_ihfft) +@out_wrapper() +def ihfft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + return _fft_r2c("ihfft", input, n, dim, norm, forward=False, onesided=True) + + +class _ShapeAndDims(NamedTuple): + shape: Tuple[int, ...] + dims: Tuple[int, ...] + + +def _canonicalize_fft_shape_and_dim_args( + input: TensorLikeType, shape: Optional[ShapeType], dim: Optional[DimsType] +) -> _ShapeAndDims: + """Convert the shape and dim arguments into a canonical form where neither are optional""" + input_dim = input.ndim + input_sizes = input.shape + + if dim is not None: + if not isinstance(dim, Sequence): + dim = (dim,) + ret_dims = utils.canonicalize_dims(input_dim, dim, wrap_scalar=False) + + # Check dims are unique + torch._check( + len(set(ret_dims)) == len(ret_dims), lambda: "FFT dims must be unique" + ) + + if shape is not None: + if not isinstance(shape, Sequence): + shape = (shape,) + + # Has shape, might have dim + torch._check( + dim is None or len(dim) == len(shape), + lambda: "When given, dim and shape arguments must have the same length", + ) + transform_ndim = len(shape) + + torch._check( + transform_ndim <= input_dim, + lambda: f"Got shape with {transform_ndim} values but input tensor " + f"only has {input_dim} dimensions.", + ) + + # If shape is given, dims defaults to the last len(shape) dimensions + if dim is None: + ret_dims = tuple(range(input_dim - transform_ndim, input_dim)) + + # Translate any -1 values in shape to the default length + ret_shape = tuple( + s if s != -1 else input_sizes[d] for (s, d) in zip(shape, ret_dims) # type: ignore[possibly-undefined] + ) + elif dim is None: + # No shape, no dim + ret_dims = tuple(range(input_dim)) + ret_shape = tuple(input_sizes) + else: + # No shape, has dim + ret_shape = tuple(input_sizes[d] for d in ret_dims) # type: ignore[possibly-undefined] + + for n in ret_shape: + torch._check(n > 0, lambda: f"Invalid number of data points ({n}) specified") + + return _ShapeAndDims(shape=ret_shape, dims=ret_dims) # type: ignore[possibly-undefined] + + +def _prod(xs: Iterable[int]) -> int: + """Compute product of a list""" + prod = 1 + for x in xs: + prod *= x + return prod + + +def _fftn_c2c( + function_name: str, + input: TensorLikeType, + shape: Tuple[int, ...], + dim: Tuple[int, ...], + norm: NormType, + forward: bool, +) -> TensorLikeType: + """Common code for n-dimensional complex to complex FFTs (fftn or ifftn)""" + torch._check( + input.dtype.is_complex, + lambda: f"{function_name} expects a complex input tensor, " + f"but got {input.dtype}", + ) + x = 
_resize_fft_input(input, dim, shape) + output = prims.fft_c2c(x, dim=dim, forward=forward) + return _apply_norm(output, norm=norm, signal_numel=_prod(shape), forward=forward) + + +@register_decomposition(aten.fft_fftn) +@out_wrapper() +def fftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim) + x = _maybe_promote_tensor_fft(input, require_complex=True) + return _fftn_c2c("fftn", x, shape, dim, norm, forward=True) + + +@register_decomposition(aten.fft_ifftn) +@out_wrapper() +def ifftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim) + x = _maybe_promote_tensor_fft(input, require_complex=True) + return _fftn_c2c("ifftn", x, shape, dim, norm, forward=False) + + +@register_decomposition(aten.fft_rfftn) +@out_wrapper() +def rfftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + torch._check( + not input.dtype.is_complex, + lambda: f"rfftn expects a real-valued input tensor, but got {input.dtype}", + ) + shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim) + input = _maybe_promote_tensor_fft(input, require_complex=False) + input = _resize_fft_input(input, dim, shape) + out = prims.fft_r2c(input, dim=dim, onesided=True) + return _apply_norm(out, norm=norm, signal_numel=_prod(shape), forward=True) + + +@register_decomposition(aten.fft_ihfftn) +@out_wrapper() +def ihfftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + torch._check( + not input.dtype.is_complex, + lambda: f"ihfftn expects a real-valued input tensor, but got {input.dtype}", + ) + shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim) + torch._check(len(shape) > 0, lambda: "ihfftn must transform at least one axis") + input = _maybe_promote_tensor_fft(input, require_complex=False) + input = _resize_fft_input(input, dim, shape) + + tmp = prims.fft_r2c(input, dim=dim[-1:], onesided=True) + + if len(dim) == 1: + tmp = _apply_norm(tmp, norm=norm, signal_numel=shape[0], forward=False) + return prims.conj(tmp) + + tmp = prims.conj_physical(tmp) + tmp = prims.fft_c2c(tmp, dim=dim[:-1], forward=False) + return _apply_norm(tmp, norm=norm, signal_numel=_prod(shape), forward=False) + + +class _CanonicalizeC2rReturn(NamedTuple): + shape: Tuple[int, ...] + dim: Tuple[int, ...] 
+ last_dim_size: int + + +def _canonicalize_fft_c2r_shape_and_dim_args( + fname: str, + input: TensorLikeType, + s: Optional[ShapeType], + dim: Optional[DimsType], +) -> _CanonicalizeC2rReturn: + """Canonicalize shape and dim arguments for n-dimensional c2r transforms, + as well as calculating the last_dim_size which is shape[dim[-1]] for the output""" + (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim) + torch._check(len(shape) > 0, lambda: f"{fname} must transform at least one axis") + + if s is None or s[-1] == -1: + last_dim_size = 2 * (input.shape[dim[-1]] - 1) + else: + last_dim_size = shape[-1] + + torch._check( + last_dim_size >= 1, + lambda: f"Invalid number of data points ({last_dim_size}) specified", + ) + + shape_list = list(shape) + shape_list[-1] = last_dim_size // 2 + 1 + return _CanonicalizeC2rReturn( + shape=tuple(shape_list), dim=dim, last_dim_size=last_dim_size + ) + + +@register_decomposition(aten.fft_irfftn) +@out_wrapper() +def irfftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args( + "irfftn", input, s, dim + ) + input = _maybe_promote_tensor_fft(input, require_complex=True) + input = _resize_fft_input(input, dim, shape) + out = prims.fft_c2r(input, dim=dim, last_dim_size=last_dim_size) + return _apply_norm(out, norm, _prod(out.shape[d] for d in dim), forward=False) + + +@register_decomposition(aten.fft_hfftn) +@out_wrapper() +def hfftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args( + "hfftn", input, s, dim + ) + input = _maybe_promote_tensor_fft(input, require_complex=True) + input = _resize_fft_input(input, dim, shape) + + tmp = prims.fft_c2c(input, dim=dim[:-1], forward=True) if len(dim) > 1 else input + tmp = _apply_norm(tmp, norm, _prod(shape[:-1]), forward=True) + tmp = prims.conj_physical(tmp) + out = prims.fft_c2r(tmp, dim=dim[-1:], last_dim_size=last_dim_size) + return _apply_norm(out, norm, last_dim_size, forward=True) + + +@register_decomposition(aten.fft_fft2) +@out_wrapper() +def fft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return torch.fft.fftn(input, s=s, dim=dim, norm=norm) + + +@register_decomposition(aten.fft_ifft2) +@out_wrapper() +def ifft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return torch.fft.ifftn(input, s=s, dim=dim, norm=norm) + + +@register_decomposition(aten.fft_rfft2) +@out_wrapper() +def rfft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return torch.fft.rfftn(input, s=s, dim=dim, norm=norm) + + +@register_decomposition(aten.fft_irfft2) +@out_wrapper() +def irfft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return torch.fft.irfftn(input, s=s, dim=dim, norm=norm) + + +@register_decomposition(aten.fft_hfft2) +@out_wrapper() +def hfft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return 
torch.fft.hfftn(input, s=s, dim=dim, norm=norm) + + +@register_decomposition(aten.fft_ihfft2) +@out_wrapper() +def ihfft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return torch.fft.ihfftn(input, s=s, dim=dim, norm=norm) + + +def _default_alldims(dim: Optional[DimsType], x: TensorLikeType) -> List[int]: + """Convert Optional[DimsType] to a simple list, defaulting to all dimensions""" + if dim is None: + return list(range(x.ndim)) + elif not isinstance(dim, Sequence): + return [dim] + else: + return list(dim) + + +@register_decomposition(aten.fft_fftshift) +def fftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType: + dims = _default_alldims(dim, input) + shift = [input.shape[d] // 2 for d in dims] + return torch.roll(input, shift, dims) + + +@register_decomposition(aten.fft_ifftshift) +def ifftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType: + dims = _default_alldims(dim, input) + shift = [(input.shape[d] + 1) // 2 for d in dims] + return torch.roll(input, shift, dims) diff --git a/venv/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py b/venv/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b948e1eccc0b785eeccd000b39b9d68725b39e17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py @@ -0,0 +1,308 @@ +from functools import partial + +from typing import List, Optional, Tuple, Union + +import torch + +import torch._prims as prims + +import torch._prims_common as utils +import torch._refs as refs +import torch._refs.linalg as linalg +from torch import Tensor +from torch._prims_common import ( + check_fp_or_complex, + check_is_matrix, + Dim, + DimsType, + ELEMENTWISE_TYPE_PROMOTION_KIND, + IntLike, + NumberType, + TensorLikeType, +) +from torch._prims_common.wrappers import ( + _maybe_convert_to_dtype, + elementwise_type_promotion_wrapper, + out_wrapper, +) + + +__all__ = [ + "diagonal", + "matrix_norm", + "norm", + "svd", + "svdvals", + "vector_norm", + "vecdot", + "cross", +] + + +def _check_norm_dtype(dtype: Optional[torch.dtype], x_dtype: torch.dtype, fn_name: str): + """ + Checks related to the dtype kwarg in `linalg.*norm` functions + """ + if dtype is not None: + torch._check( + utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype), + lambda: f"{fn_name}: dtype should be floating point or complex. Got {dtype}", + ) + torch._check( + utils.is_complex_dtype(dtype) == utils.is_complex_dtype(x_dtype), + lambda: "{fn_name}: dtype should be {d} for {d} inputs. 
Got {dtype}".format( + fn_name=fn_name, + d="complex" if utils.is_complex_dtype(x_dtype) else "real", + dtype=dtype, + ), + ) + torch._check( + utils.get_higher_dtype(dtype, x_dtype) == dtype, + lambda: f"{fn_name}: the dtype of the input ({x_dtype}) should be convertible " + "without narrowing to the specified dtype ({dtype})", + ) + + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition +from torch._decomp.decompositions import pw_cast_for_opmath + + +@register_decomposition(torch._ops.ops.aten.linalg_cross) +@out_wrapper() +@pw_cast_for_opmath +def cross(a: Tensor, b: Tensor, dim: int = -1): + torch._check( + a.ndim == b.ndim, + lambda: "linalg.cross: inputs must have the same number of dimensions.", + ) + torch._check( + a.size(dim) == 3 and b.size(dim) == 3, + lambda: f"linalg.cross: inputs dim {dim} must have length 3, got {a.size(dim)} and {b.size(dim)}", + ) + a, b = torch.broadcast_tensors(a, b) + dim = utils.canonicalize_dim(a.ndim, dim) + idx = torch.arange(3, device=a.device) + return a.index_select(dim, (idx + 1) % 3) * b.index_select( + dim, (idx + 2) % 3 + ) - a.index_select(dim, (idx + 2) % 3) * b.index_select(dim, (idx + 1) % 3) + + +def diagonal( + input: TensorLikeType, + *, + offset: int = 0, + dim1: int = -2, + dim2: int = -1, +) -> TensorLikeType: + return torch.diagonal(input, offset=offset, dim1=dim1, dim2=dim2) + + +@register_decomposition(torch._ops.ops.aten.linalg_vector_norm) +@out_wrapper(exact_dtype=True) +def vector_norm( + x: TensorLikeType, + ord: Union[float, int] = 2, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> Tensor: + # Checks + check_fp_or_complex(x.dtype, "linalg.vector_norm") + + if isinstance(dim, Dim): + dim = [dim] # type: ignore[assignment] + + if x.numel() == 0 and (ord < 0.0 or ord == float("inf")): + torch._check( + dim is not None and len(dim) != 0, + lambda: f"linalg.vector_norm cannot compute the {ord} norm on an empty tensor " + "because the operation does not have an identity", + ) + shape = x.shape + assert dim is not None # mypy does not seem to be able to see through check? 
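+        # every reduced dimension must itself be non-empty, since amax/amin over an empty slice has no identity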
+ for d in dim: + torch._check( + shape[d] != 0, + lambda: f"linalg.vector_norm cannot compute the {ord} norm on the " + f"dimension {d} because this dimension is empty and the " + "operation does not have an identity", + ) + _check_norm_dtype(dtype, x.dtype, "linalg.vector_norm") + + computation_dtype, result_dtype = utils.reduction_dtypes( + x, utils.REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, dtype + ) + + to_result_dtype = partial(_maybe_convert_to_dtype, dtype=result_dtype) + + # Implementation + if ord == 0.0: + return torch.sum(torch.ne(x, 0.0), dim=dim, keepdim=keepdim, dtype=result_dtype) + elif ord == float("inf"): + return to_result_dtype(torch.amax(torch.abs(x), dim=dim, keepdim=keepdim)) # type: ignore[return-value,arg-type] + elif ord == float("-inf"): + return to_result_dtype(torch.amin(torch.abs(x), dim=dim, keepdim=keepdim)) # type: ignore[return-value,arg-type] + else: + # From here on the computation dtype is important as the reduction is non-trivial + x = _maybe_convert_to_dtype(x, computation_dtype) # type: ignore[assignment] + reduce_sum = partial(torch.sum, dim=dim, keepdim=keepdim) + + is_ord_even = ord % 2 == 0 if isinstance(ord, IntLike) else ord % 2.0 == 0.0 + if not (is_ord_even and utils.is_float_dtype(x.dtype)): + x = torch.abs(x) + return to_result_dtype(torch.pow(reduce_sum(torch.pow(x, ord)), 1.0 / ord)) # type: ignore[return-value] + + +def _backshift_permutation(dim0, dim1, ndim): + # Auxiliary function for matrix_norm + # Computes the permutation that moves the two given dimensions to the back + ret = [i for i in range(ndim) if i != dim0 and i != dim1] + ret.extend((dim0, dim1)) + return ret + + +def _inverse_permutation(perm): + # Given a permutation, returns its inverse. It's equivalent to argsort on an array + return [i for i, j in sorted(enumerate(perm), key=lambda i_j: i_j[1])] + + +# CompositeImplicitAutograd +@out_wrapper(exact_dtype=True) +def matrix_norm( + A: TensorLikeType, + ord: Union[float, str] = "fro", + dim: DimsType = (-2, -1), + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # shape + check_is_matrix(A, "linalg.matrix_norm") + # dim + dim = utils.canonicalize_dims(A.ndim, dim) + if isinstance(dim, Dim): + dim = (dim,) # type: ignore[assignment] + torch._check( + len(dim) == 2, lambda: "linalg.matrix_norm: dim must be a 2-tuple. Got {dim}" + ) + torch._check( + dim[0] != dim[1], + lambda: "linalg.matrix_norm: dims must be different. 
Got ({dim[0]}, {dim[1]})", + ) + # dtype arg + _check_norm_dtype(dtype, A.dtype, "linalg.matrix_norm") + + if isinstance(ord, str): + # ord + torch._check( + ord in ("fro", "nuc"), + lambda: "linalg.matrix_norm: Order {ord} not supported.", + ) + # dtype + check_fp_or_complex( + A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != "nuc" + ) + + if ord == "fro": + return vector_norm(A, 2, dim, keepdim, dtype=dtype) + else: # ord == "nuc" + if dtype is not None: + A = _maybe_convert_to_dtype(A, dtype) # type: ignore[assignment] + perm = _backshift_permutation(dim[0], dim[1], A.ndim) + result = torch.sum(svdvals(prims.transpose(A, perm)), -1, keepdim) + if keepdim: + inv_perm = _inverse_permutation(perm) + result = prims.transpose(torch.unsqueeze(result, -1), inv_perm) + return result + else: + # ord + abs_ord = abs(ord) + torch._check( + abs_ord in (2, 1, float("inf")), + lambda: "linalg.matrix_norm: Order {ord} not supported.", + ) + # dtype + check_fp_or_complex( + A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != 2 + ) + + max_min = partial(torch.amax if ord > 0.0 else torch.amin, keepdim=keepdim) + + if abs_ord == 2.0: + if dtype is not None: + A = _maybe_convert_to_dtype(A, dtype) # type: ignore[assignment] + perm = _backshift_permutation(dim[0], dim[1], A.ndim) + result = max_min(svdvals(prims.transpose(A, perm)), dim=-1) + if keepdim: + inv_perm = _inverse_permutation(perm) + result = prims.transpose(torch.unsqueeze(result, -1), inv_perm) + return result + else: # 1, -1, inf, -inf + dim0, dim1 = dim + if abs_ord == float("inf"): + dim0, dim1 = dim1, dim0 + if not keepdim and (dim0 < dim1): + dim1 -= 1 + return max_min( + vector_norm(A, 1.0, dim=dim0, keepdim=keepdim, dtype=dtype), dim1 + ) + + +# CompositeImplicitAutograd +@out_wrapper(exact_dtype=True) +def norm( + A: TensorLikeType, + ord: Optional[Union[float, str]] = None, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + if dim is not None: + if isinstance(dim, Dim): + dim = (dim,) # type: ignore[assignment] + torch._check( + len(dim) in (1, 2), + lambda: "linalg.norm: If dim is specified, it must be of length 1 or 2. Got {dim}", + ) + elif ord is not None: + torch._check( + A.ndim in (1, 2), + lambda: "linalg.norm: If dim is not specified but ord is, the input must be 1D or 2D. 
Got {A.ndim}D", + ) + + if ord is not None and ( + (dim is not None and len(dim) == 2) or (dim is None and A.ndim == 2) + ): + if dim is None: + dim = (0, 1) + return matrix_norm(A, ord, dim, keepdim, dtype=dtype) + else: + if ord is None: + ord = 2.0 + return vector_norm(A, ord, dim, keepdim, dtype=dtype) + + +# CompositeImplicitAutograd +@out_wrapper("U", "S", "Vh", exact_dtype=True) +def svd(A: TensorLikeType, full_matrices: bool = True) -> Tuple[Tensor, Tensor, Tensor]: + return prims.svd(A, full_matrices=full_matrices) + + +# CompositeImplicitAutograd +@out_wrapper(exact_dtype=True) +def svdvals(A: TensorLikeType) -> Tensor: + return svd(A, full_matrices=False)[1] + + +# CompositeImplicitAutograd +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("x", "y"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def vecdot(x: Tensor, y: Tensor, dim: int = -1) -> Tensor: + check_fp_or_complex(x.dtype, "linalg.vecdot") + return (x.conj() * y).sum(dim=dim) diff --git a/venv/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4fd1ee071df02a9e4c86b91cc927ae4e12c8cf3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_refs/nn/__init__.py b/venv/lib/python3.10/site-packages/torch/_refs/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3646b96be90be51e86070360b23212ed450186a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_refs/nn/__init__.py @@ -0,0 +1,3 @@ +from typing import List + +__all__: List[str] = [] diff --git a/venv/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9dab36bb29880030ff618727bb2cbcf92c37db8a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py b/venv/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d6be2180e8b34cb1fdac215e6d885f9450c5103f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py @@ -0,0 +1,1230 @@ +import math +from functools import wraps +from typing import Callable, Optional, Union + +import torch +import torch._prims as prims +import torch._prims_common as utils +import torch._refs as refs +from torch._decomp import register_decomposition +from torch._prims_common import ( + ELEMENTWISE_TYPE_PROMOTION_KIND, + NumberType, + ShapeType, + TensorLike, + TensorLikeType, +) +from torch._prims_common.wrappers import ( + elementwise_type_promotion_wrapper, + elementwise_unary_scalar_wrapper, + out_wrapper, +) +from torch._refs import _make_inplace + +__all__ = [ + "alpha_dropout", + "celu", + "celu_", + "dropout", + "elu", + "elu_", + "gelu", + "glu", + "group_norm", + "hardshrink", + "hardtanh", + "hinge_embedding_loss", + "huber_loss", + "l1_loss", + "layer_norm", + "leaky_relu", + "log_softmax", + "margin_ranking_loss", + "mish", + "mish_", + "mse_loss", + "nll_loss", + "pairwise_distance", + 
"pdist", + "poisson_nll_loss", + "prelu", + "relu", + "relu6", + "selu", + "selu_", + "smooth_l1_loss", + "softmax", + "softmin", + "softplus", + "softshrink", + "tanhshrink", + "threshold", + "threshold_", + "triplet_margin_loss", +] + +Tensor = torch.Tensor +aten = torch._ops.ops.aten +DispatchKey = torch._C.DispatchKey # type: ignore[attr-defined] + + +def _dropout_helper( + self: TensorLikeType, + val: float, +) -> TensorLikeType: + """ + Helper function for all dropout-type operators. During training, + some of the elements of the input tensor are randomly masked. + + Returns the masked tensor of the boolean values. + + """ + + return ( + refs._uniform_helper( + self.shape, low=0.0, high=1.0, dtype=torch.float32, device=self.device + ) + < val + ) + + +@register_decomposition(aten.alpha_dropout) +def alpha_dropout( + self: TensorLikeType, p: float = 0.5, training: bool = False, inplace: bool = False +) -> TensorLikeType: + if inplace: + raise NotImplementedError + + if not training: + return self + + torch._check( + p <= 1 and p >= 0, + lambda: f"dropout probability has to be between 0 and 1, but got, {p}", + ) + + if p == 1: + return torch.zeros_like(self) + + if p == 0: + return self + + dropout_mask = _dropout_helper(self, 1 - p) + + # From paper: Self-Normalizing Neural Networks (https://arxiv.org/pdf/1706.02515.pdf) + # alpha = - SELU.alpha * SELU.scale, here + # SELU.alpha = 1.6732632423543772848170429916717 and + # SELU.scale = 1.0507009873554804934193349852946 + alpha = -1.7580993408473766 + + a = 1.0 / math.sqrt((alpha * alpha * p + 1) * (1 - p)) + b = torch.logical_not(dropout_mask) + b = b * (alpha * a) + alpha * a * p + dropout_mask = a * dropout_mask + + return self * dropout_mask + b + + +def _inplace_wrapper(fn): + """ + Given a nn.functional non-linearity, implements its `inplace: bool` argument + """ + + # nb. We use the name of the first argument used in the unary references + @wraps(fn) + def _fn(a, *args, inplace=False, **kwargs): + if inplace: + torch._check( + "out" not in kwargs, + lambda: "Cannot set inplace=True and pass out= at the same time", + ) + return fn(a, *args, inplace=False, out=a, **kwargs) + else: + return fn(a, *args, inplace=False, **kwargs) + + return _fn + + +# celu is implemented specially because it has an alpha argument +# celu is very similar to elu +@register_decomposition(aten.celu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def celu( + a: TensorLikeType, alpha: Optional[NumberType] = None, inplace: bool = False +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.celu + """ + + if inplace: + raise NotImplementedError + + rhs: TensorLikeType + if alpha is not None: + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(alpha), python_type): + msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!" 
+ raise ValueError(msg) + rhs = alpha * torch.expm1(torch.true_divide(a, alpha)) # type: ignore[arg-type] + else: + rhs = torch.expm1(a) + + return torch.where(a > 0, a, rhs) + + +@_inplace_wrapper +@out_wrapper() +def dropout( + a: TensorLikeType, p: float = 0.5, training: bool = True, inplace: bool = False +) -> TensorLikeType: + if inplace: + raise NotImplementedError + + if not training: + return a + + torch._check( + p <= 1 and p >= 0, + lambda: f"dropout probability has to be between 0 and 1, but got, {p}", + ) + + if p == 1: + return torch.zeros_like(a) + + if p == 0: + return a + + scale = 1 / (1 - p) + dropout_mask = _dropout_helper(a, 1 - p) + + return a * dropout_mask * scale + + +@register_decomposition(aten.elu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def elu( + a: TensorLikeType, + alpha: NumberType = 1.0, + scale: NumberType = 1.0, + input_scale: NumberType = 1.0, + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.elu + """ + if inplace: + raise NotImplementedError + + # nb. This should be factored out into a can_cast aux function + python_type = utils.dtype_to_type(a.dtype) + torch._check( + utils.is_weakly_lesser_type(type(input_scale), python_type), + lambda: f"input_scale argument of type {type(input_scale)} cannot be safely cast to type {python_type}!", + ) + torch._check( + utils.is_weakly_lesser_type(type(scale), python_type), + lambda: f"scale argument of type {type(scale)} cannot be safely cast to type {python_type}!", + ) + torch._check( + utils.is_weakly_lesser_type(type(alpha), python_type), + lambda: f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!", + ) + + return torch.where(a > 0, scale * a, (alpha * scale) * torch.expm1(a * input_scale)) + + +@register_decomposition(aten.relu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def relu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.relu + """ + + if inplace: + raise NotImplementedError + + return torch.where(torch.le(a, 0), 0, a) + + +def group_norm( + input: Tensor, + num_groups: int, + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + eps: float = 1e-5, +) -> Tensor: + """ + Reference implementation of :func:`torch.nn.functional.group_norm`. 
+ """ + torch._check( + input.ndim >= 2, + lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}", + ) + + batch_size = input.shape[0] + num_channels = input.shape[1] + torch._check( + num_channels % num_groups == 0, + lambda: "Expected number of channels in input to be divisible by num_groups, " + + f"but got input of shape {input.shape} and num_groups = {num_groups}", + ) + + # input shape is (N, C, *), so we flatten all inner dimensions except (N, C) + flattened_inner_size = 1 + for dim_length in input.shape[2:]: + flattened_inner_size *= dim_length + + return torch.native_group_norm( + input, + weight, + bias, + batch_size, + num_channels, + flattened_inner_size, + num_groups, + eps, + )[0] + + +def layer_norm( + input: Tensor, + normalized_shape: ShapeType, + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + eps: float = 1e-5, +) -> Tensor: + """ + Reference implementation of :func:`torch.nn.functional.layer_norm`. + """ + return torch.native_layer_norm(input, normalized_shape, weight, bias, eps)[0] + + +@register_decomposition(aten.leaky_relu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def leaky_relu( + a: TensorLikeType, negative_slope: float = 0.01, inplace: bool = False +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.leaky_relu + """ + + if inplace: + raise NotImplementedError + + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(negative_slope), python_type): + msg = f"negative_slope argument of type {type(negative_slope)} cannot be safely cast to type {python_type}!" + raise ValueError(msg) + return torch.where(torch.gt(a, 0), a, torch.mul(a, negative_slope)) + + +@register_decomposition(aten.mish) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def mish(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.mish + """ + + if inplace: + raise NotImplementedError + return a * torch.tanh(torch.nn.functional.softplus(a)) + + +@register_decomposition(aten.selu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def selu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.selu + """ + if inplace: + raise NotImplementedError + + alpha = 1.6732632423543772848170429916717 + scale = 1.0507009873554804934193349852946 + + rhs = alpha * torch.expm1(a) + + return scale * torch.where(a > 0, a, rhs) + + +# Forwarding alias: the functional variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def softmax( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. 
+ torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +# CompositeImplicitAutograd - don't register decomp +def softmin( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. + torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.softmax(a=-a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +# softplus is implemented specially because it has beta and threshold arguments +@register_decomposition(aten.softplus) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def softplus( + a: TensorLikeType, + beta: Optional[NumberType] = None, + threshold: NumberType = 20, + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.softplus + """ + + if inplace: + raise NotImplementedError + + rhs: TensorLikeType + if beta is not None: + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(beta), python_type): + msg = f"beta argument of type {type(beta)} cannot be safely cast to type {python_type}!" + raise ValueError(msg) + scaled_input = a * beta + rhs = torch.true_divide(torch.log1p(torch.exp(scaled_input)), beta) # type: ignore[arg-type] + + else: + scaled_input = a + rhs = torch.log1p(torch.exp(scaled_input)) + + return torch.where(scaled_input > threshold, a, rhs) + + +@aten.hardshrink.default.py_impl(DispatchKey.Autograd) +@register_decomposition(aten.hardshrink) +@out_wrapper() +def hardshrink(a: TensorLikeType, lambd: float = 0.5): + # Formula for reference, + # hardshrink(x) = x if x > lambd + # = x if x < -lambd + # = 0 otherwise + return torch.where(torch.abs(a) <= lambd, 0, a) + + +@aten.softshrink.default.py_impl(DispatchKey.Autograd) +@register_decomposition(aten.softshrink) +@out_wrapper() +def softshrink(a: TensorLikeType, lambd: float = 0.5): + # Formula for reference, + # softshrink(x) = x - lambd if x > lambd + # = x + lambd if x < -lambd + # = 0 otherwise + torch._check( + lambd >= 0, + lambda: f"lambda must be greater or equal to 0, but found to be {lambd}", + ) + # We implement this in one torch.where to generate better code in the backward + # see https://github.com/pytorch/pytorch/pull/107052#discussion_r1293748211 + return torch.where(torch.abs(a) > lambd, a - torch.sign(a) * lambd, 0) + + +# Losses +def _reduction_int_to_str(reduction: int) -> str: + from torch._decomp.decompositions import Reduction + + if reduction == Reduction.NONE.value: + return "none" + elif reduction == Reduction.MEAN.value: + return "mean" + elif reduction == Reduction.SUM.value: + return "sum" + else: + raise ValueError(f"{reduction} is not a valid value for reduction") + + +def _apply_loss_reduction(loss: TensorLikeType, reduction: str) -> TensorLikeType: + if reduction == "sum": + return torch.sum(loss) + elif reduction == "mean": + return torch.mean(loss) + else: # reduction == "none" + return loss + + +def _check_reduction_value(reduction: str): + if reduction not in 
("mean", "sum", "none"): + raise ValueError(f"{reduction} is not a valid value for reduction") + + +# This helper function maps depreciated arguments, "size_average" and "reduce" +# to their corresponding "reduction" string argument +def _get_string_reduction_arg( + *, size_average: Optional[bool], reduce: Optional[bool] +) -> str: + if size_average is None: + size_average = True + if reduce is None: + reduce = True + if size_average and reduce: + ret = "mean" + elif reduce: + ret = "sum" + else: + ret = "none" + return ret + + +# CompositeImplicitAutograd - don't register decomp +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def l1_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.l1_loss + """ + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + loss = torch.abs(input - target) + return _apply_loss_reduction(loss, reduction) + + +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def smooth_l1_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", + beta: float = 1.0, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.smooth_l1_loss + """ + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + + if beta == 0.0: + return torch.nn.functional.l1_loss( + input, target, size_average=size_average, reduce=reduce, reduction=reduction + ) + else: + loss = torch.abs(input - target) + loss = torch.where(loss < beta, 0.5 * loss**2 / beta, loss - 0.5 * beta) + return _apply_loss_reduction(loss, reduction) + + +# Forwarding alias: the functional variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def log_softmax( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. 
+ torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +@register_decomposition(aten.margin_ranking_loss) +def margin_ranking_loss( + input1: TensorLikeType, + input2: TensorLikeType, + target: TensorLikeType, + margin: float = 0.0, + reduction: str = "mean", +) -> TensorLikeType: + # loss_without_reduction = max(0, −target * (input1 − input2) + margin) + if input1.ndim != input2.ndim or input1.ndim != target.ndim: + raise RuntimeError( + "margin_ranking_loss : All input tensors should have same dimension but got sizes: " + f"input1: {input1.shape}, input2: {input2.shape}, target: {target.shape} " + ) + _check_reduction_value(reduction) + loss = torch.clamp_min(-target * (input1 - input2) + margin, 0) + return _apply_loss_reduction(loss, reduction) + + +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def mse_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + loss = torch.pow(input - target, 2) + return _apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.hinge_embedding_loss) +def hinge_embedding_loss( + input: TensorLikeType, + target: TensorLikeType, + margin: float = 1.0, + reduction: str = "mean", +) -> TensorLikeType: + # loss_without_reduction = input if y == 1 + # = max(0, margin - input) if y == -1 + _check_reduction_value(reduction) + margin_clamp = torch.clamp_min(margin - input, 0) + output_margin = torch.where(target != 1, margin_clamp, 0) + output_self = torch.where(target != -1, input, 0) + loss = output_margin + output_self + return _apply_loss_reduction(loss, reduction) + + +def _nll_loss_nd( + input: TensorLikeType, + target: TensorLikeType, + weight: Optional[TensorLikeType], + reduction: str, + ignore_index: int, +) -> TensorLikeType: + torch._check( + input.ndim > 0 and input.ndim <= 3, + lambda: f"Expected input dimension to be either [1, 2, 3] but received {input.ndim}.", + ) + + torch._check( + (input.ndim == 1) or (input.shape[0] == target.shape[0]), + lambda: f"Expected input batch size {input.shape[0]} to match target batch size {target.shape[0]}.", + ) + + _check_reduction_value(reduction) + + flat_target = torch.flatten(target) + ignore_classes_mask = torch.eq(flat_target, ignore_index) + + # TODO: Enable data-dependent checks with debug mode + # TODO: This check does not work with FakeTensor inputs; See Issue #85834 + # Explicit cast for class_check to bool; See Issue #78071 + """ + from torch._subclasses.fake_tensor import FakeTensor + num_classes = input.shape[1] if input.ndim > 1 else input.shape[0] + valid_classes_mask = torch.logical_and( + (flat_target >= 0), (flat_target < num_classes) + ) + class_check = torch.all(torch.logical_or(ignore_classes_mask, valid_classes_mask)) + torch._check( + isinstance(target, FakeTensor) or bool(class_check.item()), + lambda: "A target class is out-of-bounds and not 
the ignore index.", + ) + """ + + ignore_class_weight = torch.scalar_tensor(0, dtype=input.dtype, device=input.device) + class_weight = ( + torch.scalar_tensor(1, dtype=input.dtype, device=input.device) + if weight is None + else weight[flat_target] + ) + current_weight = torch.where( + ignore_classes_mask, + ignore_class_weight, + class_weight, + ) + + if input.ndim == 1: + # implicit batch size = 1 + # input (1 batch size, C classes) + loss = -input[target] * current_weight + elif input.ndim == 2: + # input (N batch size, C classes) + batch_size = input.shape[0] + loss = -input[torch.arange(batch_size), target] * current_weight + else: + # 3D case (N batch size, C classe, K dimensions) + # input (N batch size, C classes, K) + batch_size = input.shape[0] + extent = input.shape[2] + numel = batch_size * extent + indices = torch.arange(numel) + bdx = indices // extent + kdx = indices % extent + loss = -input[bdx, flat_target, kdx] * current_weight + loss = torch.reshape(loss, target.shape) + + if reduction == "none": + return loss + elif reduction == "sum": + return torch.sum(loss) + else: + # calculate weighted mean of the loss function + return torch.sum(loss) / torch.sum(current_weight) + + +@register_decomposition(aten.nll_loss) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("input",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def nll_loss( + input: TensorLikeType, + target: TensorLikeType, + weight: Optional[TensorLikeType] = None, + size_average: Optional[bool] = None, + ignore_index: int = -100, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.nll_loss + """ + torch._check( + input.ndim > 0, + lambda: f"Expected input tensor to have 1 or more dimensions (got {input.ndim})", + ) + + # TODO: raise exception instead of converting value + # msg = "size_average and reduce args are deprecated, please use reduction argument." + # Convert these options for consistency with the eager mode + if size_average is not None or reduce is not None: + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + + # The expected behavior when the target and input have zero elements: + # reduction = 'none' --- tensor([]) + # reduction = 'sum' --- tensor(0.) + # reduction = 'mean' --- tensor(nan) + # Mean reduction on empty tensors produces NaN. See the discussion in + # https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162 + if input.numel() == 0 and target.numel() == 0: + if reduction == "none": + return torch.zeros_like(target) + elif reduction == "sum": + return torch.empty_like(target) + else: + return torch.full_like(target, float("nan")) + + # The _nll_loss_nd helper function handles the most common cases. + # ndim == 1 (Single Example) + # => Batch Size: 1, Input: (C), Target: () + # ndim == 2 (k = 1) + # => Batch Size: N, Input: (N, C), Target: (N) + # ndim == 3 (k > 1) + # => Batch Size: N, Input: (N, C, K), Target: (N, K) + if input.ndim <= 3: + return _nll_loss_nd(input, target, weight, reduction, ignore_index) + + # For ndim > 3, we reshape the input and target to 3-D case. 
+ # Input (N batch-size, C classes, k-dimensions) + # Target (N batch-size, k-dimensions) + torch._check( + input.ndim > 0 and target.ndim > 0 and target.shape[1:] == input.shape[2:], + lambda: ( + "Expected input and target to both have ndim > 0 and " + "target.shape[1:] == input.shape[2:], but got " + f"target.shape {target.shape} and input.shape {input.shape}" + ), + ) + + batch_size = input.shape[0] + num_classes = input.shape[1] + out_size = [batch_size] + list(target.shape[1:]) + + input = torch.reshape(input, [batch_size, num_classes, -1]) + target = torch.reshape(target, [batch_size, -1]) + if reduction != "none": + return _nll_loss_nd(input, target, weight, reduction, ignore_index) + else: + result = _nll_loss_nd(input, target, weight, reduction, ignore_index) + # reshape flattened inner-dim to original k-dimensions + return torch.reshape(result, out_size) + + +# TODO: This ref supports int reduction and out kwarg to be compatible with ATen: +# https://github.com/pytorch/pytorch/issues/83931 +# TODO: Could be rewritten to support complex: +# https://github.com/pytorch/pytorch/pull/85041 +@register_decomposition(aten.huber_loss) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def huber_loss( + input: TensorLikeType, + target: TensorLikeType, + reduction: Union[str, int] = "mean", + delta: float = 1.0, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.huber_loss + """ + if type(reduction) is int: + reduction = _reduction_int_to_str(reduction) + _check_reduction_value(reduction) # type: ignore[arg-type] + torch._check( + delta > 0, + lambda: "huber_loss does not support non-positive values for delta.", + ) + z = (input - target).abs() + loss = torch.where(z < delta, 0.5 * z * z, delta * (z - 0.5 * delta)) + return _apply_loss_reduction(loss, reduction) # type: ignore[arg-type] + + +# tanhshrink does not use _make_elementwise_unary_reference because it does not support out +@elementwise_unary_scalar_wrapper +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def tanhshrink(a: TensorLikeType) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.tanhshrink + """ + if not isinstance(a, TensorLike): + raise RuntimeError( + "Expected a tensor input for an elementwise unary operation!" 
+ ) + return a - torch.tanh(a) + + +@register_decomposition(aten.threshold) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def threshold( + a: TensorLikeType, + threshold: NumberType, + value: Union[bool, int, float], + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.threshold + """ + + if inplace: + raise NotImplementedError + + return torch.where(a <= threshold, value, a) + + +# CompositeImplicitAutograd - don't register decomp +# No elementwise type promotion - core op doesn't explicitly type promote +def triplet_margin_loss( + anchor: TensorLikeType, + positive: TensorLikeType, + negative: TensorLikeType, + margin: float = 1.0, + p: float = 2, + eps: float = 1e-6, + swap: bool = False, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + + # torch.nn.functional.triplet_margin_with_distance_loss has no ref defined + # since it's a pure Python implementation. Use this helper instead. + return _triplet_margin_with_distance_loss( + anchor=anchor, + positive=positive, + negative=negative, + distance_function=lambda x, y: torch.pairwise_distance(x, y, p, eps), + margin=margin, + swap=swap, + reduction=reduction, + ) + + +# Pure Python impl - don't register decomp and don't add a ref. Defined as a +# helper here since triplet_margin_loss can be nicely implemented with it. +def _triplet_margin_with_distance_loss( + anchor: TensorLikeType, + positive: TensorLikeType, + negative: TensorLikeType, + *, + distance_function: Optional[ + Callable[[TensorLikeType, TensorLikeType], TensorLikeType] + ] = None, + margin: float = 1.0, + swap: bool = False, + reduction: str = "mean", +) -> TensorLikeType: + _check_reduction_value(reduction) + + a_dim = anchor.ndim + p_dim = positive.ndim + n_dim = negative.ndim + torch._check( + a_dim == p_dim and p_dim == n_dim, + lambda: ( + f"The anchor, positive, and negative tensors are expected to have " + f"the same number of dimensions, but got: anchor {a_dim}D, " + f"positive {p_dim}D, and negative {n_dim}D inputs" + ), + ) + + if distance_function is None: + distance_function = torch.pairwise_distance + + dist_pos = distance_function(anchor, positive) + dist_neg = distance_function(anchor, negative) + # The distance swap is described in the paper "Learning shallow + # convolutional feature descriptors with triplet losses" by V. Balntas, E. + # Riba et al. If True, and if the positive example is closer to the + # negative example than the anchor is, swaps the positive example and the + # anchor in the loss computation. 
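+    # Sketch of the swap with assumed distances: if dist(anchor, positive) = 0.3,
+    # dist(anchor, negative) = 1.0 and dist(positive, negative) = 0.4, the swap
+    # replaces dist_neg with min(1.0, 0.4) = 0.4, so the margin is measured
+    # against the harder of the two negative distances.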
+ if swap: + dist_swap = distance_function(positive, negative) + dist_neg = torch.minimum(dist_neg, dist_swap) + loss = torch.clamp_min(margin + dist_pos - dist_neg, 0) + return _apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.hardtanh) +@_inplace_wrapper +@out_wrapper() +@elementwise_unary_scalar_wrapper +@elementwise_type_promotion_wrapper( + type_promoting_args=("a"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def hardtanh( + a: TensorLikeType, + min_val: NumberType = -1, + max_val: NumberType = 1, + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.hardtanh + """ + if inplace: + raise NotImplementedError + if utils.is_boolean_dtype(a.dtype): + raise RuntimeError("Bool inputs not supported for hardtanh") + + # preserve legacy behavior of boundaries not causing type promotion + if utils.is_integer_dtype(a.dtype): + min_val = int(min_val) # type: ignore[arg-type] + max_val = int(max_val) # type: ignore[arg-type] + if not (a.dtype != torch.uint8 or (min_val >= 0 and max_val >= 0)): + raise RuntimeError( + "Cannot do hardtanh on an unsigned type with negative limits" + ) + return torch.clamp(a, min_val, max_val) # type: ignore[arg-type] + + +@register_decomposition(aten.gelu) +@out_wrapper() +@elementwise_unary_scalar_wrapper +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def gelu(a: TensorLikeType, approximate: str = "none") -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.gelu + """ + if not isinstance(a, TensorLike): + raise RuntimeError( + "Expected a tensor input for an elementwise unary operation!" + ) + M_SQRT2 = 1.41421356237309504880 + M_SQRT1_2 = 0.70710678118654752440 + M_2_SQRTPI = 1.12837916709551257390 + if approximate == "tanh": + kBeta = M_SQRT2 * M_2_SQRTPI * 0.5 + kKappa = 0.044715 + a_cube = a * a * a + inner = kBeta * (a + kKappa * a_cube) + return 0.5 * a * (1 + torch.tanh(inner)) + elif approximate == "none": + kAlpha = M_SQRT1_2 + return a * 0.5 * (1 + torch.erf(a * kAlpha)) + else: + raise RuntimeError("approximate argument must be either none or tanh.") + + +# CompositeImplicitAutograd - don't register decomp +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def poisson_nll_loss( + input: TensorLikeType, + target: TensorLikeType, + log_input: bool = True, + full: bool = False, + size_average: Optional[bool] = None, + eps: float = 1e-8, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.poisson_nll_loss + """ + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." 
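+        # Sketch of the legacy-argument mapping performed below (for reference):
+        #   size_average=True,  reduce=True  -> reduction="mean"   (the default)
+        #   size_average=False, reduce=True  -> reduction="sum"
+        #   reduce=False (any size_average)  -> reduction="none"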
+ reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + if log_input: + loss = torch.exp(input) - target * input + else: + loss = input - target * torch.log(input + eps) + + if full: + stirling_term = ( + target * torch.log(target) - target + 0.5 * torch.log(2 * torch.pi * target) + ) + # avoid inplace add + loss = loss + stirling_term.masked_fill(target <= 1, 0) + return _apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.prelu) +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "weight"), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def prelu(a: TensorLikeType, weight: TensorLikeType) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.prelu + """ + torch._check( + isinstance(a, TensorLike), + lambda: f"prelu: Expected `a` to be tensor, but got: {type(a)}", + ) + torch._check( + isinstance(weight, TensorLike), + lambda: f"prelu: Expected `weight` to be tensor, but got: {type(weight)}", + ) + + if weight.numel() != 1: + torch._check(a.ndim > 0, lambda: "Not allow zero-dim input tensor.") + channel_size = a.shape[1] if a.ndim >= 2 else 1 + torch._check( + weight.numel() == channel_size, + lambda: f"Mismatch of parameter numbers and input channel size. Found parameter numbers =" + f" {weight.numel()} and channel size = {channel_size}.", + ) + + torch._check( + weight.ndim == 0 or weight.ndim == 1, + lambda: f"prelu: Expected `weight` to be a scalar or 1D tensor, but got: " + f"ndim = {weight.ndim}", + ) + if a.ndim == 0: + weight = weight[0] if weight.ndim == 1 else weight + else: + weight = prims.broadcast_in_dim( + weight, a.shape, tuple() if weight.ndim == 0 else (0 if a.ndim == 1 else 1,) + ) + + return torch.where(a > 0, a, a * weight) + + +@register_decomposition(aten.relu6) +@_inplace_wrapper +@out_wrapper() +def relu6(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.relu6 + """ + if inplace: + raise NotImplementedError + + # See https://github.com/pytorch/pytorch/pull/81142#discussion_r918220126 + # It may be better to use clamp here, but we use hardtanh to replicate + # the behavior of the existing implementation + return torch.nn.functional.hardtanh(a, 0, 6) + + +@register_decomposition(aten.glu) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def glu(a: TensorLikeType, dim: int = -1) -> TensorLikeType: + dim = utils.canonicalize_dims(a.ndim, dim) + torch._check( + a.shape[dim] % 2 == 0, + lambda: f"Halving dimension must be even, but dimension {dim} is size {a.shape[dim]}", + ) + b, c = torch.tensor_split(a, 2, dim) + + return b * torch.sigmoid(c) + + +@register_decomposition(aten.pairwise_distance) +@out_wrapper() +def pairwise_distance( + x1: TensorLikeType, + x2: TensorLikeType, + p: NumberType = 2.0, + eps: NumberType = 1e-6, + keepdim=False, +) -> TensorLikeType: + return torch.linalg.vector_norm(x1 - x2 + eps, ord=p, dim=-1, keepdim=keepdim) + + +@register_decomposition(aten.pdist) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def pdist(a: TensorLikeType, p: float = 2) -> TensorLikeType: + torch._check(a.ndim == 2, lambda: f"pdist only supports 2D tensors, got: {a.ndim}D") + torch._check(p >= 0, lambda: "pdist only supports non-negative p 
values") + # For p == 2 we can use an efficient implementation, but other values of p + # require creating a much bigger tensor for an intermediate step + if p == 2: + aTa = torch.mm(a, a.T) + aTa_diag = torch.diag(aTa) + t = torch.sqrt(torch.clamp(aTa_diag + aTa_diag.unsqueeze(-1) - 2 * aTa, min=0)) + else: + t = torch.linalg.vector_norm(a.unsqueeze(1) - a, ord=p, dim=2) + i = torch.triu_indices(t.shape[0], t.shape[1], offset=1, device=a.device) + return t.flatten().index_select(0, i[0] * t.shape[0] + i[1]) + + +@register_decomposition(aten.pixel_shuffle) +@out_wrapper() +def pixel_shuffle(self: Tensor, upscale_factor: int): + torch._check( + self.dim() >= 3, + lambda: f"pixel_shuffle expects input to have at least 3 dimensions, but got input with {self.dim} dimension(s)", + ) + batch = self.shape[:-3] + C_out = self.shape[-3] // upscale_factor**2 + HW_out = (self.shape[-2] * upscale_factor, self.shape[-1] * upscale_factor) + n = len(batch) + B_dims = range(n) + C_dim, r1_dim, r2_dim, H_dim, W_dim = range(n, n + 5) + return ( + self.view( + *batch, + C_out, + upscale_factor, + upscale_factor, + self.shape[-2], + self.shape[-1], + ) + .permute(*B_dims, C_dim, H_dim, r1_dim, W_dim, r2_dim) + .reshape(*batch, C_out, *HW_out) + .clone(memory_format=utils.suggest_memory_format(self)) + ) + + +@register_decomposition(aten.pixel_unshuffle) +@out_wrapper() +def pixel_unshuffle(self: Tensor, downscale_factor: int): + torch._check( + self.dim() >= 3, + lambda: f"pixel_unshuffle expects input to have at least 3 dimensions, but got input with {self.dim} dimension(s)", + ) + batch = self.shape[:-3] + C_out = self.shape[-3] * downscale_factor**2 + HW_out = (self.shape[-2] // downscale_factor, self.shape[-1] // downscale_factor) + n = len(batch) + B_dims = range(n) + C_dim, H_dim, r1_dim, W_dim, r2_dim = range(n, n + 5) + return ( + self.view( + *batch, + self.shape[-3], + HW_out[0], + downscale_factor, + HW_out[1], + downscale_factor, + ) + .permute(*B_dims, C_dim, r1_dim, r2_dim, H_dim, W_dim) + .reshape(*batch, C_out, *HW_out) + .clone(memory_format=utils.suggest_memory_format(self)) + ) + + +# Needed as aten.{celu_,elu_...} exist (even if they don't have the in-place kwarg) +celu_ = _make_inplace(celu) +elu_ = _make_inplace(elu) +mish_ = _make_inplace(mish) +selu_ = _make_inplace(selu) +threshold_ = _make_inplace(threshold) diff --git a/venv/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0f24732831b49db3e5cf2217c459d32170c1361 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_refs/special/__init__.py b/venv/lib/python3.10/site-packages/torch/_refs/special/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..048de83506d2919fd858e871290871bb0f558289 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_refs/special/__init__.py @@ -0,0 +1,236 @@ +import math +from typing import Optional, Union + +import torch +import torch._prims as prims +import torch._prims_common as utils +import torch._refs as refs + +from torch import Tensor +from torch._decomp import register_decomposition +from torch._prims_common import ( + ELEMENTWISE_TYPE_PROMOTION_KIND, + Number, + NumberType, + TensorLike, + TensorLikeType, +) +from 
torch._prims_common.wrappers import elementwise_type_promotion_wrapper, out_wrapper +from torch._refs import ( + _make_alias, + _make_elementwise_binary_reference, + _make_elementwise_unary_reference, +) + + +__all__ = [ + "bessel_j0", + "bessel_j1", + "entr", + "erfcx", + "expit", + "i0e", + "i1", + "i1e", + "log_ndtr", + "logit", + "log_softmax", + "multigammaln", + "ndtr", + "ndtri", + "softmax", + "spherical_bessel_j0", + "xlog1py", + "zeta", +] +aten = torch._ops.ops.aten + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def bessel_j0(a: TensorLikeType) -> TensorLikeType: + return prims.bessel_j0(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def bessel_j1(a: TensorLikeType) -> TensorLikeType: + return prims.bessel_j1(a) + + +@register_decomposition(aten.special_entr) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def entr(a: TensorLikeType) -> TensorLikeType: + return torch.where( + torch.isnan(a), + a, + torch.where(a > 0, -a * torch.log(a), torch.where(a == 0, 0, -torch.inf)), + ) + + +@register_decomposition(aten.special_erfcx) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def erfcx(a: TensorLikeType) -> TensorLikeType: + return prims.erfcx(a) + + +# alias for sigmoid +expit = _make_alias(torch.sigmoid, "expit") + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def i0e(a: TensorLikeType) -> TensorLikeType: + return prims.bessel_i0e(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def i1(a: TensorLikeType) -> TensorLikeType: + return prims.bessel_i1(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def i1e(a: TensorLikeType) -> TensorLikeType: + return prims.bessel_i1e(a) + + +@register_decomposition(aten.special_log_ndtr) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def log_ndtr(a: TensorLikeType) -> TensorLikeType: + # Note: M_SQRT1_2 is the value of 1 / √2 + M_SQRT1_2 = 0.707106781186547524400844362104849039 + t = a * M_SQRT1_2 + return torch.where( + a < 1.0, + torch.log(torch.special.erfcx(-t) / 2) - t * t, + torch.log1p(-torch.erfc(t) / 2), + ) + + +@register_decomposition(aten.logit) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def logit(self: TensorLikeType, eps: Optional[float] = None) -> TensorLikeType: + if eps is None: + eps = -1.0 + lo = eps + hi = 1 - eps + self = torch.clamp(self, lo, hi) + return torch.log(torch.true_divide(self, torch.sub(1, self))) + + +@register_decomposition(aten.special_xlog1py) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def xlog1py(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]): + torch._check( + isinstance(a, TensorLike) or isinstance(b, TensorLike), + lambda: 'Expected either argument a or b to be a Tensor"', + ) + + # Operations like eq and log do not handle scalar values, so we convert them to scalar_tensors. 
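+    # Illustrative note (assumed example): a Python scalar on either side is
+    # wrapped below so the elementwise ops only ever see tensors, e.g.
+    #   xlog1py(2.0, torch.tensor([0.0, 1.0]))
+    # turns 2.0 into a scalar tensor with the dtype and device of the tensor
+    # argument before computing a * log1p(b).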
+ if isinstance(a, TensorLike) and isinstance(b, Number): + b = refs.scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(b, TensorLike) and isinstance(a, Number): + a = refs.scalar_tensor(a, dtype=b.dtype, device=b.device) + + # mypy: expected "Tensor" + assert isinstance(a, TensorLike) + assert isinstance(b, TensorLike) + rhs = torch.where(torch.eq(a, 0), 0, torch.mul(a, torch.log1p(b))) + return torch.where(torch.isnan(b), float("nan"), rhs) + + +@register_decomposition(aten.mvlgamma) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def multigammaln(a: TensorLikeType, p: int) -> TensorLikeType: + c = 0.25 * p * (p - 1) * math.log(math.pi) + b = 0.5 * torch.arange(start=(1 - p), end=1, step=1, dtype=a.dtype, device=a.device) + return torch.sum(torch.lgamma(a.unsqueeze(-1) + b), dim=-1) + c + + +@register_decomposition(aten.special_ndtr) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def ndtr(a: TensorLikeType) -> TensorLikeType: + # Note: M_SQRT1_2 is the value of 1 / √2 + M_SQRT1_2 = 0.707106781186547524400844362104849039 + a_sqrt_2 = a * M_SQRT1_2 + return (1 + torch.erf(a_sqrt_2)) * 0.5 + + +@register_decomposition(aten.special_ndtri) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def ndtri(a: TensorLikeType) -> TensorLikeType: + return prims.ndtri(a) + + +# Forwarding alias: the special variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def log_softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +# Forwarding alias: the special variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def spherical_bessel_j0(a: TensorLikeType) -> TensorLikeType: + return prims.spherical_bessel_j0(a) + + +# TODO: add docstring +@_make_elementwise_binary_reference( + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def zeta(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.zeta(a, b) diff --git a/venv/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bcf7e47d923d6123599f6786994db81760136fd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/__init__.py b/venv/lib/python3.10/site-packages/torch/_subclasses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bc7a7fed7512d4fb37b2a85fd41b862c65b57a03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_subclasses/__init__.py @@ -0,0 +1,18 @@ +import torch + +from torch._subclasses.fake_tensor import ( + 
DynamicOutputShapeException, + FakeTensor, + FakeTensorMode, + UnsupportedFakeTensorException, +) + +from torch._subclasses.fake_utils import CrossRefFakeMode + +__all__ = [ + "FakeTensor", + "FakeTensorMode", + "UnsupportedFakeTensorException", + "DynamicOutputShapeException", + "CrossRefFakeMode", +] diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a3c47d98674916f8dc0c18bf0512fd2ef22ecd1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_impls.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_impls.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb5f7f56094216314735960c606971e6e33fcacc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_impls.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe6b786c4d952c5f136db83327ed30ac234ca4df Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12027ebcdf56dad4568e850bbbce114f94c48cda Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/functional_tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/functional_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26eab1368b576a27a8c2993d472896c9c54f40b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/functional_tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/meta_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/meta_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32c1e226dc18169b0179c9a435832ffaaac7e5d1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/meta_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/schema_check_mode.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/schema_check_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..338e877026f4dc4a23148d1b65781dbb8a1a49ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/schema_check_mode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/fake_impls.py b/venv/lib/python3.10/site-packages/torch/_subclasses/fake_impls.py new file mode 100644 index 
0000000000000000000000000000000000000000..0b12de2db80405865e26e49b2a2f4f2b41df8ea5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_subclasses/fake_impls.py @@ -0,0 +1,1061 @@ +# mypy: ignore-errors + +import functools +import itertools +import math +import sys +from typing import Callable, Union + +import torch +import torch._custom_op +import torch._logging + +from torch._ops import OpOverload +from torch._prims_common import ( + elementwise_dtypes, + ELEMENTWISE_TYPE_PROMOTION_KIND, + is_boolean_dtype, + is_float_dtype, + is_integer_dtype, +) + +from torch._subclasses.fake_tensor import ( + DataDependentOutputException, + DynamicOutputShapeException, + FakeTensor, + in_kernel_invocation_manager, + run_fallback_kernel, + UnsupportedOperatorException, +) +from torch.fx.operator_schemas import normalize_function + +from torch.utils._stats import count_label + +pytree = torch.utils._pytree + +__all__ = [ + "op_implementations_checks", + "get_fast_op_impls", + "stride_incorrect_op", + "has_meta", +] + +op_implementations_dict = {} +op_implementations_checks = [] + + +aten = torch._ops.ops.aten + + +def ordered_set(*items): + return dict.fromkeys(items, True) + + +# This function indicates if the backend device +# supports non-contiguous tensors +def is_noncontiguous_supported(device): + if device.type == "hpu": + return False + return True + + +_like_tensor_constructors = ordered_set( + aten.empty_like.default, + aten.empty_like.out, + aten.full_like.default, + aten.full_like.out, + aten.ones_like.default, + aten.ones_like.out, + aten.rand_like.default, + aten.rand_like.out, + aten.randn_like.default, + aten.randn_like.out, + aten.randint_like.default, + aten.randint_like.out, + aten.randint_like.low_dtype, + aten.randint_like.low_dtype_out, + aten.zeros_like.default, + aten.zeros_like.out, + aten.new_empty.default, + aten.new_empty.out, + aten.new_empty_strided.default, + aten.new_empty_strided.out, + aten.new_full.default, + aten.new_full.out, + aten.new_zeros.default, + aten.new_zeros.out, + aten.new_ones.default, + aten.new_ones.out, +) + + +_device_not_kwarg_ops = ordered_set( + aten._resize_output_.default, + aten._nested_tensor_from_tensor_list.default, + aten._nested_tensor_from_tensor_list.out, + aten.pin_memory.default, + aten.is_pinned.default, + aten.to.device, + aten.to.prim_Device, + aten._pin_memory.default, + aten._pin_memory.out, + aten._resize_output.default, + aten._resize_output.out, +) + +# this op is never actually used +_non_kwarg_device_constructors = (aten._list_to_tensor,) + + +def contains_tensor_types(type): + tensor_type = torch._C.TensorType.get() + return type.isSubtypeOf(tensor_type) or any( + contains_tensor_types(e) for e in type.containedTypes() + ) + + +@functools.lru_cache(None) +def _is_tensor_constructor(func: OpOverload): + assert isinstance(func, OpOverload) + schema = func._schema + if any(contains_tensor_types(arg.type) for arg in schema.arguments): + return False + # TODO: no real reason to restrict multiple outputs + return ( + len(schema.returns) == 1 and schema.returns[0].type is torch._C.TensorType.get() + ) + + +def register_op_impl(run_impl_check: Union[Callable[[OpOverload], bool], OpOverload]): + def impl_decorator(op_impl): + if isinstance(run_impl_check, OpOverload): + assert ( + run_impl_check not in op_implementations_dict + ), f"duplicate registration: {run_impl_check}" + op_implementations_dict[run_impl_check] = op_impl + elif isinstance(run_impl_check, (list, tuple)): + for op in run_impl_check: + 
register_op_impl(op)(op_impl) + else: + assert callable(run_impl_check) + op_implementations_checks.append((run_impl_check, op_impl)) + + return op_impl + + return impl_decorator + + +@register_op_impl(op_implementations_dict.__contains__) +def dispatch_to_op_implementations_dict(fake_mode, func, *args, **kwargs): + return op_implementations_dict[func](fake_mode, func, *args, **kwargs) + + +@register_op_impl(_is_tensor_constructor) +@register_op_impl([*_like_tensor_constructors]) +def constructors(fake_mode, func, *args, **kwargs): + assert func not in _non_kwarg_device_constructors + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + if "names" in kwargs: + raise UnsupportedOperatorException( + "torch.compile doesn't support named tensors" + ) + + if func in _like_tensor_constructors: + default_device = new_kwargs["input"].device + # TODO: file issue + args = (new_kwargs.pop("input"),) + else: + # cpu is default device if none is specified + default_device = torch.device("cpu") + args = () + out_device = new_kwargs.pop("device", None) + out_device = out_device if out_device is not None else default_device + new_kwargs["device"] = torch.device("meta") + # _like constructors have fake tensor inputs (maybe this causes the non-like + # to fail? hmmm) + with in_kernel_invocation_manager(fake_mode): + r = func(*args, **new_kwargs) + return FakeTensor(fake_mode, r, out_device) + + +@register_op_impl(aten.to.prim_Device) +@register_op_impl(aten.to.device) +def non_kwarg_to(fake_mode, func, *args, **kwargs): + _, new_kwargs = normalize_function( + func, args, kwargs, normalize_to_only_use_kwargs=True + ) + input_device = new_kwargs["device"] + out_device = input_device if input_device else new_kwargs["input"].device + new_kwargs["device"] = torch.device("meta") + inp = new_kwargs.pop("input") + with in_kernel_invocation_manager(fake_mode): + r = func(inp, **new_kwargs) + # TODO: I think this does the wrong thing if r is inp + return fake_mode.fake_tensor_converter.from_meta_and_device( + fake_mode, r, out_device + ) + + +def stride_incorrect_op(op): + if op.namespace not in ("aten", "prims"): + return False + if op is aten._fft_c2c.default: + return False + + op_name = op.name() + if "fft" in op_name: + return True + return False + + +# These operators have meta implementations with incorrect strides +@register_op_impl(stride_incorrect_op) +def wordaround_stride_incorrect_op(fake_mode, func, *args, **kwargs): + # This is a workaround for meta implmentations with incorrect strides + + def is_symbolic(x): + if isinstance(x, FakeTensor): + return x._has_symbolic_sizes_strides + if isinstance(x, (torch.SymInt, torch.SymFloat, torch.SymBool)): + return True + return False + + # For static shapes, we can fall back to eager for the real strides + if fake_mode.allow_fallback_kernels: + require_dynamic = any( + is_symbolic(x) for x in itertools.chain(args, kwargs.values()) + ) + if not require_dynamic: + flat_args, args_spec = pytree.tree_flatten((args, kwargs)) + return run_fallback_kernel(fake_mode, func, flat_args, args_spec, None) + + raise UnsupportedOperatorException(func) + + +# Dont default to default device handling, +# since the device of `the_template` is ignored +@register_op_impl(aten.resize_as_.default) +def resize_as_(fake_mode, func, *args, **kwargs): + with in_kernel_invocation_manager(fake_mode): + return func(*args, **kwargs) + + +@register_op_impl(aten._sparse_coo_tensor_with_dims_and_tensors.default) +def 
_sparse_coo_tensor_with_dims_and_tensors(fake_mode, func, *args, **kwargs): + # TODO: remove me + return constructors(fake_mode, func, *args, **kwargs) + + +# index.Tensor data-dependent in only some conditions +@register_op_impl( + lambda func: torch.Tag.dynamic_output_shape in func.tags + and func + not in [aten.index.Tensor, aten.nonzero.default, aten.repeat_interleave.Tensor] +) +def dyn_shape(fake_mode, func, *args, **kwargs): + raise DynamicOutputShapeException(func) + + +@register_op_impl(aten.repeat_interleave.Tensor) +def repeat_interleave_tensor(fake_mode, func, repeats, output_size=None): + if output_size is None: + if ( + fake_mode.shape_env is None + or not fake_mode.shape_env.allow_dynamic_output_shape_ops + ): + raise DynamicOutputShapeException(func) + + output_size = fake_mode.shape_env.create_unbacked_symint() + + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import _constrain_range_for_size + + _constrain_range_for_size(output_size) + # TODO: consider a memo + return repeats.new_empty(output_size) + + +@register_op_impl(torch.ops.aten._local_scalar_dense.default) +def local_scalar_dense(fake_mode, func, arg): + if fake_mode.shape_env is None or not fake_mode.shape_env.allow_scalar_outputs: + # Without symints/symfloats, cannot handle this + raise DataDependentOutputException(func) + if is_float_dtype(arg.dtype): + return fake_mode.shape_env.create_unbacked_symfloat() + elif is_integer_dtype(arg.dtype): + return fake_mode.shape_env.create_unbacked_symint() + elif is_boolean_dtype(arg.dtype): + return fake_mode.shape_env.create_unbacked_symbool() + else: + raise NotImplementedError(f"local_scalar_dense/item NYI for {arg.dtype}") + + +@register_op_impl(torch.ops.aten.nonzero.default) +def nonzero(fake_mode, func, arg): + if ( + fake_mode.shape_env is None + or not fake_mode.shape_env.allow_dynamic_output_shape_ops + ): + # Without symints/symfloats, cannot handle this + raise DynamicOutputShapeException(func) + + if arg.nonzero_memo is None: + nnz = fake_mode.shape_env.create_unbacked_symint() + + # This is unsound, but it works well in practice + # See https://docs.google.com/document/d/1lFRYAJo5nrfxRhwIzGnfi2pbLpU6T4ytSRSuLJ5qebI/edit# + # TODO: Add a config knob to turn off this unsound behavior + # + # NB: If numel < 2, the bounds here might be COMPLETELY + # disjoint with what can actually occur. But this is fine: + # remember, the hypothesis is that if your later code works + # with N >= 2, it will work with N = 1 and N = 0. + maxval = sys.maxsize - 1 + + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import ( + _constrain_range_for_size, + has_free_symbols, + ) + + if not has_free_symbols(arg.numel()): + # Don't upgrade the range if numel is less than two, since we then + # have an empty range which makes things go explodey. We also + # don't allow for 2 because that would specialize the unbacked + # SymInt to 2, which is also likely to be buggy. 
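+            # Sketch of the resulting bound (assumed sizes): for a static input
+            # with numel() == 10 the unbacked nnz is capped at 10; for numel()
+            # in {0, 1, 2} or a symbolic numel, maxval stays at sys.maxsize - 1
+            # and only that loose cap is applied.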
+ if arg.numel() > 2: + maxval = int(arg.numel()) + + _constrain_range_for_size(nnz, max=maxval) + + arg._nonzero_memo = nnz + arg._nonzero_memo_vc = arg._version + + return arg.new_empty((arg.nonzero_memo, arg.dim()), dtype=torch.int64) + + +@register_op_impl(torch.ops.aten.masked_select.default) +def masked_select(fake_mode, func, self, mask): + if ( + fake_mode.shape_env is None + or not fake_mode.shape_env.allow_dynamic_output_shape_ops + ): + # Without symints/symfloats, cannot handle this + raise DynamicOutputShapeException(func) + + nnz = fake_mode.shape_env.create_unbacked_symint() + + # see nonzero for commentary + maxval = sys.maxsize - 1 + + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import ( + _constrain_range_for_size, + has_free_symbols, + ) + + if not has_free_symbols(self.numel()): + if self.numel() > 2: + maxval = int(self.numel()) + + _constrain_range_for_size(nnz, max=maxval) + + return self.new_empty((nnz,)) + + +# NB: this must be ordered after local_scalar_dense +@register_op_impl(lambda func: torch.Tag.data_dependent_output in func.tags) +def data_dep(fake_mode, func, *args, **kwargs): + raise DataDependentOutputException(func) + + +# Bool Indices get Expanded as Masks +# See: IndexingUtils.h:expandTensors +def check_no_bool_index_tensors(func, self, indices): + for index in indices: + if index is not None and index.dtype in (torch.bool, torch.uint8): + raise DynamicOutputShapeException(func) + + +def run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs): + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + out_device = new_kwargs["input"].device + with in_kernel_invocation_manager(fake_mode): + out = func(*args, **kwargs) + if not is_noncontiguous_supported(out_device): + out = out.new_empty(out.shape) + + if out is new_kwargs["input"]: + return out # copy_ + return FakeTensor(fake_mode, out, out_device) + + +_is_builtin_namespaces = ordered_set("aten", "prims", "prim") + + +def is_builtin(op): + return op.namespace in _is_builtin_namespaces + + +def has_meta(func): + return torch._C._dispatch_has_computed_kernel_for_dispatch_key(func.name(), "Meta") + + +@register_op_impl( + lambda func: is_builtin(func) and "foreach" in func.name() and has_meta(func) +) +def foreach_run_and_map_input_device(fake_mode, func, *args, **kwargs): + tensor_lists = [] + for arg in itertools.chain(args, kwargs.values()): + if ( + isinstance(arg, (list, tuple)) + and len(arg) + and isinstance(arg[0], torch.Tensor) + ): + tensor_lists.append(arg) + + try: + with in_kernel_invocation_manager(fake_mode): + out_meta = func(*args, **kwargs) + except NotImplementedError as not_implemented_error: + return NotImplemented + + if not out_meta: + return out_meta + + assert tensor_lists + out_fake = [] + + for i, meta_t in enumerate(out_meta): + device, _ = FakeTensor._find_common_device(func, [tl[i] for tl in tensor_lists]) + out_fake.append( + fake_mode.fake_tensor_converter.from_meta_and_device( + fake_mode, meta_t, device + ) + ) + + return out_fake + + +# Dont default to default device handling, +# Since op can take in non-zero sized cpu +# index tensors with cuda self +@register_op_impl(aten.index.Tensor) +def index_tensor(fake_mode, func, *args, **kwargs): + from torch._meta_registrations import meta_index_Tensor + + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + out_device = new_kwargs["input"].device + # 
ensure nonzero call goes to fake tensor + with fake_mode: + out = meta_index_Tensor(*args, **kwargs) + return out.to(out_device) + + +# Can take mixed meta/non-meta arguments; the meta registration +# will roughly do the right thing even when given real devices +@register_op_impl(aten._embedding_bag.default) +def embedding_bag(fake_mode, func, *args, **kwargs): + from torch._meta_registrations import meta_embedding_bag + + with fake_mode: + return meta_embedding_bag(*args, **kwargs) + + +# takes in multiple-devices, dont default to default device handling +@register_op_impl(aten._unsafe_index_put.default) +@register_op_impl(aten.copy.default) +@register_op_impl(aten.copy_.default) +@register_op_impl(aten.slice_scatter.default) +def multi_device_op_default(fake_mode, func, *args, **kwargs): + return run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs) + + +# same with multi_device_op_default, but return the input +@register_op_impl(aten.copy.out) +@register_op_impl(aten.slice_scatter.out) +def multi_device_op_out(fake_mode, func, *args, **kwargs): + with in_kernel_invocation_manager(fake_mode): + out = func(*args, **kwargs) + + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + return new_kwargs["input"] + + +@register_op_impl(aten.index_put.default) +@register_op_impl(aten.index_put_.default) +def index_put_impl(fake_mode, func, *args, **kwargs): + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + values = new_kwargs["values"] + self_device = new_kwargs["input"].fake_device + torch._check( + self_device == values.fake_device or (values.ndim == 0 and values.numel() == 1), + lambda: f"Mismatching {func} device between self ({self_device}) and values ({values.device})", + ) + + out = run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs) + if func is aten.index_put_.default: + return new_kwargs["input"] + else: + return out + + +@register_op_impl(aten._nested_tensor_from_tensor_list.default) +@register_op_impl(aten._nested_tensor_from_tensor_list.out) +def nested_tensors_unsupported(fake_mode, func, *args, **kwargs): + raise UnsupportedOperatorException( + "torch.compile does not support strided NestedTensor" + ) + + +@register_op_impl( + [ + x + for x in _device_not_kwarg_ops + if x + not in ( + # these are already registered elsewhere + aten.to.device, + aten.to.prim_Device, + aten._nested_tensor_from_tensor_list.default, + aten._nested_tensor_from_tensor_list.out, + ) + ] +) +def nyi(fake_mode, func, *args, **kwargs): + assert func not in _device_not_kwarg_ops, f"NYI: {func}" + + +@register_op_impl([aten.convolution.default, aten.convolution_backward.default]) +def conv(fake_mode, func, *args, **kwargs): + _, kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + device = kwargs["input"].fake_device + # need to re-enable mode so the tensors report fake device + with fake_mode: + # if the input is unsqueezed is done in Convolution.cpp we get segfault + k = kwargs["weight"].ndim + batch = kwargs["input"].shape[0] + + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import has_hint + + if not has_hint(batch): + # TODO: We can make this a little more faithful with best effort + # channels last detection (but only if it's statically obvious!) 
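+            # Rough sketch of the idea (hedged; the backend query further down
+            # is what is authoritative when the batch size is static): for a
+            # concrete channels-last input such as
+            # torch.empty(8, 3, 32, 32).to(memory_format=torch.channels_last),
+            # the selected backend may report torch.channels_last; with an
+            # unbacked batch size we cannot ask the backend selector, so
+            # mem_fmt is left as None (no memory-format override).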
+ mem_fmt = None + elif k == 3 and not kwargs["input"].is_mkldnn and not kwargs["input"].is_xpu: + mem_fmt = None + else: + if func is aten.convolution.default: + conv_backend = torch._C._select_conv_backend(**kwargs) + else: + conv_backend = torch._C._select_conv_backend( + kwargs["input"], + kwargs["weight"], + bias=None, + stride=kwargs["stride"], + padding=kwargs["padding"], + dilation=kwargs["dilation"], + transposed=kwargs["transposed"], + output_padding=kwargs["output_padding"], + groups=kwargs["groups"], + bias_sizes=kwargs["bias_sizes"], + ) + mem_fmt = torch._C._conv_determine_backend_memory_format( + kwargs["input"], kwargs["weight"], conv_backend + ) + + def convert(t, mem_fmt): + if t is None: + return t + if mem_fmt is not None: + t = t.to(memory_format=mem_fmt) + return FakeTensor(fake_mode, t, device) + + with in_kernel_invocation_manager(fake_mode): + out = func(**kwargs) + + if func is aten.convolution.default: + return convert(out, mem_fmt) + else: + return ( + convert(out[0], mem_fmt), + convert(out[1], mem_fmt), + convert(out[2], None), + ) + + +@register_op_impl(aten._scaled_dot_product_flash_attention.default) +def meta__scaled_dot_product_flash(fake_mode, func, *args, **kwargs): + _, kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + query = kwargs["query"] + key = kwargs["key"] + return_debug_mask = kwargs["return_debug_mask"] + # unused: value, dropout_p, is_causal, scale + + def convert_tensor(t, device): + return FakeTensor(fake_mode, t, device) + + batch_size = query.size(0) + num_heads = query.size(1) + max_seqlen_batch_q = query.size(2) + head_dim = query.size(3) + max_seqlen_batch_k = key.size(2) + + query_t = query.transpose(1, 2) + # empty_like already returns a fake tensor so we don't need to convert it + attention = torch.empty_like(query_t).transpose(1, 2) + logsumexp = convert_tensor( + torch.empty( + (batch_size, num_heads, max_seqlen_batch_q), + dtype=torch.float, + device="meta", + ), + device=query.device, + ) + + if return_debug_mask: + blocksize_c = 128 if head_dim > 64 else 256 + max_seqlen_k = math.ceil(max_seqlen_batch_q / blocksize_c) + if max_seqlen_batch_k <= 128: + max_seqlen_k = 128 + elif max_seqlen_batch_k <= 256: + max_seqlen_k = 256 + debug_mask = convert_tensor( + torch.empty( + (batch_size, num_heads, max_seqlen_batch_q, max_seqlen_k), + dtype=query.dtype, + device="meta", + ), + device=query.device, + ) + else: + debug_mask = convert_tensor( + torch.empty(0, dtype=query.dtype, device="meta"), + query.device, + ) + + # Note [Seed and Offset]: device for seed and offset below depends on whether we are + # capturing or not, but at the time of tracing we don't know if we + # are going to use cudagraphs or not, so we return meta tensors here + # it's possible we'll need to have some special handling in inductor for sdpa + + return ( + attention, + logsumexp, + None, + None, + max_seqlen_batch_q, + max_seqlen_batch_k, + convert_tensor(torch.empty((), dtype=torch.long, device="meta"), query.device), + convert_tensor(torch.empty((), dtype=torch.long, device="meta"), query.device), + debug_mask, + ) + + +@register_op_impl(aten._scaled_dot_product_efficient_attention.default) +def meta__scaled_dot_product_efficient(fake_mode, func, *args, **kwargs): + _, kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + query = kwargs["query"] + key = kwargs["key"] + value = kwargs["value"] + compute_log_sumexp = kwargs["compute_log_sumexp"] + # 
unused: attn_bias, dropout_p, is_causal, scale + + def convert_tensor(t, device): + return FakeTensor(fake_mode, t, device) + + query = query.transpose(1, 2) + key = key.transpose(1, 2) + value = value.transpose(1, 2) + + B = query.size(0) + M = query.size(1) + N = key.size(1) + num_heads = query.size(-2) + K = query.size(-1) + Kv = value.size(-1) + + res = convert_tensor( + torch.empty(B, M, num_heads, Kv, dtype=query.dtype, device="meta"), + query.device, + ) + + logsumexp_dim = math.ceil(M / 32) * 32 if compute_log_sumexp else 0 + logsum_exp = convert_tensor( + torch.empty( + (B, num_heads, logsumexp_dim), + dtype=torch.float, + device="meta", + ), + query.device, + ) + + res = res.transpose(1, 2) + + # See Note [Seed and Offset]: + seed = convert_tensor( + torch.empty((), dtype=torch.long, device="meta"), query.device + ) + offset = convert_tensor( + torch.empty((), dtype=torch.long, device="meta"), query.device + ) + + return res, logsum_exp, seed, offset + + +@register_op_impl(aten._flash_attention_forward.default) +def meta__flash_attention_forward(fake_mode, func, *args, **kwargs): + _, kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + query = kwargs["query"] + key = kwargs["key"] + cum_seq_q = kwargs["cum_seq_q"] + cum_seq_k = kwargs["cum_seq_k"] + max_q = kwargs["max_q"] + max_k = kwargs["max_k"] + return_debug_mask = kwargs["return_debug_mask"] + # unused: value, dropout_p, is_causal, scale + + def convert_tensor(t, device): + return FakeTensor(fake_mode, t, device) + + # NB: there are two underlying paths: + # 1. normal dense path; expect 4D inputs of shape (batch_size, seqlen, num_heads, head_dim) + # 2. varseqlen path; expect 3D inputs of shape (total, num_heads, head_dim) where total + # includes all batch item sequences. 
cum_seq_q / cum_seq_k contain offsets into total + batch_size = query.size(0) if cum_seq_q is None else cum_seq_q.numel() - 1 + max_seqlen_batch_q = query.size(1) if cum_seq_q is None else max_q + max_seqlen_batch_k = key.size(1) if cum_seq_k is None else max_k + num_heads = query.size(-2) + head_dim = query.size(-1) + + # Cuda Path + # note: empty_like already returns a fake tensor, we don't need to wrap it + attention = torch.empty_like(query) + logsumexp = convert_tensor( + torch.empty( + (batch_size, num_heads, max_seqlen_batch_q), + dtype=torch.float, + device="meta", + ), + device=query.device, + ) + + if return_debug_mask: + blocksize_c = 128 if head_dim > 64 else 256 + max_seqlen_k = math.ceil(max_seqlen_batch_q / blocksize_c) + if max_seqlen_batch_k <= 128: + max_seqlen_k = 128 + elif max_seqlen_batch_k <= 256: + max_seqlen_k = 256 + debug_mask = convert_tensor( + torch.empty( + (batch_size, num_heads, max_seqlen_batch_q, max_seqlen_k), + dtype=query.dtype, + device="meta", + ), + query.device, + ) + else: + debug_mask = convert_tensor( + torch.empty(0, dtype=query.dtype, device="meta"), + query.device, + ) + + # See Note [Seed and Offset]: + return ( + attention, + logsumexp, + convert_tensor(torch.empty((), dtype=torch.long, device="meta"), query.device), + convert_tensor(torch.empty((), dtype=torch.long, device="meta"), query.device), + debug_mask, + ) + + +@register_op_impl(aten._efficient_attention_forward.default) +def meta__efficient_attention_forward(fake_mode, func, *args, **kwargs): + _, kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + query = kwargs["query"] + key = kwargs["key"] + value = kwargs["value"] + cu_seqlens_q = kwargs["cu_seqlens_q"] + max_seqlen_q = kwargs["max_seqlen_q"] + max_seqlen_k = kwargs["max_seqlen_k"] + compute_log_sumexp = kwargs["compute_log_sumexp"] + # unused: bias, cu_seqlens_k, dropout_p, custom_mask_type, scale, causal_diagonal, seqlen_k + + def convert_tensor(t, device): + return FakeTensor(fake_mode, t, device) + + B = query.size(0) + M = query.size(1) + N = key.size(1) + num_heads = query.size(-2) + K = query.size(-1) + Kv = value.size(-1) + + res = convert_tensor( + torch.empty(B, M, num_heads, Kv, dtype=query.dtype, device="meta"), + query.device, + ) + + logsumexp_batch_dim = cu_seqlens_q.size(0) - 1 if (cu_seqlens_q is not None) else B + actual_max_seqlen_q = M + if cu_seqlens_q is not None: + assert max_seqlen_q is not None + actual_max_seqlen_q = max_seqlen_q + actual_max_seqlen_k = max_seqlen_k if max_seqlen_k is not None else N + logsumexp_dim = ( + math.ceil(actual_max_seqlen_q / 32) * 32 if compute_log_sumexp else 0 + ) + logsum_exp = convert_tensor( + torch.empty( + (logsumexp_batch_dim, num_heads, logsumexp_dim), + dtype=torch.float, + device="meta", + ), + query.device, + ) + + # See Note [Seed and Offset]: + seed = convert_tensor( + torch.empty((), dtype=torch.long, device="meta"), query.device + ) + offset = convert_tensor( + torch.empty((), dtype=torch.long, device="meta"), query.device + ) + + return res, logsum_exp, seed, offset, actual_max_seqlen_q, actual_max_seqlen_k + + +FAST_OP_IMPLEMENTATIONS = {} + + +# Unlike register_op_impl, these don't do the slow iteration for +# run_impl_check, and these run BEFORE decompositions +def register_fast_op_impl(func: OpOverload): + def impl_decorator(op_impl): + FAST_OP_IMPLEMENTATIONS[func] = op_impl + return op_impl + + return impl_decorator + + +# infer_size_impl in ExpandUtils +def infer_size(a, b): + from 
torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + dimsA = len(a) + dimsB = len(b) + ndim = max(dimsA, dimsB) + expandedSizes = [0] * ndim + for i in range(ndim - 1, -1, -1): + offset = ndim - 1 - i + dimA = dimsA - 1 - offset + dimB = dimsB - 1 - offset + sizeA = a[dimA] if dimA >= 0 else 1 + sizeB = b[dimB] if dimB >= 0 else 1 + + # NB: It is very important to test for broadcasting, before testing + # sizeA == sizeB. This is because the broadcasting tests are likely + # to be statically known (in particular, if sizeA/sizeB is unbacked + # but size-like, we will unsoundly assume they never equal 1), but + # the sizeA == sizeB test may not be statically known. However, once + # we have established that no broadcasting is happening, the + # sizeA == sizeB is now expect_true and we can defer it as a runtime + # assert (this works because Python will return the terminal + # expression of an or statement as-is, without bool()'ing it; if this + # were not the case, we'd need to write this using torch.sym_or() or + # something like that). + torch._check( + guard_size_oblivious(sizeA == 1) + or guard_size_oblivious(sizeB == 1) + or sizeA == sizeB, + lambda: f"The size of tensor a ({sizeA}) " + f"must match the size of tensor b ({sizeB}) " + f"at non-singleton dimension {i})", + ) + expandedSizes[i] = sizeB if guard_size_oblivious(sizeA == 1) else sizeA + return tuple(expandedSizes) + + +def make_fast_binary_impl(slow_ref): + def fast_binary_impl(mode, *args, **kwargs): + def slow(msg): + count_label(f"slow {msg}") + with mode: + return slow_ref(*args, **kwargs) + + count_label("attempt fast") + + # Fast path (based off of TensorIterator fast path). + # Unfortunately, there is no way to easily deduplicate + # this with either the TensorIterator C++ implementation + # (which we don't want to SymIntify, and also the algorithm + # here is slightly different from TensorIterator to allow + # for broadcasting), nor the PrimTorch implementation + # (which does not actually implement a fast path.) 
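+        # Hedged walk-through of the intended happy path (values are only
+        # illustrative): for aten.add.Tensor on two contiguous float32 fakes
+        # of shape (4, 8) on the same device, infer_size yields (4, 8), the
+        # common dtype stays torch.float32, and we synthesize a contiguous
+        # FakeTensor of that shape below without calling the slow ref.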
+ + operands = args + + # compute_shape + has_scalars = False + has_tensors = False + final_shape = None + for op in operands: + shape = op.shape if isinstance(op, torch.Tensor) else () + if len(shape) == 0: + has_scalars = True + else: + has_tensors = True + if final_shape is None: + final_shape = shape + # TODO: Minor optimization: track if the shapes + # were equal so you can skip the equality check + # below if unnecessary + final_shape = infer_size(final_shape, shape) + assert final_shape is not None + + # Do some extra safety checks to see if the output + # stride is obvious + for op in operands: + if ( + isinstance(op, torch.Tensor) + and len(op.shape) == len(final_shape) + and op.shape == final_shape + ): + break + else: + return slow("both tensors nontrivially broadcast") + + # compute_types + cpu = torch.device("cpu") + common_device = cpu + common_dtype = None + output_dtype = None + has_different_input_dtypes = False + for op in operands: + if not isinstance(op, torch.Tensor): + # Use elementwise_dtypes for the tricky case + has_different_input_dtypes = True + continue + if common_device == cpu and not op.device.type == "cpu": + common_device = op.device + # Slightly simplified here as target_dtype cannot vary + if common_dtype is None: + common_dtype = op.dtype + elif common_dtype != op.dtype: + has_different_input_dtypes = True + + if has_different_input_dtypes: + # compute promotion + # TODO: we don't need the compute type + _, common_dtype = elementwise_dtypes( + *operands, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ) + + # check all tensors on same device + # cpu scalars are assumed allow + current_cpu_scalars_on_non_cpu = 0 + max_cpu_scalars_on_non_cpu = 1 # hard coded atm + for op in operands: + if not isinstance(op, torch.Tensor): + continue + if common_device != cpu and op.dim() == 0 and op.device == cpu: + if current_cpu_scalars_on_non_cpu >= max_cpu_scalars_on_non_cpu: + return slow("error") + current_cpu_scalars_on_non_cpu += 1 + elif op.device != common_device: + return slow("error") + + # compute_fast_setup_type + is_contiguous = True + is_channels_last = True + # TODO: is_non-overlapping_and_dense (not bound from Python + # no inplace, no out, everything defined + + if is_noncontiguous_supported(common_device): + for op in operands: + if not isinstance(op, torch.Tensor): + continue + is_contiguous = is_contiguous and op.is_contiguous( + memory_format=torch.contiguous_format + ) + is_channels_last = is_channels_last and op.is_contiguous( + memory_format=torch.channels_last + ) + if is_contiguous: + # do contiguous + count_label("fast is_contiguous") + return FakeTensor( + mode, + torch.empty( + final_shape, + dtype=common_dtype, + device="meta", + memory_format=torch.contiguous_format, + ), + device=common_device, + ) + if is_channels_last: + count_label("fast channels_last") + # do channels last + return FakeTensor( + mode, + torch.empty( + final_shape, + dtype=common_dtype, + device="meta", + memory_format=torch.channels_last, + ), + device=common_device, + ) + + return slow("no contiguity match") + + return fast_binary_impl + + +@functools.lru_cache(None) +def get_fast_op_impls(): + import torch._refs + + register_fast_op_impl(torch.ops.aten.add.Tensor)( + make_fast_binary_impl(torch._refs.add) + ) + register_fast_op_impl(torch.ops.aten.sub.Tensor)( + make_fast_binary_impl(torch._refs.sub) + ) + register_fast_op_impl(torch.ops.aten.mul.Tensor)(make_fast_binary_impl(torch._refs.mul)) # type: ignore[has-type] + 
register_fast_op_impl(torch.ops.aten.div.Tensor)( + make_fast_binary_impl(torch._refs.div) + ) + return FAST_OP_IMPLEMENTATIONS diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/fake_tensor.py b/venv/lib/python3.10/site-packages/torch/_subclasses/fake_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..08007dcfdd11fcee715b8a0c24ff103e672bd2cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_subclasses/fake_tensor.py @@ -0,0 +1,1819 @@ +# mypy: ignore-errors + +import contextlib +import functools +import logging +import os +import traceback +import weakref +from collections import defaultdict +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Type, TYPE_CHECKING, TypeVar +from weakref import ReferenceType + +import torch +import torch._custom_op +import torch._logging +from torch._C._functorch import is_functorch_wrapped_tensor + +from torch._guards import Source +from torch._ops import OpOverload +from torch._prims_common import suggest_memory_format +from torch._subclasses.meta_utils import ( + assert_eq, + assert_metadata_eq, + is_sparse_any, + is_sparse_compressed, + MetaConverter, +) +from torch._utils import render_call +from torch.fx.operator_schemas import normalize_function +from torch.multiprocessing.reductions import StorageWeakRef +from torch.overrides import TorchFunctionMode +from torch.utils._mode_utils import no_dispatch +from torch.utils._python_dispatch import ( + is_traceable_wrapper_subclass, + TorchDispatchMode, +) + +from torch.utils._pytree import PyTree, tree_map +from torch.utils._stats import count +from torch.utils.weak import WeakIdRef + +if TYPE_CHECKING: + from torch.fx.experimental.symbolic_shapes import ShapeEnv + +DimList = List + +log = logging.getLogger(__name__) + +# TODO: Hack to unblock https://github.com/pytorch/pytorch/pull/108186 +# Proper fix tracked by https://github.com/pytorch/pytorch/issues/120105 +try: + not_implemented_log = torch._logging.getArtifactLogger(__name__, "not_implemented") +except ValueError as e: + if "'not_implemented' not registered" in str(e): + import logging as not_implemented_log + else: + raise e + +pytree = torch.utils._pytree +T = TypeVar("T") +TensorWeakRef = Any + +aten = torch._ops.ops.aten + +CONSTANT_NUMEL_LIMIT = 1 + +RECURSION_COUNT = 0 + + +# Small helper that increments recursion count, and +# resets it when the object goes out of scope. Useful +# if you don't want to increase indentation which is +# what a context manager would do. 
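+# A rough usage sketch (illustrative): `incr = IncrementRecursionCount()` at
+# the top of a dispatch frame bumps RECURSION_COUNT for the lifetime of that
+# local, and __del__ decrements it once the frame returns.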
+class IncrementRecursionCount: + def __init__(self): + global RECURSION_COUNT + RECURSION_COUNT += 1 + + def __del__(self): + global RECURSION_COUNT + RECURSION_COUNT -= 1 + + +@dataclass +class UnsupportedFakeTensorException(RuntimeError): + reason: str + + +@dataclass +class DynamicOutputShapeException(RuntimeError): + func: OpOverload + + +@dataclass +class DataDependentOutputException(RuntimeError): + func: OpOverload + + +@dataclass +class UnsupportedOperatorException(RuntimeError): + func: OpOverload + + +def ordered_set(*items): + return dict.fromkeys(items, True) + + +@contextlib.contextmanager +def unset_fake_temporarily(): + old = torch._C._unset_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE) + try: + yield old + finally: + if old is not None: + torch._C._set_dispatch_mode(old) + + +def is_fake(x): + if isinstance(x, FakeTensor): + return True + if is_traceable_wrapper_subclass(x): + attrs, _ = type(x).__tensor_flatten__(x) + flattened_tensors = [getattr(x, attr) for attr in attrs] + # need to recurse because we could have nested subclasses + all_fake = all(is_fake(x) for x in flattened_tensors) + any_fake = any(is_fake(x) for x in flattened_tensors) + assert all_fake == any_fake, "got mixed fake and real tensors!" + return all_fake + elif isinstance(x, torch.Tensor) and torch._is_functional_tensor(x): + reapply_views = torch._C._functionalization_reapply_views_tls() + unwrapped = torch._C._functorch._unwrap_functional_tensor(x, reapply_views) + return is_fake(unwrapped) + elif isinstance(x, torch.Tensor) and is_functorch_wrapped_tensor(x): + unwrapped = torch._C._functorch.get_unwrapped(x) + return is_fake(unwrapped) + return False + + +def maybe_get_fake_mode(t): + if isinstance(t, FakeTensor): + return t.fake_mode + if is_traceable_wrapper_subclass(t): + inner_tensor_names, _ = t.__tensor_flatten__() + modes = [ + maybe_get_fake_mode(getattr(t, t_name)) for t_name in inner_tensor_names + ] + m = modes[0] + assert all(m is x for x in modes) + return m + elif isinstance(t, torch.Tensor) and torch._is_functional_tensor(t): + reapply_views = torch._C._functionalization_reapply_views_tls() + unwrapped = torch._C._functorch._unwrap_functional_tensor(t, reapply_views) + return maybe_get_fake_mode(unwrapped) + elif isinstance(t, torch.Tensor) and is_functorch_wrapped_tensor(t): + unwrapped = torch._C._functorch.get_unwrapped(t) + return maybe_get_fake_mode(unwrapped) + return None + + +@functools.lru_cache(None) +def get_schema_info(func): + return torch._C._SchemaInfo(func._schema) # type: ignore[attr-defined] + + +# many of the decompositions registered to torch/_prims do not at the moment model +# aliasing or strides, so as an incremental step, just enable the decompositions in +# torch/_decomp/decompositions.py. 
+# decomps are used for aot autograd tracing so we would like to unify on their +# implementation and add additional testing to them +@functools.lru_cache(None) +def torch_decomp_decompositions(func): + from torch._decomp import decomposition_table + + decompositions = torch._decomp.decompositions + # Note that the function in the decomposition table might be + # different from the one in the module because of the difference + # in out handling in aten API and torch public API + return decomposition_table[func].__module__.startswith( + "torch._decomp" + ) and decomposition_table[func].__name__ in dir(decompositions) + + +def tree_flatten_only(ty: Type[T], tree: PyTree): + flat_vals = pytree.tree_leaves(tree) + return [elem for elem in flat_vals if isinstance(elem, ty)] + + +# Similar to `MetaConverter`, this is a class for converting +# multiple tensors into fake tensors which share the same view/storage +# structure. Like `MetaConverter`, it uses `WeakIdRef` to +# hold a weak reference for all memoized tensors. +class FakeTensorConverter: + @property + def tensor_memo(self): + return self.meta_converter.tensor_memo + + meta_converter: MetaConverter + constant_storage_mapping: Dict[StorageWeakRef, List[ReferenceType]] + + def __init__(self): + self.meta_converter = MetaConverter() + + # map from to storage to corresponding constant tensors + self.constant_storage_mapping = {} + + def add_constant_storage_mapping(self, fake_tensor): + # when you have a constant, aliased tensor: + # const_tensor.add_(torch.rand([1])) + # all aliases of it must become no longer const + assert isinstance(fake_tensor, FakeTensor) and fake_tensor.constant is not None + weak_st = StorageWeakRef(fake_tensor.constant._typed_storage()) + + # we need a map from a weak storage to all of its corresponding + # constant tensors. 
python doesn't have the weak value equivalent + # of defaultdict(list), so we are using a WeakValueDictionary as one + if weak_st not in self.constant_storage_mapping: + self.constant_storage_mapping[weak_st] = [] + self.constant_storage_mapping[weak_st].append(weakref.ref(fake_tensor)) + + def invalidate_constant_aliases(self, tensor): + assert not isinstance(tensor, FakeTensor) + + weak_st = StorageWeakRef(tensor._typed_storage()) + if weak_st not in self.constant_storage_mapping: + return + + for weak_tensor_ref in self.constant_storage_mapping[weak_st]: + ten = weak_tensor_ref() + if ten is not None: + ten._fix_weakref() + ten.constant = None + + del self.constant_storage_mapping[weak_st] + + def _get_memo(self, t): + if WeakIdRef(t) in self.tensor_memo: + out = self.tensor_memo[WeakIdRef(t)] + out._fix_weakref() + return out + return None + + def set_tensor_memo(self, t, v): + th = WeakIdRef(t) + + # hold a weak ref to self, otherwise it will be kept alive + # by the del_ten closure + self_weak_ref = weakref.ref(self) + + def del_ten(): + self_ref = self_weak_ref() + if self_ref is None: + return + # on shutdown, th may not be in memo + self_ref.tensor_memo.pop(th, None) + + weakref.finalize(t, del_ten) + self.tensor_memo[th] = v + + def from_real_tensor( + self, + fake_mode, + t, + make_constant=False, + shape_env=None, + *, + source=None, + symbolic_context=None, + memoized_only=False, + ): + # see note [Tensor Fakification and Symbol Caching] + if not symbolic_context and not source and shape_env: + if tracing_context := torch._guards.TracingContext.try_get(): + if t in tracing_context.tensor_to_context: + symbolic_context = tracing_context.tensor_to_context[t] + source = symbolic_context.tensor_source + + maybe_memo = self._get_memo(t) + if maybe_memo is not None: + return maybe_memo + if memoized_only: + return None + existing_device = t.device + # not yet supported in metatensors + if t.is_quantized: + raise UnsupportedFakeTensorException("quantized nyi in meta tensors") + if type(t) is torch.nn.Parameter: + assert not make_constant + + def mk_fake_tensor(make_meta_t): + # NB: don't use in_kernel_invocation_manager. to + # ensure FakeTensor can internally do constant computation + # as necessary. Invocation manager is "more correct" as + # it works for more operators in make_meta_t, but + # invariant is that make_meta_t only calls factories + # for which it is not strictly necessary to use the + # invocation manager (I think!) + with no_dispatch(): + return FakeTensor( + fake_mode, + make_meta_t(), + existing_device, + constant=t if make_constant else None, + ) + + out = self.meta_converter( + t, + shape_env=shape_env, + callback=mk_fake_tensor, + source=source, + symbolic_context=symbolic_context, + ) + if out is NotImplemented: + raise UnsupportedFakeTensorException("meta converter nyi") + if make_constant: + self.add_constant_storage_mapping(out) + # NB: meta_converter set the memo + return out + + # If you specify the device, it MUST be a meta tensor. + def from_meta_and_device(self, fake_mode, t, device): + assert ( + t.device.type == "meta" + ), f"tensor's device must be `meta`, got {t.device.type} instead" + maybe_memo = self._get_memo(t) + if maybe_memo is not None: + return maybe_memo + out = FakeTensor(fake_mode, t, device) + self.set_tensor_memo(t, out) + return out + + # You can have a real tensor that you need to convert into a fake tensor. + # If you have a meta tensor already, call from_meta_and_device. 
+ # + # You're allowed to pass a meta tensor to be turned into a fake + # tensor; although an odd thing to do, this can occur if you're doing + # cross ref testing and the inner test is already operating on meta tensors. + def __call__( + self, + fake_mode, + t, + *, + make_constant=False, + shape_env=None, + source=None, + symbolic_context=None, + memoized_only=False, + ): + return self.from_real_tensor( + fake_mode, + t, + make_constant, + shape_env=shape_env, + source=source, + symbolic_context=symbolic_context, + memoized_only=memoized_only, + ) + + +@functools.lru_cache(None) +def init_cuda_context(): + # Backward will error with cuda Fake Tensors if no cuda tensors have been initialized first + if torch.cuda.is_available(): + torch.empty(1, device="cuda") if torch.version.hip is None else torch.zeros( + 1, device="cuda" + ) + + +@contextlib.contextmanager +def in_kernel_invocation_manager(fake_mode): + # See: note [Fake Tensor Dispatch Keys] + prev_in_kernel = fake_mode.in_kernel_invocation + meta_in_tls = torch._C._meta_in_tls_dispatch_include() + assert meta_in_tls == prev_in_kernel, f"{meta_in_tls}, {prev_in_kernel}" + + guard = torch._C._DisableTorchDispatch() # type: ignore[attr-defined] + fake_mode.in_kernel_invocation = True + torch._C._set_meta_in_tls_dispatch_include(True) + try: + yield + finally: + fake_mode.in_kernel_invocation = prev_in_kernel + torch._C._set_meta_in_tls_dispatch_include(prev_in_kernel) + del guard + + +# Return if the function allows Python numbers to bind to Tensors +def should_allow_numbers_as_tensors(func: OpOverload): + return torch._C._should_allow_numbers_as_tensors( + func.name().split("::")[-1].split(".")[0] + ) + + +class FakeTensorConfig: + debug = os.environ.get("TORCH_FAKE_TENSOR_DEBUG", "0") == "1" + + +class FakeTensor(torch.Tensor): + """ + Meta tensors give you the ability to run PyTorch code without having to + actually do computation through tensors allocated on a `meta` device. + Because the device is `meta`, meta tensors do not model device propagation. + FakeTensor extends MetaTensors to also carry an additional `fake_device` + which tracks devices that would have been used. + """ + + fake_device: torch.device + fake_mode: "FakeTensorMode" + constant: Optional[torch.Tensor] + + # This memorizes the unbacked SymInt representing the number of nonzero + # elements in this tensor. This is helpful if you do something like + # x[mask] and y[mask]; mask.nonzero() gets repeatedly called and should + # give a consistent unbacked SymInt. It needs to be invalidated in the + # same way constant is. + # TODO: Generalize this as needed, e.g., into a trie of memos + _nonzero_memo: Optional[torch.SymInt] + _nonzero_memo_vc: Optional[int] + + # Indicates to our torch_dispatch dispatching infra that + # this is an "infra" mode with lower dispatching precedence. + _mode_key = torch._C._TorchDispatchModeKey.FAKE + + @property + def nonzero_memo(self): + if self._nonzero_memo is None: + return None + # Version counter based tracking isn't 100% sound but it's close + # enough + if self._nonzero_memo_vc != self._version: + self._nonzero_memo = None + return None + return self._nonzero_memo + + @property + def device(self): + if self.fake_mode.in_kernel_invocation: + return torch.device("meta") + else: + return self.fake_device + + # Note: [Fake Tensor Dispatch Keys] + # In order to model the behavior of device-specific autocast + # and autograd logic, we update the dispatch keys of FakeTensors + # to reflect their fake device. 
This includes the BackendComponent + # (DispatchKey::Meta -> DispatchKey::CUDA), and also the BackendComponent + # related Autocast and Autograd keys. __torch__dispatch__ sits below + # Autocast and Autograd, and is only invoked when we are at the + # kernel for the BackendComponent. Then, we add Meta to the + # thread-local dispatch include set to hit the meta kernel + # instead of the kernel of the BackendComponent for the fake device. + # The `device_for_backend_keys` does that below + # NOTE: this probably will not do the right thing for backends + # that have dispatch keys which are higher than the "meta" key: + # https://github.com/pytorch/pytorch/blob/main/c10/core/DispatchKey.h#L189 + + # We don't support named tensors; graph break + @property + def names(self): + raise UnsupportedFakeTensorException( + "torch.compile doesn't support named tensors" + ) + + @staticmethod + def __new__(cls, fake_mode, elem, device, constant=None): + self = torch.Tensor._make_subclass( + cls, + elem, + elem.requires_grad, + dispatch_device=True, + device_for_backend_keys=device, + ) + + assert elem.device.type == "meta", elem.device.type + device = device if isinstance(device, torch.device) else torch.device(device) + # NB: it is fine, if a little confusing, for device to be meta + # (we are faking a meta tensor in that case). However, it often + # indicates some sort of confusion (e.g., you accidentally passed + # in a meta tensor when you should have passed in the real tensor). + # So by default we disallow meta, and if you are working in a situation + # where it is helpful (e.g., crossref testing) you can turn it back + # on + if not fake_mode.allow_meta: + assert device.type != "meta" + # normalize device. + if device.type == "cuda": + init_cuda_context() + + if ( + device.type + in ["cuda", "hpu", "xpu", torch._C._get_privateuse1_backend_name()] + and device.index is None + ): + device = torch.device( + f"{device.type}:{getattr(torch, device.type).current_device()}" + ) + self.fake_device = device # type: ignore[attr-defined] + self.fake_mode = fake_mode # type: ignore[attr-defined] + self.constant = constant # type: ignore[attr-defined] + self._nonzero_memo = None # type: ignore[attr-defined] + self._nonzero_memo_vc = None # type: ignore[attr-defined] + + if FakeTensorConfig.debug: + import traceback + + self._debug_trace = traceback.extract_stack() # type: ignore[attr-defined] + return self + + # In some circumstances, a conventional torch.Tensor constructor + # will get rewritten to call into FakeTensor. We must provide an + # __init__ method that can accept the Python interpreters initialization + # in such a situation; we must also be able to handle direct fake + # tensor construction via FakeTensor(). + # + # In particular, the __init__ call will look funny in the following case: + # + # with FakeTensorMode(): + # x = torch.Tensor([1, 2, 3]) + # + # this desugars into: + # + # with FakeTensorMode(): + # x = torch.Tensor.__new__([1, 2, 3]) + # # NB: x is a fake tensor, because of the mode! + # x.__init__([1, 2, 3]) # not the normal fake tensor args! 
+ # + def __init__(self, *args, **kwargs): + super().__init__() + + @staticmethod + def from_tensor(t, fake_mode): + return fake_mode.from_tensor(t) + + @classmethod + @count + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + # need to handle here to avoid infinite recursion + # see [in_kernel_invocation] + if func == torch.ops.prim.device.default: + assert len(args) == 1 and isinstance(args[0], FakeTensor) + if args[0].fake_mode.in_kernel_invocation: + return torch.device("meta") + else: + return args[0].fake_device + + # Because fake mode can return NotImplemented (if it sees a subclass + # it doesn't know how to deal with), this test here is important + # because the next dispatch after a fake mode will attempt to use + # subclasses of tensors to dispatch, and any FakeTensor arguments + # will be considered eligible. + unrecognized_types = [ + t for t in types if not issubclass(t, FakeTensor) and t is not torch.Tensor + ] + if unrecognized_types: + not_implemented_log.debug( + "FakeTensor unrecognized subclass(es): %s", unrecognized_types + ) + return NotImplemented + + fake_mode = None + for arg in pytree.arg_tree_leaves(*args, **kwargs): + if isinstance(arg, FakeTensor): + fake_mode = arg.fake_mode + break + + assert fake_mode is not None + + # If the fake mode is already active, don't try to reapply it! + # NotImplemented is the right thing to return here, because the + # typical situation this can occur is if ProxyTensorMode returned a + # NotImplemented because of a not implemented subclass; we may have + # unluckily attempted to hit FakeTensor's dispatch first, + # NotImplemented lets us keep chaining until we find the actual + # subclass + maybe_cur_fake_mode = torch._C._get_dispatch_mode( + torch._C._TorchDispatchModeKey.FAKE + ) + if maybe_cur_fake_mode: + not_implemented_log.debug( + "FakeTensor mode already active: %s in %s", + fake_mode, + maybe_cur_fake_mode, + ) + return NotImplemented + + with fake_mode: # type: ignore[attr-defined] + return func(*args, **kwargs) + + @staticmethod + def _find_common_device(func, flat_args) -> Tuple[torch.device, bool]: + # Returns: (common_device, has_scalar_only_inputs) + + # cpu - zero-dim tensors can be called in cuda kernels, + # so overwrite the common_device if it the only existing + # device comes from a cpu zero-dim tensor + common_device = None + has_scalar_only_inputs = False + is_cpu_zero_dim = None + + def cpu_zero_dim(t): + return t.device.type == "cpu" and t.dim() == 0 + + def merge_devices(t): + nonlocal common_device + nonlocal is_cpu_zero_dim + if not isinstance(t, FakeTensor): + return + + if common_device is None: + common_device = t.device + is_cpu_zero_dim = cpu_zero_dim(t) + return + + t_is_cpu_zero_dim = cpu_zero_dim(t) + if t.device == common_device: + if is_cpu_zero_dim: + is_cpu_zero_dim = t_is_cpu_zero_dim + return + + # mismatching devices ! + # if current tensor is cpu 0 dim, defer to existing device + if t_is_cpu_zero_dim: + return + + # current device is from cpu 0 dim tensor, overwrite + if is_cpu_zero_dim: + common_device = t.device + is_cpu_zero_dim = t_is_cpu_zero_dim + return + + # mismatching devices of non-zero dim tensors, throw + # This might be valid behavior and need to be explicitly modeled, e.g. 
reshape_as + raise RuntimeError( + f"Unhandled FakeTensor Device Propagation for {func}, found two different devices {common_device}, {t.device}" + ) + + for arg in flat_args: + merge_devices(arg) + + # some functions that allow Python numbers to bind to Tensors + # if we have failed to find a device, and we're running one of these operators, + # we must have scalar only inputs + if should_allow_numbers_as_tensors(func) and common_device is None: + # ops with scalar only inputs always have result on cpu + has_scalar_only_inputs = True + common_device = torch.device("cpu") + + assert common_device is not None, f"Could not find common device for {func}" + + return common_device, has_scalar_only_inputs + + # We must handle tolist in a special way for FakeTensors here in the case + # where tolist is called from torch dispatch for tensor subclasses. + # Ordinarily, if a program calls .tolist compiling still works because there is + # special handling in dynamo, but for tensor subclasses if .tolist is called + # inside torch dispatch, the .tolist call may be directly on a FakeTensor. + # This would result in an error since wrapper subclasses don't have storage. + # To avoid this, we handle the FakeTensor case by (1) specializing on the size + # of the tensor to create the output Python list, and (2) creating unbacked + # symints for each element of the list. + def tolist(self): + assert self.dim() == 1, "NYI for higher dims" + shape_env = self.fake_mode.shape_env + out = [] + # Specialize on the length of the list + for _ in range(self.shape[0]): + s = shape_env.create_unbacked_symint() + # max value? + torch._constrain_as_size(s, min=2) + out.append(s) + return out + + +@dataclass(frozen=True) +class TensorMetadata: + """ + The Tensor metadata relevant to hashing FakeTensors when caching. + """ + + dtype: torch.dtype + shape: torch.Size + stride: Tuple[Any, ...] + device: torch.device + layout: torch.layout + memory_format: Optional[torch.memory_format] + storage_offset: int + requires_grad: bool + is_quantized: bool + is_conj: bool + is_neg: bool + is_inference: bool + is_sparse: bool # read: is sparse COO + is_coalesced: Optional[bool] + dense_dim: Optional[int] + sparse_dim: Optional[int] + + +def extract_tensor_metadata(t: torch.Tensor) -> "TensorMetadata": + """ + Extract the TensorMetadata of a tensor. + """ + memory_format = suggest_memory_format(t) + if is_sparse_any(t) or not t.is_contiguous(memory_format=memory_format): + memory_format = None + + return TensorMetadata( + dtype=t.dtype, + shape=t.shape, + stride=t.stride() if t.layout == torch.strided else (), + device=t.device, + layout=t.layout, + memory_format=memory_format, + storage_offset=t.storage_offset(), + requires_grad=t.requires_grad, + is_quantized=t.is_quantized, + is_conj=t.is_conj(), + is_neg=t.is_neg(), + is_inference=t.is_inference(), + is_sparse=t.is_sparse, + is_coalesced=t.is_coalesced() if t.is_sparse else None, + dense_dim=t.dense_dim() if t.is_sparse else None, + sparse_dim=t.sparse_dim() if t.is_sparse else None, + ) + + +@dataclass(frozen=True) +class _ShapeEnvSettings: + """ + Encapsulates all shape env settings that could potentially affect + FakeTensor dispatch. Used when creating dispatch cache keys. 
+ """ + + allow_scalar_outputs: bool + allow_dynamic_output_shape_ops: bool + assume_static_by_default: bool + specialize_zero_one: bool + duck_shape: bool + + def __init__(self, env: "ShapeEnv"): + # Initialize this way because the class is frozen (to enable hashing): + object.__setattr__(self, "allow_scalar_outputs", env.allow_scalar_outputs) + object.__setattr__( + self, "allow_dynamic_output_shape_ops", env.allow_dynamic_output_shape_ops + ) + object.__setattr__( + self, "assume_static_by_default", env.assume_static_by_default + ) + object.__setattr__(self, "specialize_zero_one", env.specialize_zero_one) + object.__setattr__(self, "duck_shape", env.duck_shape) + + +class _DispatchCacheKey(list): + """ + Key for the FakeTensor dispatch cache. Inspired by (copied from) + _HashedSeq from the functools.lru_cache implementation. + """ + + __slots__ = "hashvalue" # noqa: PLC0205 + + def __init__(self, tup, hash=hash): + self[:] = tup + self.hashvalue = hash(tup) + + def __hash__(self): + return self.hashvalue + + +@dataclass(frozen=True) +class _DispatchCacheEntry: + """ + Entry type for the FakeTensor dispatch cache. Accounts for two possibilities: + 1) The op is inplace, and a hit means we need to alias the argument at a given + index. 2) We need to synthesize a new FakeTensor given tensor metadata. For view + ops, we further capture the index of the arg to alias. + """ + + inplace_idx: Optional[int] = None + metadata: Optional[TensorMetadata] = None + view_idx: Optional[int] = None + + +@dataclass(frozen=True) +class _BypassDispatchCache(Exception): + """ + Signals cases that should skip FakeTensor caching. + """ + + reason: str + + +@dataclass(frozen=True) +class DispatchCacheInfo: + """ + Information about the state of the FakeTensor dispatch cache. + """ + + hits: int + misses: int + bypasses: Dict[str, int] + size: int + + +# We keep one instantiation of `fake_tensor_converter` active +# for the duration of `with FakeTensorMode()`. +# This allows accurate storage aliasing across invocation of +# different operators. While this will keep all freshly allocated +# tensors alive during `FakeTensorMode`, there will no be no +# new allocations of Tensors which have non-meta storage so +# memory should not significantly increase. 
+ + +class FakeTensorMode(TorchDispatchMode): + cache: Dict[_DispatchCacheKey, _DispatchCacheEntry] = {} + cache_hits: int = 0 + cache_misses: int = 0 + cache_bypasses = defaultdict(int) + + def __init__( + self, + *, + allow_fallback_kernels=True, + allow_non_fake_inputs=False, + shape_env=None, + static_shapes=None, + ): + log.debug("create_mode 0x%x", id(self)) + self.allow_fallback_kernels = allow_fallback_kernels + self.fake_tensor_converter = FakeTensorConverter() + if static_shapes is not None: + self.static_shapes = static_shapes + else: + self.static_shapes = shape_env is None + + import torch._dynamo.config + import torch._functorch.config + + self.allow_meta = torch._functorch.config.fake_tensor_allow_meta + self.cache_enabled = torch._dynamo.config.fake_tensor_cache_enabled + self.cache_crosscheck_enabled = ( + torch._dynamo.config.fake_tensor_cache_crosscheck_enabled + ) + + # A flag that controls, whether we want to invoke ops on mix of + # real weights/global variables and fake inputs + self.allow_non_fake_inputs = allow_non_fake_inputs + + # [in_kernel_invocation] + # when FakeTensor is invoked in user code, .device should return + # the fake_device of the tensor so that code such as as `if x.is_cuda` + # or torch.zeros([10, 10], device=x.device) continues to execute as if + # the FakeTensor were real. However, within kernel execution, we return + # the `Meta` device because all computation within the kernels should + # behave as if the Tensors are on meta devices. Kernels should allocate + # new tensors on meta devices, and checks like `is_meta` should return true. + # within python refs, we always return the real device by defining + # the device property + self.in_kernel_invocation = False + + # True if we enter'ed and actually enabled fake tensor mode, + # false if it was a no-op. Not thread safe but neither is + # in_kernel_invocation + # If another fake mode was already active when we enter, we also stash it here. + # That way when we exit, we know to re-enable the previous fake mode. + self.enter_stack: List[Tuple[bool, Optional[FakeTensorMode]]] = [] + + self.shape_env = shape_env + + self.stack = "".join(traceback.format_stack()) + + # Indicates to our torch_dispatch dispatching infra that + # this is an "infra" mode with lower dispatching precedence. + self._mode_key = torch._C._TorchDispatchModeKey.FAKE + + # Typically, there is only one fake tensor mode and you test for it by + # doing an isinstance test. However, in some situations, there might be + # TWO fake tensor modes. The canonical example of this is exporting + # a fake model: there is an outer fake mode created by the user, and + # an inner fake mode created by Dynamo. The two phase process is required + # because the outer fake mode typically won't have a ShapeEnv, even if + # the user is interested in exporting with dynamic shapes (so the inner + # fake mode will actually have a ShapeEnv and swap in symbolic sizes.) + # + # In this case, it's insufficient to test only one FakeTensor: you need + # to distinguish between our fake tensor and other fake tensors. That's + # what this function does. + def is_our_fake(self, t): + return isinstance(t, FakeTensor) and t.fake_mode is self + + @count + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + # FakeTensorMode should not be set when we're inside of it. 
+ assert ( + torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE) is None + ), func + try: + return self.dispatch(func, types, args, kwargs) + except TypeError: + log.exception("fake tensor raised TypeError") + raise + + # No-op if FakeTensorMode is already in use + def __enter__(self): + maybe_prev_fake_mode = torch._C._unset_dispatch_mode(self._mode_key) + if self is not maybe_prev_fake_mode: + self.enter_stack.append((True, maybe_prev_fake_mode)) + return super().__enter__() + else: + # no-op (still need to re-set the fake mode though since we unset it) + torch._C._set_dispatch_mode(self) + self.enter_stack.append((False, None)) + return self + + def __exit__(self, a, b, c): + live, maybe_prev_fake_mode = self.enter_stack.pop() + if live: + out = super().__exit__(a, b, c) + # Re-enable the previous fake mode, if there was one. + if maybe_prev_fake_mode is not None: + torch._C._set_dispatch_mode(maybe_prev_fake_mode) + + @classmethod + def cache_info(cls) -> DispatchCacheInfo: + """ + Query the state of the dispatch cache. + """ + return DispatchCacheInfo( + FakeTensorMode.cache_hits, + FakeTensorMode.cache_misses, + dict(FakeTensorMode.cache_bypasses), + len(FakeTensorMode.cache), + ) + + @classmethod + def cache_clear(cls): + """ + Clear the dispatch cache. + """ + cls.cache_hits = 0 + cls.cache_misses = 0 + cls.cache_bypasses.clear() + cls.cache.clear() + + def _cached_dispatch_impl( + self, + func: OpOverload, + types: Tuple[Any, ...], + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ): + """ + Lookup a cache entry for the given arguments. If none exists, dispatch + and cache the result (if the result is eligible for caching). + """ + output = unassigned = object() + try: + key = self._cache_key(func, args, kwargs) + entry = FakeTensorMode.cache.get(key, None) + if entry is not None: + output = self._output_from_cache_entry(entry, func, args) + FakeTensorMode.cache_hits += 1 + if self.cache_crosscheck_enabled: + # For debugging / testing: Validate that the output synthesized + # from the cache matches the output created by normal dispatch. + self._crosscheck_cache_output(output, func, types, args, kwargs) + else: + output = self._dispatch_impl(func, types, args, kwargs) + entry = self._make_cache_entry(key, func, args, kwargs, output) + FakeTensorMode.cache[key] = entry + FakeTensorMode.cache_misses += 1 + except _BypassDispatchCache as e: + FakeTensorMode.cache_bypasses[e.reason] += 1 + + if output is unassigned: + output = self._dispatch_impl(func, types, args, kwargs) + + return output + + def _cache_key( + self, + func: OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ) -> _DispatchCacheKey: + """ + Create a cache key given the dispatch args. Raises _BypassDispatchCache + for any situation that precludes caching. + """ + # Avoid caching for any ops that would require a more sophisticated + # caching implementation, e.g., data dependent ops or ops that modify + # the inputs. 
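+        # For example (hedged inference from the op registrations earlier in
+        # this diff): aten.nonzero.default carries
+        # torch.Tag.dynamic_output_shape and aten._local_scalar_dense.default
+        # carries torch.Tag.data_dependent_output, so both bypass the cache
+        # below instead of being cached.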
+ if torch.Tag.data_dependent_output in func.tags: + raise _BypassDispatchCache("data dependent output") + + if torch.Tag.dynamic_output_shape in func.tags: + raise _BypassDispatchCache("dynamic output shape") + + if torch.Tag.inplace_view in func.tags: + raise _BypassDispatchCache("inplace view") + + if func == aten._unsafe_view.default: + raise _BypassDispatchCache("unsafe view") + + if func in self.lift_fns: + raise _BypassDispatchCache("lift") + + if not torch._library.utils.is_builtin(func): + raise _BypassDispatchCache("non-builtin") + + # In order to handle storage aliasing, we need to establish the alias + # for any view op on a cache hit. But CompositeImplicitAutograd ops may + # or may not alias the input, so just punt on caching these. + if func.is_view and torch._C._dispatch_has_kernel_for_dispatch_key( + func.name(), torch._C.DispatchKey.CompositeImplicitAutograd + ): + raise _BypassDispatchCache("CompositeImplicitAutograd") + + key_values = ( + func, + # Translate any FakeTensor args to metadata. + self._prep_args_for_hash(args) if args else (), + self._prep_args_for_hash(kwargs) if kwargs else (), + # Capture the default_dtype mode since that can affect the output tensor, + # e.g., when operating on constant float values. + torch.get_default_dtype(), + # Capture the current device to support, e.g., cache tensor creation, + # where there isn't necessarily a tensor to take the device from. + torch._C._get_default_device(), + # We want to create tensors from cached metadata only when the inference + # mode is the same. + torch.is_inference_mode_enabled(), + # Shape env settings could affect behavior. One example seen in the wild: + # Disasllowing dynamic shapes can introduce a DynamicOutputShapeException + # where it wasn't seen on a previous instance of the same op. + _ShapeEnvSettings(self.shape_env) if self.shape_env else None, + ) + return _DispatchCacheKey(key_values) + + def _prep_args_for_hash(self, args: Any) -> Any: + """ + Translate the provided args into a form suitable for caching at FakeTensor + dispatch, i.e., convert unhashable types like lists & dicts into tuples and + convert FakeTensors into metadata. Raises _BypassDispatchCache to signal + unsupported cases that should bypass caching. + """ + if isinstance(args, dict): + args = list(args.keys()) + list(args.values()) + + result = [] + for arg in args: + if isinstance(arg, FakeTensor): + if not self.is_our_fake(arg): + raise _BypassDispatchCache("not our fake") + if arg._has_symbolic_sizes_strides: + raise _BypassDispatchCache("symbolic shape") + if arg.constant is not None: + raise _BypassDispatchCache("constant attribute") + if arg.is_sparse: + raise _BypassDispatchCache("sparse tensor") + if is_sparse_compressed(arg): + raise _BypassDispatchCache("sparse compressed tensor") + result.append(extract_tensor_metadata(arg)) + elif isinstance(arg, torch.Tensor): + raise _BypassDispatchCache("non-fake tensor") + elif isinstance(arg, (torch.SymBool, torch.SymInt, torch.SymFloat)): + raise _BypassDispatchCache("symbolic shape") + elif isinstance(arg, (list, tuple, dict)): + result.extend(self._prep_args_for_hash(arg)) + else: + # It's important to capture the type of the arg since, e.g., 1 and 1.0 + # hash to the same value, but can produce different dtypes for the + # output tensor. 
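+                # e.g. (illustrative): torch.add(t, 1) vs. torch.add(t, 1.0)
+                # must not collide in the cache, since the Python float can
+                # promote the output to a floating-point dtype.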
+ result.append((type(arg), arg)) + + return tuple(result) + + def _make_cache_entry( + self, + key: _DispatchCacheKey, + func: OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + output: FakeTensor, + ) -> _DispatchCacheEntry: + """ + Make a cache entry object for the given 'output' Tensor. Raises + _BypassDispatchCache if the output tensor has characteristics that + prevent caching it. + """ + # Some ops return tuples of Tensors, but it's rare, so avoid + # the complexity of caching other types. + if not isinstance(output, FakeTensor): + raise _BypassDispatchCache("non-FakeTensor output") + + # Avoid caching FakeTensors with constants attached since those + # can be invalidated. + if output.constant is not None: + raise _BypassDispatchCache("constant attribute") + + # TODO: support caching sparse outputs? + if output.is_sparse: + raise _BypassDispatchCache("sparse output") + + if is_sparse_compressed(output): + raise _BypassDispatchCache("sparse compressed output") + + # Can an in-place op really reference a kwarg? If so, then we need + # to extend the implementation to handle it. + for kval in kwargs.values(): + if id(kval) == id(output): + raise _BypassDispatchCache("kwarg aliases output") + + # If this is an in-place op, the entry records which input arg is aliased. + for idx in range(len(args)): + if id(args[idx]) == id(output): + return _DispatchCacheEntry( + inplace_idx=idx, metadata=None, view_idx=None + ) + + # Otherwise, create an entry that records the output tensor's metadata. + view_idx = None + if func.is_view: + idxs = [i for i, t in enumerate(args) if isinstance(t, torch.Tensor)] + assert len(idxs) == 1 + view_idx = idxs[0] + + metadata = extract_tensor_metadata(output) + entry = _DispatchCacheEntry( + inplace_idx=None, metadata=metadata, view_idx=view_idx + ) + + # N.B.: Some checks for bypassing the cache would be performed on the + # output tensor synthesized from the cached metadata. As an optimization, + # we can synthesize a tensor here and do the checks on that instance. + # This approach keeps the (more frequent) cache-hit path as lightweight + # as possible. + synth_output = self._output_from_cache_entry(entry, func, args) + + # Make sure the dispatch_key_set from the synthesized output tensor will + # be the same. + synth_key_set = torch._C._dispatch_key_set(synth_output) + key_set = torch._C._dispatch_key_set(output) + if synth_key_set != key_set: + raise _BypassDispatchCache("dispatch_key_set mismatch") + + return entry + + def _output_from_cache_entry( + self, entry: _DispatchCacheEntry, func: OpOverload, args: Tuple[Any, ...] + ) -> FakeTensor: + """ + Create a new FakeTensor from the cache entry. + """ + if entry.inplace_idx is not None: + # This is an in-place op; return the aliased arg. + return args[entry.inplace_idx] + + # Synthesize a new FakeTensor with the cached metadata. + metadata = entry.metadata + assert not metadata.is_sparse + + empty = torch.empty_strided( + metadata.shape, + metadata.stride, + dtype=metadata.dtype, + layout=metadata.layout, + device="meta", + requires_grad=metadata.requires_grad, + ) + + if metadata.is_conj: + torch._C._set_conj(empty, True) + if metadata.is_neg: + torch._C._set_neg(empty, True) + + if func.is_view: + # For view ops, the storage should be the same as the tensor input. 
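+            # e.g. (illustrative): for a cached aten.slice.Tensor hit,
+            # view_idx points at the sliced source, so the tensor synthesized
+            # below is set_ onto that source's untyped storage instead of
+            # fresh meta storage.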
+ storage = args[entry.view_idx].untyped_storage() + with in_kernel_invocation_manager(self): + empty.set_( + storage, metadata.storage_offset, metadata.shape, metadata.stride + ) + elif metadata.storage_offset != 0: + storage = empty.untyped_storage() + with in_kernel_invocation_manager(self): + empty.set_( + storage, metadata.storage_offset, metadata.shape, metadata.stride + ) + + return FakeTensor(self, empty, metadata.device) + + def _crosscheck_cache_output( + self, + output: FakeTensor, + func: OpOverload, + types: Tuple[Any, ...], + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ): + """ + Helper to validate that the output synthesized from the cache matches + the output created by normal dispatch. + """ + try: + true_output = self._dispatch_impl(func, types, args, kwargs) + except Exception as e: + raise RuntimeError( + f"FakeTensor cache crosscheck failure: func={func}, " + f"args={args}, kwargs={kwargs}: Dispatch raised={e}" + ) from e + try: + assert_metadata_eq(assert_eq, true_output, output) + except Exception as e: + raise RuntimeError( + f"FakeTensor cache crosscheck failure: func={func}, " + f"args={args}, kwargs={kwargs}" + ) from e + + def dispatch(self, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + with no_dispatch(): + log.debug("%s %s %s", func, args, kwargs) + + if func in _DISPATCH_META_HANDLERS: + return _DISPATCH_META_HANDLERS[func](args) + + if log.getEffectiveLevel() <= logging.DEBUG: + log.debug( + "%sFakeTensorMode.__torch_dispatch__: %s", " " * RECURSION_COUNT, func + ) + # NOTE: incr is intentionally unused for a RAII pattern + incr = IncrementRecursionCount() + + # Some attribute queries that can be serviced directly + # See Note [is_coalesced is dispatched] + if func in _DISPATCH_HANDLE_DIRECTLY: + # NB: no_dispatch is ok here too, this func is very simple + with in_kernel_invocation_manager(self): + return func(*args, **kwargs) + + if self.cache_enabled: + return self._cached_dispatch_impl(func, types, args, kwargs) + else: + return self._dispatch_impl(func, types, args, kwargs) + + def _dispatch_impl(self, func, types, args, kwargs): + flat_args, args_spec = pytree.tree_flatten((args, kwargs)) + + flat_arg_fake_tensors = [ + t for t in flat_args if isinstance(t, FakeTensor) and self.is_our_fake(t) + ] + has_symbolic_sizes = any( + i._has_symbolic_sizes_strides for i in flat_arg_fake_tensors + ) or any(isinstance(a, torch.SymInt) for a in flat_args) + + converter = self.fake_tensor_converter + + def maybe_to_constant(t): + if isinstance(t, FakeTensor) and self.is_our_fake(t): + return t.constant + else: + return t + + # To constant propagate through these functions: + # 1, If this is a lift due to a torch.tensor call, + # the input tensor is guaranteed to be a + # constant, so we keep a copy of the original argument along so + # we can query it if we're asked to item() it at some later point. + # (Note that you can always call a lift fn manually, so we do + # have to check if there are any fake tensors!) 
+ # 2, Some functions that allow Python numbers to bind to Tensors, e.g, torch.div + if (func in self.lift_fns and not flat_arg_fake_tensors) or ( + should_allow_numbers_as_tensors(func) + and not has_symbolic_sizes + and not flat_arg_fake_tensors + ): + assert all( + t.constant is not None for t in flat_arg_fake_tensors + ), f"{func} should not have fake inputs without constants" + const_flat_args = [maybe_to_constant(a) for a in flat_args] + const_args, const_kwargs = pytree.tree_unflatten(const_flat_args, args_spec) + out = func(*const_args, **const_kwargs) + if type(out) is torch.Tensor and self.may_turn_const(out): + # NB: not in_kernel_invocation_manager because we're doing real + # compute here + # NB: no_dispatch() here is VERY DANGEROUS (like, segfault + # dangerous) if this is actually a wrapper subclass tensor, + # therefore the exact type test above + with no_dispatch(): + out = out.clone() + return converter(self, out, make_constant=True) + + # See [subclass inputs] below + # NB: If you're seeing a mysterious infinite loop involving fake + # tensor, it might be related to this line. Though I'm not sure + # how you'll know to read this comment, as this line won't show up + # in the stack trace. + unrecognized_types = self.check_for_subclass(flat_args) + if unrecognized_types: + not_implemented_log.debug( + "FakeTensorMode unrecognized subclass(es): %s", unrecognized_types + ) + return NotImplemented + + # if we are in the dispatch mode, we will enter this function even if the inputs + # are not FakeTensors. For now, throw if any non-Fake Tensor inputs + # and just support constructors. + + # this is generated from torch.tensor(), which does not use the + # dispatcher, to allow wrapper subclasses to wrap the new tensor + if func in self.lift_fns: + assert len(kwargs) == 0 and len(args) == 1, f"{args} {kwargs}" + + if type(args[0]) is torch.Tensor: + return converter(self, args[0]) + + # Recompute flat_arg_fake_tensors here again in case some of the inputs + # were real tensors and fakified in validate_and_convert_non_fake_tensors + (flat_args, flat_arg_fake_tensors) = self.validate_and_convert_non_fake_tensors( + func, converter, flat_args, args_spec + ) + del args, kwargs # Invalidated + + # The current constant handling only support tracing systems + # (aot autograd, torchdynamo) where each operation is run consecutively. + # Because each operation is run in order, we can trace out and support + # sequences like: x = torch.tensor(0.); y = x.add_(1) + # Whenver a constant is written to but with inputs that cannot be evaluated + # statically, such as random_(), we invalidate all constants that alias the input + # We will rely on functionalization for use of fake tensors constants as persistent + # objects on an FX Graph. 
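+        # For example, after x = torch.tensor(0.) the fake x carries a constant;
+        # a later x.random_() cannot be evaluated statically, so the constant on x
+        # (and on any alias of x) is invalidated instead of propagated.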
+ + # We dispatch size/stride/numel on the FakeTensor not its constant, so bail on inplace_view + all_constant = all(e.constant is not None for e in flat_arg_fake_tensors) + if ( + torch.Tag.nondeterministic_seeded not in func.tags + and torch.Tag.inplace_view not in func.tags + and all_constant + and len(flat_arg_fake_tensors) != 0 + and not has_symbolic_sizes + ): + const_flat_args = [maybe_to_constant(a) for a in flat_args] + const_args, const_kwargs = pytree.tree_unflatten(const_flat_args, args_spec) + + # NB: not in_kernel_invocation_manager(self) as we want to do REAL + # compute + with no_dispatch(): + out = func(*const_args, **const_kwargs) + + flat_out = pytree.tree_leaves(out) + flat_out_tensors = [t for t in flat_out if isinstance(t, torch.Tensor)] + all_constant = all(self.may_turn_const(t) for t in flat_out_tensors) + + if all_constant: + return pytree.tree_map_only( + torch.Tensor, + lambda t: converter(self, t, make_constant=True), + out, + ) + + # we weren't able to turn outputs to constants, + # so invalidate all constants that might be aliases of the outputs + for ten in flat_out_tensors: + converter.invalidate_constant_aliases(ten) + + # we are falling through to running non constant tensors, any input constant that + # is written to must be invalidated + args, kwargs = pytree.tree_unflatten(flat_args, args_spec) + self.invalidate_written_to_constants(func, flat_arg_fake_tensors, args, kwargs) + + # Try for fastpath + if has_symbolic_sizes: + fast_impl = get_fast_op_impls().get(func) + if fast_impl is not None: + return fast_impl(self, *args, **kwargs) + + # If there's a Python meta, prefer that over the decomposition + from torch._decomp import meta_table as meta_table + + if func not in meta_table and not self.cpp_meta_supports_symint(func): + from torch._decomp import decomposition_table + + # Prefer Python decompositions over C++ ones + if func in decomposition_table and ( + has_symbolic_sizes + or ( + # TODO: Remove these exclusions, so that we can remove + # this leg entirely + torch_decomp_decompositions(func) + and all(not e.is_sparse for e in flat_arg_fake_tensors) + ) + ): + with self: + return decomposition_table[func](*args, **kwargs) + + with self: + # Decomposes CompositeImplicitAutograd ops + r = func.decompose(*args, **kwargs) + if r is not NotImplemented: + return r + + # prims already wrap FakeTensor inputs to FakeTensor outputs + # and do device logic, we dont need do anything but run them + # and ensure that Meta kernels are dispatched to (see) + # Fake Tensor Dispatch Keys + # TODO - we should be use the prim aten impl + # TODO - fix prims complex ops + if ( + "prims::" in func._schema.name + and hasattr(func, "prim_meta_impl") + and not stride_incorrect_op(func) + ): + with self: + return func.prim_meta_impl(*args, **kwargs) + + # Users can register FakeTensor rules for custom operators + # Call them if they exist. 
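+        # (A hedged illustration of such a rule, registered via the torch.library
+        # abstract-impl mechanism available at this point in time, roughly:
+        #     @torch.library.impl_abstract("mylib::custom_op")
+        #     def custom_op_abstract(x):
+        #         return torch.empty_like(x)
+        # where "mylib::custom_op" is a hypothetical operator name.)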
+ maybe_abstract_impl = torch._library.simple_registry.singleton.find( + func.name() + ).abstract_impl.kernel + if maybe_abstract_impl: + ctx = torch._library.abstract_impl.AbstractImplCtx(self.shape_env, func) + with torch._library.abstract_impl.set_ctx_getter(lambda: ctx), self: + result = maybe_abstract_impl(*args, **kwargs) + return result + + # special handling for funcs registered through `register_op_impl`, + # e.g., manipulating args on constructor calls to construct meta tensors + # and then afterwards wrapping them to a FakeTensor + for run_impl_check, op_impl in op_implementations_checks: + if run_impl_check(func): + op_impl_out = op_impl(self, func, *args, **kwargs) + if op_impl_out != NotImplemented: + return op_impl_out + + def maybe_run_unsafe_fallback(error=None): + # We infer the meta of a custom ops that return None to just + # return None. custom ops are not allowed to mutate metadata + # of their inputs, so this is safe. + if can_generate_trivial_abstract_impl(func): + return None + # no meta kernel registered, fallback to kernel for the device + if has_symbolic_sizes or not self.can_run_unsafe_fallback(func): + raise UnsupportedOperatorException(func) + if error is None: + error = UnsupportedOperatorException(func) + return run_fallback_kernel(self, func, flat_args, args_spec, error) + + # Optimization: If there is no Meta kernel, it takes a surprisingly long + # amount of time to catch the NotImplementedError, so we check it here. + if not has_meta(func): + return maybe_run_unsafe_fallback() + + # run kernel registered to meta for func, which include + # python meta registrations, prims, decomps, and c++ meta fns (structured kernels) + # It's possible that the kernel will return NotImplementedError + try: + with in_kernel_invocation_manager(self): + r = func(*args, **kwargs) + except NotImplementedError as not_implemented_error: + return maybe_run_unsafe_fallback(not_implemented_error) + + return self.wrap_meta_outputs_with_default_device_logic( + r, func, flat_args, device=kwargs.get("device") + ) + + # WARNING: DO NOT add any additional namespaces/operators here if they refer to operators + # outside of the pytorch/pytorch library! Any pre-existing things here + # are either in the pytorch/pytorch library or have been grandfathered in. + # The fallback does not always work and MAY CRASH and emit unreadable error messages + # so it should not be allowed by default. + _can_run_unsafe_fallback_allowed_namespaces = ordered_set( + "debugprims", + "prims", + "aten", + "xla", + "vision", + "torchtext", + "torchaudio", + "quantized", + ) + + def can_run_unsafe_fallback(self, func: OpOverload): + if not self.allow_fallback_kernels: + return False + # It's OK to try the fallback for built-in ops (e.g. aten, prims) + # because we control and test these but the fallback leads to unexpected behavior + # in user-defined custom ops + return ( + func.namespace in self._can_run_unsafe_fallback_allowed_namespaces + or func.name() == "fbgemm::gmm" + ) + + # [subclass inputs] + # Suppose we enable fake tensor mode. This means that fake tensor + # mode will run first. But what if we do an operation that + # involves a tensor subclass that will desugar into normal tensor + # operations? Without returning NotImplemented, fake tensor mode will run first, + # decide that a conversion was made (since there was a non fake + # tensor argument), and report an error that converting non + # fake tensor is not supported. 
What we actually wanted to happen + # was to give the subclass a chance to figure out what it wants to + # before erroring out. Returning NotImplemented here allows this. + def check_for_subclass(self, flat_args): + def check(x): + return ( + isinstance(x, torch.Tensor) + and not isinstance(x, FakeTensor) + and type(x) is not torch.Tensor + and type(x) is not torch.nn.Parameter + ) + + return [type(x) for x in flat_args if check(x)] + + def validate_and_convert_non_fake_tensors( + self, func, converter, flat_args, args_spec + ): + """ + Checks if the list of tensors are fake tensors. + If not, try to convert them to fake tensors. + Returns the original args, kwargs, and a flattened list of (args, kwargs) that are fake tensors. + """ + flat_arg_fake_tensors = [] + + def validate(x): + if not isinstance(x, torch.Tensor): + return x + + nonlocal flat_arg_fake_tensors + if not self.is_our_fake(x): + if torch.Tag.inplace_view in func.tags: + args, kwargs = pytree.tree_unflatten(flat_args, args_spec) + raise Exception( + f"Can't call metadata mutating ops on non-Fake Tensor inputs. Found in {render_call(func, args, kwargs)}" + ) + if not self.allow_non_fake_inputs: + if isinstance(x, FakeTensor) and x.fake_mode is not self: + raise AssertionError("Mixing fake modes NYI") + args, kwargs = pytree.tree_unflatten(flat_args, args_spec) + raise Exception( + f"Please convert all Tensors to FakeTensors first or instantiate FakeTensorMode " + f"with 'allow_non_fake_inputs'. Found in {render_call(func, args, kwargs)}" + ) + + x = converter(self, x) + + flat_arg_fake_tensors.append(x) + return x + + validated_args = [validate(a) for a in flat_args] + return validated_args, flat_arg_fake_tensors + + def wrap_meta_outputs_with_default_device_logic(self, r, func, flat_args, device): + converter = self.fake_tensor_converter + + # Lazily initialized, in case there are no tensor returns + common_device = None + has_scalar_only_inputs = False + + def wrap(e): + nonlocal common_device + nonlocal has_scalar_only_inputs + + if isinstance(e, torch.Tensor) and common_device is None: + ( + common_device, + has_scalar_only_inputs, + ) = FakeTensor._find_common_device(func, flat_args) + + if self.is_our_fake(e): + torch._check( + e.device == common_device, + lambda: f"FakeTensor is wrapped to wrong device, found {e.device}, expected {common_device}", + ) + + if ( + isinstance(e, torch.Tensor) + and not self.is_our_fake(e) + and converter is not None + ): + if has_scalar_only_inputs: + # Under FakeTensorMode, op accepts scalar only inputs, such as aten.add/sub/mul/div, + # returns a real scalar tensor on CPU. See TensorMeta() in _prims/__init__.py for details. + # We thus directly convert real tensor to fake tensor. 
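+                    # e.g. an aten.add/sub/mul/div call whose inputs are all Python
+                    # scalars produces a real scalar tensor on CPU; convert it here.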
+ return converter(self, e) + else: + return converter.from_meta_and_device( + self, e, device or common_device + ) + else: + return e + + return tree_map(wrap, r) + + _cpp_meta_supports_symint = ordered_set( + aten.empty.memory_format, + aten.empty_strided.default, + aten.as_strided_scatter.default, + aten.as_strided.default, + aten.as_strided_.default, + aten.zeros.default, + aten.detach.default, + aten.view_as_real.default, + aten.view_as_complex.default, + aten.set_.source_Storage_storage_offset, + aten._sparse_coo_tensor_with_dims_and_tensors.default, + ) + + def cpp_meta_supports_symint(self, func): + if torch.Tag.view_copy in func.tags: + return True + return func in self._cpp_meta_supports_symint + + lift_fns = ordered_set(aten.lift_fresh.default, aten.lift_fresh_copy.default) + + def may_turn_const(self, t): + return ( + t.numel() <= CONSTANT_NUMEL_LIMIT + and not t.is_sparse + and not self.is_our_fake(t) + and not t.device.type == "meta" + ) + + def invalidate_written_to_constants( + self, func, flat_arg_fake_tensors, args, kwargs + ): + any_constant = any(e.constant is not None for e in flat_arg_fake_tensors) + schema_info = get_schema_info(func) + if any_constant and schema_info.is_mutable(): + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + for k, v in new_kwargs.items(): + k = k if (k != "input" or schema_info.has_argument(k)) else "self" + if ( + self.is_our_fake(v) + and schema_info.is_mutable(k) + and v.constant is not None + ): + self.fake_tensor_converter.invalidate_constant_aliases(v.constant) + + def from_tensor( + self, + tensor, + *, + static_shapes=None, + source: Optional[Source] = None, + symbolic_context=None, + # Setting this flag will force FakeTensorMode to return `None` if attempting to convert a tensor we have not + # seen before. 
+ memoized_only=False, + ): + shape_env = self.shape_env + if static_shapes is None: + static_shapes = self.static_shapes + if static_shapes: + assert ( + symbolic_context is None + ), "cannot set both static_shapes and symbolic_context" + shape_env = None + # see note [Tensor Fakification and Symbol Caching] + if not symbolic_context and not source and not static_shapes: + if tracing_context := torch._guards.TracingContext.try_get(): + if tensor in tracing_context.tensor_to_context: + symbolic_context = tracing_context.tensor_to_context[tensor] + source = symbolic_context.tensor_source + return self.fake_tensor_converter( + self, + tensor, + shape_env=shape_env, + source=source, + symbolic_context=symbolic_context, + memoized_only=memoized_only, + ) + + +# NB: returns fake tensors +def run_fallback_kernel( + fake_mode, func, flat_args, args_spec, orig_not_implemented_exception +): + # these should all be supported, just to be safe + # avoid fallback for operators which inplace modify metadata + # because the input fake tensors would be umodified + if torch.Tag.inplace_view in func.tags: + raise orig_not_implemented_exception + + inp_impls = {} + + # Don't use in_kernel_invocation_manager(fake_mode) as we want to do + # REAL compute (not with meta device) + with no_dispatch(): + + def to_real_tensor(e): + if fake_mode.is_our_fake(e): + out = torch.zeros_like(e, device=e.fake_device) + if e.is_sparse: + out._coalesced_(e.is_coalesced()) + inp_impls[id(out)] = e + return out + return e + + flat_args = [to_real_tensor(a) for a in flat_args] + args, kwargs = pytree.tree_unflatten(flat_args, args_spec) + + r = func(*args, **kwargs) + + tensor_impls = set() + storages = set() + + for e in flat_args: + if isinstance(e, torch.Tensor): + if not e.is_sparse: + storages.add(e._typed_storage()._cdata) + + # TODO: also check metadata change on inputs + # proper aliasing/metadata relationship between outputs and inputs will + # not be set up, bc of conversion to device, unless we can reuse an + # input impl + + def map_out(e): + if id(e) not in inp_impls and ( + isinstance(e, torch.Tensor) + and not e.is_sparse + and e._typed_storage()._cdata in storages + ): + raise orig_not_implemented_exception + + if isinstance(e, torch.Tensor): + if id(e) in inp_impls: + return inp_impls[id(e)] + else: + return fake_mode.fake_tensor_converter(fake_mode, e) + else: + return e + + return pytree.tree_map(map_out, r) + + +def can_generate_trivial_abstract_impl(op: torch._ops.OpOverload) -> bool: + assert isinstance(op, torch._ops.OpOverload) + if torch._library.utils.is_builtin(op): + # We control the built-ins. These may (in rare cases) + # do input metadata mutation (which we have banned on custom ops) + return False + schema = op._schema + # It's suspicious if the op is not mutable but returns nothing, so we return False out of an abundance of caution + if not schema.is_mutable: + return False + if len(schema.returns) > 0: + return False + # If the op returns nothing, then it has a trivial abstract impl. 
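+    # e.g. a custom op with schema "mylib::record(Tensor(a!) x) -> ()" (a
+    # hypothetical name) is mutable and returns nothing, so faking it amounts to
+    # returning None.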
+ return True + + +# Just for use to allow copying a module to fake tensors, +# does not apply elsewhere +class FakeCopyMode(TorchFunctionMode): + def __init__(self, fake_mode): + self.fake_mode = fake_mode + + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs if kwargs else {} + + # clone will get called in Parameter deepcopy + if func == torch._C.TensorBase.clone: + return func( + self.fake_mode.from_tensor(args[0], static_shapes=True), **kwargs + ) + elif func == torch.Tensor.__deepcopy__: + assert len(args) == 2 and len(kwargs) == 0 + tensor, memo = args + + if id(tensor) in memo: + return memo[id(tensor)] + + out = self.fake_mode.from_tensor(tensor, static_shapes=True) + memo[id(tensor)] = out + return out + else: + with torch._C.DisableTorchFunctionSubclass(): + return func(*args, **kwargs) + + +def _device_handler(args): + # NB: Don't use is_our_fake, just serve the fake information + # as is. Notice we don't use 'self'; we use args[0].fake_mode + # because they may not be the same. It would also be possible + # to return NotImplemented here, in which case the FakeTensor + # handler on args[0] would handle it, but we're being nice and + # short-circuiting quickly. + assert len(args) == 1 and isinstance(args[0], FakeTensor) + if args[0].fake_mode.in_kernel_invocation: + return torch.device("meta") + else: + return args[0].fake_device + + +_DISPATCH_META_HANDLERS = { + torch.ops.prim.device.default: _device_handler, + torch.ops.aten.size.default: lambda args: tuple(int(s) for s in args[0].size()), + torch.ops.aten.stride.default: lambda args: tuple(int(s) for s in args[0].stride()), + torch.ops.aten.storage_offset.default: lambda args: int(args[0].storage_offset()), +} + +_DISPATCH_HANDLE_DIRECTLY = ordered_set( + torch.ops.aten.is_coalesced.default, + torch.ops.aten.dense_dim.default, + torch.ops.aten.sparse_dim.default, +) + +from torch._subclasses.fake_impls import ( # noqa: F401 + _device_not_kwarg_ops, # noqa: F401 + _is_tensor_constructor, # noqa: F401 + _like_tensor_constructors, # noqa: F401 + contains_tensor_types, # noqa: F401 + get_fast_op_impls, + has_meta, + op_implementations_checks, + stride_incorrect_op, +) diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/fake_utils.py b/venv/lib/python3.10/site-packages/torch/_subclasses/fake_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ffe62842e456559db118ecb00163266fa1fd399d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_subclasses/fake_utils.py @@ -0,0 +1,190 @@ +# mypy: ignore-errors + +import functools +import warnings +from typing import Callable, Union + +import torch +import torch.utils._pytree as pytree +from torch._ops import OpOverload +from torch._subclasses.fake_tensor import ( + FakeTensorMode, + tree_flatten_only, + UnsupportedFakeTensorException, +) +from torch.utils._python_dispatch import TorchDispatchMode + + +aten = torch._ops.ops.aten + + +def outputs_alias_inputs(outputs, inputs): + input_storages = { + inp._typed_storage()._cdata + for inp in tree_flatten_only(torch.Tensor, inputs) + if torch._C._has_storage(inp) + } + return any( + torch._C._has_storage(out) and out._typed_storage()._cdata in input_storages + for out in tree_flatten_only(torch.Tensor, outputs) + ) + + +def outputs_are_inputs(outputs, inputs): + input_ids = {id(inp) for inp in tree_flatten_only(torch.Tensor, inputs)} + return any(id(out) in input_ids for out in tree_flatten_only(torch.Tensor, outputs)) + + +def output_alias_each_other(outputs): + 
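+    """
+    Return True if any two tensor outputs share the same storage (i.e. alias
+    each other); outputs without storage are ignored.
+    """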
storages = set() + for out in tree_flatten_only(torch.Tensor, outputs): + if not torch._C._has_storage(out): + continue + stor = out._typed_storage()._cdata + if stor in storages: + return True + storages.add(stor) + return False + + +def is_sdpa_error(func, idx, e): + if ( + ( + func is aten._scaled_dot_product_flash_attention.default + or func is aten._flash_attention_forward.default + ) + and idx in (6, 7) + and "Devices" in repr(e) + ): + return True + if ( + ( + func is aten._scaled_dot_product_efficient_attention.default + or func is aten._efficient_attention_forward.default + ) + and idx in (2, 3) + and "Devices" in repr(e) + ): + return True + return False + + +class CrossRefFakeMode(TorchDispatchMode): + def __init__( + self, + ignore_op_fn: Union[Callable[[OpOverload], bool], None] = None, + *, + check_strides=True, + check_aliasing=True, + ): + self.ignore_op_fn = ( + ignore_op_fn if ignore_op_fn is not None else lambda fn: False + ) + self.check_strides = check_strides + self.check_aliasing = check_aliasing + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + + fake_r = None + + # empty_like excluded for now due to sparse complex + # aten._to_dense.default this one is getting called with csc + if ( + func + not in ( + aten.lift_fresh.default, + aten.lift_fresh_copy.default, + aten.set_.source_Storage_storage_offset, + ) + and not self.ignore_op_fn(func) + and torch.Tag.dynamic_output_shape not in func.tags + and torch.Tag.inplace_view not in func.tags + and torch.Tag.data_dependent_output not in func.tags + ): + # Do not import symbolic_shapes at the top of the module as it imports sympy and that's slow + from torch.fx.experimental.symbolic_shapes import ShapeEnv + + try: + # TODO: enable_python_dispatcher() here + with FakeTensorMode(shape_env=ShapeEnv()) as fake_mode: + fake_args, fake_kwargs = pytree.tree_map_only( + torch.Tensor, + functools.partial(fake_mode.from_tensor, static_shapes=True), + (args, kwargs), + ) + with warnings.catch_warnings(): + fake_r = func(*fake_args, **fake_kwargs) + except UnsupportedFakeTensorException: + pass + + context = ( + f"When comparing the output of {func} on FakeTensor and concrete Tensors, " + f"found" + ) + r = func(*args, **kwargs) + if fake_r is not None: + r_flat = pytree.tree_leaves(r) + f_flat = pytree.tree_leaves(fake_r) + assert len(f_flat) == len( + r_flat + ), f"{context} mismatch in number of returns {len(f_flat)} != {len(r_flat)}" + + if self.check_aliasing: + r_aliasing = outputs_alias_inputs(r, (args, kwargs)) + f_aliasing = outputs_alias_inputs(fake_r, (fake_args, fake_kwargs)) + assert ( + r_aliasing == f_aliasing + ), f"{context} mismatch in outputs_alias_inputs check {f_aliasing} != {r_aliasing}" + + r_identity_eq = outputs_are_inputs(r, (args, kwargs)) + f_identity_eq = outputs_are_inputs(fake_r, (fake_args, fake_kwargs)) + assert ( + r_identity_eq == f_identity_eq + ), f"{context} mismatch in outputs_are_inputs check {f_identity_eq} != {r_identity_eq}" + + r_output_alias_each_other = output_alias_each_other(r) + f_output_alias_each_other = output_alias_each_other(fake_r) + assert r_output_alias_each_other == f_output_alias_each_other, ( + f"{context} mismatch in outputs_alias_each_other check " + f"{f_output_alias_each_other} != {r_output_alias_each_other}" + ) + + for idx, (r_out, fake_out) in enumerate( + zip(pytree.tree_leaves(r), pytree.tree_leaves(fake_r)) + ): + r_is_ten = isinstance(r_out, torch.Tensor) + assert r_is_ten == isinstance( + fake_out, torch.Tensor + ), 
f"{context} mismatched number of tensor outputs" + if r_is_ten: + assert r_out.requires_grad == fake_out.requires_grad, ( + f"{context} mismatched requires_grad-ness of outputs. " + f"This usually means that you have added autograd support " + f"for your operator at a dispatch key other than Autograd, " + f"which will lead to problems" + ) + if torch._C._has_storage(r_out): + r_offset = r_out.storage_offset() + f_offset = fake_out.storage_offset() + assert ( + r_offset == f_offset + ), f"{context} mismatched storage offset" + + try: + torch._prims.utils.compare_tensor_meta( + r_out, + fake_out, + check_strides=self.check_strides, + allow_rhs_unbacked=True, + ) + except Exception as e: + if is_sdpa_error(func, idx, e): + continue + error_message = ( + f"{context} mismatched tensor metadata: {e}" + if len(r_flat) == 1 + else f"{context} mismatched tensor metadata for output[{idx}]: {e}" + ) + raise RuntimeError(error_message) from e + return r diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/functional_tensor.py b/venv/lib/python3.10/site-packages/torch/_subclasses/functional_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..ce346b81401ac95051e072b414c840afd8d371ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_subclasses/functional_tensor.py @@ -0,0 +1,653 @@ +import contextlib +from abc import ABC, abstractmethod +from typing import Any, Callable, ContextManager, Dict, Optional, Tuple + +import torch +import torch.utils._pytree as pytree +from torch._C import _functionalization_reapply_views_tls as _reapply_views +from torch._ops import _get_dispatch_mode_pre_dispatch +from torch.utils._python_dispatch import ( + _detect_functional_mode, + _disable_infra_mode, + return_and_correct_aliasing, + TorchDispatchMode, +) + +not_implemented_log = torch._logging.getArtifactLogger(__name__, "not_implemented") + + +class FunctionalTensor(torch.Tensor): + """ + Functional tensors represent tensors that will remove mutations + from a program. If you perform a mutable operation on a functional tensor, + it will re-dispatch to the functional variant of that operation. + + Historically, functionalization is implemented in C++ in the dispatcher. + This class is a lightweight python shim around the C++ functionalization logic. + + FunctionalTensor is required to be used with a corresponding + FunctionalTensormode active, because it relies + on using the mode for dispatch (which can properly handle factory functions). + """ + + elem: torch.Tensor + # Indicates to our torch_dispatch dispatching infra that + # this is an "infra" mode with lower dispatching precedence. + _mode_key = torch._C._TorchDispatchModeKey.FUNCTIONAL + + # Note: The reason we add these extra keys to our FunctionalTensor subclass + # is to mirror the behavior of C++ functionalization (we can choose to change this + # later, as long as it doesn't break anything). + # FunctionalTensorWrapper copies **all** dispatch keys from the inner tensor + # to the wrapper, excluding functorch and python dispatch keys. + # Here I'm trying to re-use the keyset the functorch wrapper subclasses copy, + # except that they don't include ZeroTensor so I'm manually adding it in. + _extra_dispatch_keys = torch._C._additional_keys_to_prop_for_wrapper_tensors.add( + torch._C.DispatchKey.ZeroTensor + ) + + # These are all aten ops that correspond to metadata queries. + # We want FunctionalTensor to be able to handle them directly. 
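+    # (Calls to these ops are forwarded straight to the inner `elem` tensor in
+    # __torch_dispatch__, so wrapper and inner metadata never need to be kept
+    # in sync by hand.)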
+ metadata_fns = [ + torch.ops.aten.is_contiguous.default, # type: ignore[has-type] + torch.ops.aten.is_contiguous.memory_format, # type: ignore[has-type] + torch.ops.aten.is_strides_like_format.default, # type: ignore[has-type] + torch.ops.aten.is_non_overlapping_and_dense.default, # type: ignore[has-type] + torch.ops.aten.size.default, # type: ignore[has-type] + torch.ops.aten.sym_size.default, # type: ignore[has-type] + torch.ops.aten.stride.default, # type: ignore[has-type] + torch.ops.aten.sym_stride.default, # type: ignore[has-type] + torch.ops.aten.storage_offset.default, # type: ignore[has-type] + torch.ops.aten.sym_storage_offset.default, # type: ignore[has-type] + torch.ops.aten.numel.default, # type: ignore[has-type] + torch.ops.aten.sym_numel.default, # type: ignore[has-type] + torch.ops.aten.dim.default, # type: ignore[has-type] + torch.ops.prim.device.default, # type: ignore[has-type] + ] + + # These are ops that claim to be functional, but actually are maybe-mutating/maybe-aliasing + # TODO (tmanlaibaatar) make it a tag + maybe_aliasing_or_mutating_ops = [ + torch.ops.aten.dropout.default, # type: ignore[has-type] + torch.ops.aten.batch_norm.default, # type: ignore[has-type] + torch.ops.aten.native_batch_norm.default, # type: ignore[has-type] + torch.ops.aten._batch_norm_impl_index.default, # type: ignore[has-type] + torch.ops.aten.cudnn_batch_norm.default, # type: ignore[has-type] + torch.ops.aten.miopen_batch_norm.default, # type: ignore[has-type] + ] + + def __new__(cls, elem): + assert torch._is_functional_tensor(elem) + + # In general, we'd like our functional tensor subclass to only be in charge of functionalization, + # and defer to the inner subclass for all other functionality. + # Example: If our inner tensor is a ZeroTensor, we would want to defer running the ZeroTensor fallback + # until after we redispatch to our inner ZeroTensor. + # However, there are a few keys that we need to mirror between the inner and outer tensors. + # Conjugate + # Negative + # Why? These keys are used to test metadata queries, like `.is_conj()` and `.is_neg()`. + # We **need** calls to is_conj() to return the same thing on the outer and inner tensors, + # Because user code / framework code that branches like so needs to do the same thing + # when it sees the outer FunctionalTensor: + # if (x.is_conj()) { + # return at::view_as_real(x.resolve_conj()); + # } else { + # return at::view_as_real(x); + # } + extra_dispatch_keys = ( + FunctionalTensor._extra_dispatch_keys & torch._C._dispatch_keys(elem) + ) + + out = torch.Tensor._make_wrapper_subclass( # type: ignore[arg-type, attr-defined] + # TODO: right now, _make_wrapper_subclass's dynamic shape interaction is not great. + # Calling the overload that has kwargs causes us to go down the first overload path, + # which will **always** specialize sizes. + # We should probably eventually fix this so that the first overload can just handle dynamic shapes. 
+ cls, + elem.shape, # sizes + elem.stride(), # strides + elem.storage_offset(), # storage_offset + None, # memory_format + elem.dtype, # dtype + elem.layout, # layout + elem.device, # device + False, # pin_memory + elem.requires_grad, # requires_grad + "sizes", # dispatch_sizes_strides_policy + False, # dispatch_device + False, # dispatch_layout + extra_dispatch_keys, # _extra_dispatch_keys + ) + out.elem = elem + return out + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + unrecognized_types = [ + t + for t in types + if t not in [torch.Tensor, torch._subclasses.FakeTensor, FunctionalTensor] + ] + if unrecognized_types: + not_implemented_log.debug( + "FunctionalTensor unrecognized subclass(es): %s", unrecognized_types + ) + return NotImplemented + + if kwargs is None: + kwargs = {} + + # FunctionalTensor needs to plumb all metadata requests to the inner tensor. + # In theory we don't have to do this - but if we want to service metadata requests here, + # we need to carefully make sure all metadata is accurate (including metadata mutations) + if func in FunctionalTensor.metadata_fns: + # All metadata accesses should be plumbed to the inner tensor, that way we don't have to worry + # about the problem of keeping metadata in sync between the wrapper and inner tensor. + # This also alleviates us from having to manually handle metadata mutations on the wrapper. + assert len(kwargs) == 0 + if func in [ + torch.ops.aten.is_strides_like_format.default, + torch.ops.aten.is_contiguous.memory_format, + ]: + assert len(args) == 2 and isinstance(args[0], FunctionalTensor) + return func(args[0].elem, args[1]) + assert len(args) == 1 and isinstance(args[0], FunctionalTensor) + + return func(args[0].elem) + # Originally I tried to implement my subclass without giving it a torch_dispatch, but I gave up: + # - _make_wrapper_subclass requires a __torch_dispatch__ + # - If we want to use _make_subclass(), we have a problem: the subclass will share a TensorImpl with the inner tensor, + # which is of type FunctionalTensorWrapper! We explicitly do not want our wrapper to be a FunctionalTensorWrapper. + # - If we use the default tensor.__new__(), we have another problem: it returns inner_tensor.alias(), + # which causes every subclass created above autograd to have autograd view metadata + # (in addition to also being a FunctionalTensorWrapper). + raise RuntimeError( + "Attempting to use FunctionalTensor on its own. Instead, please use it with a corresponding FunctionalTensorMode()" + ) + + def __repr__(self): + return f"FunctionalTensor({repr(self.elem)})" + + @staticmethod + def to_functional(x): + # We will do the wrapping for the user. + assert not torch._is_functional_tensor(x) + # The only autograd metadata we care about on the FunctionalTensor is: + # - requires_grad (so autograd runs) + # - is_leaf (so that mutations on graph inputs that are not leaves are allowed by the autograd engine) + # this is handled by FunctionalTensor.to_functional + x_functional = torch._to_functional_tensor(x) + # Technically the FunctionalTensormode here is unnecessary, + # but it avoids spurious NotImplemented logs during `ProxyTorchDispatchMode` tracing. 
+ # _mirror_autograd_meta_to queries tensor sizes, + # and otherwise the sym_size() call will go to the proxy mode before hitting + # FunctionalTensor.__torch_dispatch__ + + functional_mode = _detect_functional_mode() + assert functional_mode is not None + + with functional_mode: + torch._mirror_autograd_meta_to(x, x_functional) # type: ignore[attr-defined] + out = FunctionalTensor(x_functional) + torch._mirror_autograd_meta_to(x_functional, out) # type: ignore[attr-defined] + return out + + def from_functional(self): + torch._sync(self) + return torch._from_functional_tensor(self.elem) + + def replace_(self, output) -> None: + torch._functionalize_replace(self.elem, output) + + def commit_update(self) -> None: + torch._functionalize_commit_update(self.elem) + + def sync(self) -> None: + torch._functionalize_sync(self.elem) + + def mark_mutation_hidden_from_autograd(self) -> None: + torch._functionalize_mark_mutation_hidden_from_autograd(self.elem) + + def tolist(self) -> Any: + if self.elem.dim() == 0: + return self.elem.item() + elif self.elem.dim() == 1: + return [elem.item() for elem in self.elem] + else: + return [elem.tolist() for elem in self.elem] + + +class FunctionalTensorMode(TorchDispatchMode): + def __init__(self, pre_dispatch=False, export=False, _allow_token_discovery=False): + self.export = export + self.is_on_stack = False + self.enter_stack = [] + # Indicates to our torch_dispatch dispatching infra that + # this is an "infra" mode with lower dispatching precedence. + self._mode_key = torch._C._TorchDispatchModeKey.FUNCTIONAL + self.pre_dispatch = pre_dispatch + # This will be turned off later for pre-dispatch functionalization + self._dispatch_key = torch._C.DispatchKey.PreDispatch if pre_dispatch else None # type: ignore[attr-defined] + # Map of effect type (ex. _EffectType.ORDERED) to a token. The tokens help keep + # track of the ordering between side effectful operations. + self._tokens: Dict[Any, torch.Tensor] = {} + + # Functionalization runs twice in AOTAutograd, once in + # `run_functionalized_fw_and_collect_metadata` to collect metadata to + # see which tensors need to be functionalized and discover how many + # tokens we need, and another time in `make_fx` which does the actual + # tracing to replace ops with their functional variants and handling + # side-effectful ops. In the second stage there should be no token + # discovery. This flag distinguishes between the two stages. 
+ self._allow_token_discovery = _allow_token_discovery + + # No-op if FunctionalTensorMode is already in use + def __enter__(self): + def _get_prev_mode(): + if self._dispatch_key == torch._C.DispatchKey.PreDispatch: + return _get_dispatch_mode_pre_dispatch( + torch._C._TorchDispatchModeKey.FUNCTIONAL + ) + return torch._C._get_dispatch_mode( + torch._C._TorchDispatchModeKey.FUNCTIONAL + ) + + if _get_prev_mode() is None: + self.enter_stack.append(True) + return super().__enter__() + else: + self.enter_stack.append(False) + return self + + def __exit__(self, a, b, c): + is_on_stack = self.enter_stack.pop() + if is_on_stack: + super().__exit__(a, b, c) + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + + unrecognized_types = [ + t + for t in types + if not issubclass(t, torch._subclasses.FakeTensor) + and t not in [torch.Tensor, FunctionalTensor] + ] + if unrecognized_types: + not_implemented_log.debug( + "FunctionalTensor unrecognized subclass(es): %s", unrecognized_types + ) + return NotImplemented + + def _can_decompose(func): + # See https://github.com/pytorch/pytorch/pull/115258#issuecomment-1900755832 + # We never decompose dropout in export + if self.export and func == torch.ops.aten.dropout.default: + return False + # TODO (tmanlaibaatar) + # Eventually, we don't want to decompose any aten op at all + # but there is a safety and coverage gap that we need to close + # before that. + # + # (1) the "safety" is what we are risking with this PR + # (we are blindly taking every op that advertises as + # functional and sending it to the functional fallback. + # We risk silent correctness if we have an op that lies about its schema, + # that we didn't manually hardcode above) Therefore we always decompose them + # (2) the "not every composite inplace op has a functional variant" is a coverage gap, + # but not really a safety risk, since we'll loudly error when we try to generate + # functionalization kernels for these new (composite) inplace/view ops. But until we + # establish such gap more concretely, we still decompose them + if self._dispatch_key is not None: + # it is unsafe to not decompose ops that claim to be functional but actually aren't + if func in FunctionalTensor.maybe_aliasing_or_mutating_ops: + return True + # only decompose view or inplace mutating ops + alias_info = len( + [i for i in func._schema.arguments if i.alias_info is not None] + ) + return alias_info != 0 or func._schema.is_mutable + return True + + if ( + func not in FunctionalTensor.metadata_fns + and _can_decompose(func) + # Not all funcs from __torch_dispatch__ are actual dispatcher ops, + # e.g. prim.device + and torch._C._dispatch_has_kernel(func.name()) + ): + with self: + r = func.decompose(*args, **kwargs) + if r is not NotImplemented: + return r + + def assert_is_functional(x): + assert torch._is_functional_tensor(x) + + def wrap(x): + # Only wrap our outputs in subclasses if the inner functionalization call + # also wrapped outputs into FunctionalTensorWrappers. + # When can this happen? e.g. 
`torch.div(2, 2)` + assert not isinstance(x, FunctionalTensor) + if isinstance(x, torch.Tensor) and torch._is_functional_tensor(x): + return FunctionalTensor(x) + return x + + def unwrap(x): + return x.elem + + from torch._higher_order_ops.auto_functionalize import ( + can_auto_functionalize, + do_auto_functionalize, + ) + + if can_auto_functionalize( + func + ) and not torch._C._dispatch_has_kernel_for_dispatch_key( + func.name(), torch._C.DispatchKey.Functionalize + ): + if self.pre_dispatch: + raise NotImplementedError( + "Auto functionalization is not supported on pre-dispatch tracing" + ) + return do_auto_functionalize(func, args, kwargs) + + from torch._higher_order_ops.effects import handle_effects, has_effects + + if has_effects(func, args, kwargs): + assert not torch._C._dispatch_has_kernel_for_dispatch_key( + func.name(), torch._C.DispatchKey.Functionalize + ) + return handle_effects( + self._allow_token_discovery, self._tokens, func, args, kwargs + ) + + args_unwrapped, kwargs_unwrapped = pytree.tree_map_only( + FunctionalTensor, unwrap, (args, kwargs) + ) + + # Expectation: functionalization should not **already** be enabled above our mode. + # Why would that be bad? when we return a FunctionalTensor here, we don't want functionalization + # to run above this mode and further wrap that output in **another** C++ FunctionalTensorWrapper. + is_included = torch._C._dispatch_tls_is_dispatch_key_included( + torch._C.DispatchKey.Functionalize + ) + is_excluded = torch._C._dispatch_tls_is_dispatch_key_excluded( + torch._C.DispatchKey.Functionalize + ) + assert is_excluded or not is_included + include_to_set = ( + torch._C._dispatch_tls_local_include_set() + | torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) + ) + exclude_to_set = ( + torch._C._dispatch_tls_local_exclude_set().remove( + torch._C.DispatchKey.Functionalize + ) + - FunctionalTensor._extra_dispatch_keys + ) + + # All we want to do here is re-use the existing C++ functionalization logic. + # This requires swizzling our TLS dispatch keys so that the Functionalize key is active. + with torch._C._ForceDispatchKeyGuard(include_to_set, exclude_to_set): + try: + # By default for python functionalization (for AOTAutograd), we reapply views. + old_apply_views = torch._functionalize_enable_reapply_views(True) # type: ignore[attr-defined] + + # Sometimes these functions cannot be directly dispatched to functionalize key + # because args are sometimes not functional tensors for some reason? + if func in FunctionalTensor.metadata_fns: + outs_unwrapped = func(*args_unwrapped, **kwargs_unwrapped) + outs_wrapped = pytree.tree_map_only( + torch.Tensor, wrap, outs_unwrapped + ) + else: + # When we dispatch to the C++ functionalization kernel, we might need to jump back to the + # PreDispatch mode stack afterwards, to handle any other PreDispatch modes underneath + # FunctionalTensorMode. 
If we call func() directly, we would need to exclude PreDispatch + # from the TLS in order to avoid infinite looping, but this would prevent us from coming + # back to PreDispatch later + outs_unwrapped = func._op_dk( + torch._C.DispatchKey.Functionalize, + *args_unwrapped, + **kwargs_unwrapped, + ) + # We don't allow any mutation on result of dropout + if self.export and func == torch.ops.aten.dropout.default: + torch._freeze_functional_tensor(outs_unwrapped) # type: ignore[attr-defined] + outs_wrapped = pytree.tree_map_only( + torch.Tensor, wrap, outs_unwrapped + ) + finally: + torch._disable_functionalization() + torch._functionalize_enable_reapply_views(old_apply_views) # type: ignore[attr-defined] + + is_included = torch._C._dispatch_tls_is_dispatch_key_included( + torch._C.DispatchKey.Functionalize + ) + is_excluded = torch._C._dispatch_tls_is_dispatch_key_excluded( + torch._C.DispatchKey.Functionalize + ) + assert is_excluded or not is_included + + if ( + # If no outputs are our functional subclass, then don't try to fix up aliasing + not any( + isinstance(x, FunctionalTensor) + for x in pytree.tree_leaves(outs_wrapped) + ) + # Since lift_fresh lifts its argument into a functional tensor, we can skip the + # aliasing correction step. Otherwise, we would be setting the storage of a + # lifted tensor to that of an unlifted tensor. + # Ref: https://github.com/pytorch/pytorch/issues/111506 + or func == torch.ops.aten.lift_fresh.default + ): + return outs_wrapped + # Wrapper tensor subclasses do not have correct aliasing info! Use this util to manually correct the output aliasing. + # inplace ops like `aten.add_()` are expected to return inputs **directly**, instead of creating fresh tensor objects. + # Use this util to figure out the right thing to return. + # If none of our inputs were wrapped, then we have no FunctionalTensor outputs that we need to fix up storages for. + return return_and_correct_aliasing(func, args, kwargs, outs_wrapped) + + +@contextlib.contextmanager +def disable_functional_mode(): + return _disable_infra_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL) + + +# This is similar to torch.func.functionalize, but: +# - It uses FunctionalTensorMode, and FunctionalTensor (a python subclass). +# One important advantage to using this mode is that it will let us +# run functionalization underneath __torch_dispatch__, +# which we need in AOTAutograd. +# - Doing so means that it does not automatically compose with other +# functorch transforms, since these transforms always run above __torch_dispatch__. +# That's why this util lives here, and not in functorch. 
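+#
+# Illustrative usage (a sketch only; the names below are hypothetical):
+#
+#   def f(x):
+#       x.add_(1)       # in-place mutation ...
+#       return x * 2    # ... removed when run through this util
+#
+#   functional_f = dispatch_functionalize(f)
+#   out = functional_f(torch.ones(3))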
+def dispatch_functionalize(func, mode: FunctionalTensorMode = FunctionalTensorMode()): + # TODO: pull these from aot autograd + def to_fun(t): + if isinstance(t, torch.Tensor): + return FunctionalTensor.to_functional(t) + return t + + def from_fun(t): + if not isinstance(t, FunctionalTensor): + # quick sanity assert + if isinstance(t, torch.Tensor): + assert not torch._is_functional_tensor(t) + return t + torch._sync(t) + return torch._from_functional_tensor(t.elem) + + def inner(*args, **kwargs): + disable_above = torch._C._ExcludeDispatchKeyGuard( + torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) + ) + with disable_above, mode: + func_args = pytree.tree_map_only(torch.Tensor, to_fun, args) + func_kwargs = pytree.tree_map_only(torch.Tensor, to_fun, kwargs) + func_outputs = func(*func_args, **func_kwargs) + outputs = pytree.tree_map_only(FunctionalTensor, from_fun, func_outputs) + + return outputs + + return inner + + +class BaseFunctionalizeAPI(ABC): + @abstractmethod + def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + pass + + @abstractmethod + def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + pass + + @abstractmethod + def functionalize(self, inner_f: Callable) -> Callable: + pass + + @abstractmethod + def redispatch_to_next(self) -> ContextManager: + pass + + @abstractmethod + def replace(self, input_tensor, output_tensor) -> None: + pass + + @abstractmethod + def commit_update(self, tensor) -> None: + pass + + @abstractmethod + def sync(self, tensor) -> None: + pass + + @abstractmethod + def mark_mutation_hidden_from_autograd(self, tensor) -> None: + pass + + +class PythonFunctionalizeAPI(BaseFunctionalizeAPI): + def __init__( + self, mode: Optional[FunctionalTensorMode] = None, pre_dispatch: bool = False + ) -> None: + super().__init__() + self.mode = mode if mode else FunctionalTensorMode() + self.pre_dispatch = pre_dispatch + + def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + with self.mode: + return torch.utils._pytree.tree_map_only( + torch.Tensor, FunctionalTensor.to_functional, args + ) + + def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + return torch.utils._pytree.tree_map_only( + FunctionalTensor, FunctionalTensor.from_functional, args + ) + + def functionalize(self, inner_f: Callable) -> Callable: + return dispatch_functionalize(inner_f, self.mode) + + def redispatch_to_next(self) -> ContextManager: + # [NOTE] We don't do anything here because at the time + # we exercise this path, we would have already popped the + # FunctionalTensorMode from mode stack. Since FunctionalTensorMode + # is now stateful, it is better to explicitly pass in correct mode + # directly instead of globally setting it. 
+ return contextlib.nullcontext() + + def replace(self, input_tensor, output_tensor) -> None: + assert isinstance(input_tensor, FunctionalTensor) + assert not isinstance(output_tensor, FunctionalTensor) + input_tensor.replace_(output_tensor) + + def commit_update(self, tensor) -> None: + assert isinstance(tensor, FunctionalTensor) + tensor.commit_update() + + def sync(self, tensor) -> None: + assert isinstance(tensor, FunctionalTensor) + tensor.sync() + + def mark_mutation_hidden_from_autograd(self, tensor) -> None: + assert isinstance(tensor, FunctionalTensor) + tensor.mark_mutation_hidden_from_autograd() + + +class CppFunctionalizeAPI(BaseFunctionalizeAPI): + def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + from torch._functorch.eager_transforms import _wrap_all_tensors_to_functional + + return _wrap_all_tensors_to_functional(args, level=0) + + def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + from torch._functorch.eager_transforms import ( + _unwrap_all_tensors_from_functional, + ) + + return _unwrap_all_tensors_from_functional(args, reapply_views=_reapply_views()) + + def functionalize(self, inner_f: Callable) -> Callable: + return torch.func.functionalize(inner_f) + + def redispatch_to_next(self) -> ContextManager: + return torch._C._ExcludeDispatchKeyGuard( + torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) + ) + + def replace(self, input_tensor, output_tensor) -> None: + torch._functionalize_replace(input_tensor, output_tensor) + + def commit_update(self, tensor) -> None: + torch._functionalize_commit_update(tensor) + + def sync(self, tensor) -> None: + torch._functionalize_sync(tensor) + + def mark_mutation_hidden_from_autograd(self, tensor) -> None: + torch._functionalize_mark_mutation_hidden_from_autograd(tensor) + + +class FunctorchFunctionalizeAPI(BaseFunctionalizeAPI): + def __init__(self, interpreter): + self.interpreter = interpreter + + def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + from torch._functorch.eager_transforms import _wrap_all_tensors_to_functional + + return _wrap_all_tensors_to_functional(args, level=self.interpreter.level()) + + def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + from torch._functorch.eager_transforms import ( + _unwrap_all_tensors_from_functional, + ) + + return _unwrap_all_tensors_from_functional( + args, reapply_views=self.interpreter.functionalize_add_back_views() + ) + + def functionalize(self, inner_f: Callable) -> Callable: + return torch.func.functionalize( + inner_f, + remove="mutations_and_views" + if self.interpreter.functionalize_add_back_views() + else "mutations", + ) + + def redispatch_to_next(self) -> ContextManager: + return self.interpreter.lower() + + def replace(self, input_tensor, output_tensor) -> None: + torch._functionalize_replace(input_tensor, output_tensor) + + def commit_update(self, tensor) -> None: + torch._functionalize_commit_update(tensor) + + def sync(self, tensor) -> None: + torch._functionalize_sync(tensor) + + def mark_mutation_hidden_from_autograd(self, tensor) -> None: + torch._functionalize_mark_mutation_hidden_from_autograd(tensor) diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/meta_utils.py b/venv/lib/python3.10/site-packages/torch/_subclasses/meta_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c5847d78bd44915f490dcb92230df58fb5f5099b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_subclasses/meta_utils.py @@ -0,0 +1,987 @@ +import contextlib +import warnings +import weakref +from typing 
import ContextManager, Dict, List, Optional, Tuple, TYPE_CHECKING + +import torch +from torch._C._functorch import ( + _add_batch_dim, + _unwrap_functional_tensor, + _wrap_functional_tensor, + current_level, + get_unwrapped, + is_batchedtensor, + is_functorch_wrapped_tensor, + is_gradtrackingtensor, + maybe_get_bdim, + maybe_get_level, + peek_interpreter_stack, + TransformType, +) +from torch._guards import Source + +from torch.multiprocessing.reductions import StorageWeakRef +from torch.utils._python_dispatch import ( + is_traceable_wrapper_subclass, + transform_subclass, +) +from torch.utils.weak import WeakIdRef + +if TYPE_CHECKING: + # Import the following modules during type checking to enable code intelligence features, + # Do not import unconditionally, as they import sympy and importing sympy is very slow + from torch.fx.experimental.symbolic_shapes import SymbolicContext + +DimList = List + + +def safe_is_leaf(t): + try: + return t.is_leaf + except RuntimeError: + # inference mode can trigger this + return False + + +def safe_grad(t): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "The .grad attribute of a Tensor") + return t.grad + + +def assert_eq(a, b): + assert a == b, f"{a} != {b}" + + +def assert_metadata_eq(assert_eq, m1, m2, *, skip_symbolic=False): + def go(m1, m2): + assert_eq(m1.dtype, m2.dtype) + if not skip_symbolic: + assert_eq(m1.shape, m2.shape) + assert_eq(m1.requires_grad, m2.requires_grad) + assert_eq(m1.is_leaf, m2.is_leaf) + assert_eq(m1.grad_fn is None, m2.grad_fn is None) + assert_eq(m1.is_sparse, m2.is_sparse) + assert_eq(m1.is_inference(), m2.is_inference()) + assert_eq(m1.is_conj(), m2.is_conj()) + assert_eq(m1.is_neg(), m2.is_neg()) + assert_eq(safe_grad(m1) is not None, safe_grad(m2) is not None) + if safe_grad(m1) is not None: + go(safe_grad(m1), safe_grad(m2)) + if m1.is_sparse: + assert_eq(m1.dense_dim(), m2.dense_dim()) + assert_eq(m1.sparse_dim(), m2.sparse_dim()) + assert_eq(m1.is_coalesced(), m2.is_coalesced()) + else: + if not skip_symbolic: + assert_eq(m1.stride(), m2.stride()) + assert_eq(m1.storage_offset(), m2.storage_offset()) + assert_eq(m1._is_view(), m2._is_view()) + if m1._is_view(): + go(m1._base, m2._base) + # TODO: test if is resizable (no direct query for this atm) + # TODO: audit AutogradMeta to see if it matches + # TODO: test forward AD + + return go(m1, m2) + + +def is_sparse_coo(t): + return isinstance(t, torch.Tensor) and t.layout is torch.sparse_coo + + +def is_sparse_compressed(t): + return isinstance(t, torch.Tensor) and t.layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + } + + +def is_sparse_any(t): + return is_sparse_coo(t) or is_sparse_compressed(t) + + +# This is a class for converting multiple tensors into meta tensors which +# share the same view/storage structure. The operation model is you allocate +# one of these, and then call it repeatedly on all the tensors you want to +# convert. It's important to use the same object for tensors you want to +# share storage because this is how we correlate shared storages to the same +# meta storages. This class will hold weak references to cached tenosrs +# and tensor storages. 
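+#
+# Illustrative usage (a sketch):
+#
+#   converter = MetaConverter()
+#   base = torch.randn(4)
+#   view = base[1:]
+#   meta_base = converter(base)
+#   meta_view = converter(view)
+#   # Because the same converter handled both tensors, meta_view's meta storage
+#   # is correlated with meta_base's, mirroring the base/view relationship.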
+class MetaConverter: + def __init__(self): + self.storage_memo = {} + self.tensor_memo: weakref.WeakValueDictionary = weakref.WeakValueDictionary() + self.maybe_storages_to_delete = [] + self.check_expired_frequency = 128 + self.check_expired_count = 0 + self.hit = 0 + self.miss = 0 + self.del_hook = None + self.arg_cnt = 0 + + def successful(self): + return self.hit > 0 and self.miss == 0 + + def check_for_expired_weak_storages(self): + new_li = [] + stor_to_delete = [] + for obj in self.maybe_storages_to_delete: + if not obj.expired(): + new_li.append(obj) + else: + stor_to_delete.append(obj) + for obj in stor_to_delete: + self.storage_memo.pop(obj, None) + self.maybe_storages_to_delete = new_li + + # if for some reason we have aquired many storages which have not expired + # even though a tensor with their storage has expired (aliasing or otherwise) + # check for expired storages less often so as to bound the amount of work we + # do checking for expired storages + self.check_expired_frequency = max( + self.check_expired_frequency, len(self.maybe_storages_to_delete) + ) + + def get_tensor_memo(self, t): + return self.tensor_memo.get(WeakIdRef(t), None) + + def set_tensor_memo(self, t, v): + # hold a weak ref to self, otherwise it will be kept alive + # by the del_ten closure + self_weak_ref = weakref.ref(self) + if is_sparse_any(t) or t.is_mkldnn or is_functorch_wrapped_tensor(t): + weak_st = None + else: + weak_st = StorageWeakRef(t._typed_storage()) + tensor_ref_key = WeakIdRef(t) + + def del_ten(): + # tensor outlives the converter + self_ref = self_weak_ref() + if self_ref is None: + return + # on shutdown, tensor_ref_key may not be in memo + self_ref.tensor_memo.pop(tensor_ref_key, None) + if weak_st and weak_st.expired(): + self_ref.storage_memo.pop(weak_st, None) + elif weak_st is not None: + # [expired-storages] + # NB: even though the tensor has died, + # the deallocation of its storage can take longer, + # even when the storage has no other uses/views. + # In this case, the StorageWeakRef object will be kept alive + # longer than it needs to be, however the storage itself + # will be deallocated. We retain the possibly dead storages + # and periodically check if any of them are expired and + # can be freed. + self_ref.maybe_storages_to_delete.append(weak_st) + + weakref.finalize(t, del_ten) + self.tensor_memo[tensor_ref_key] = v + + # NB: doesn't actually return a storage, because meta storage is + # not supported + def meta_storage(self, s, callback): + # NB: TypedStorage is freshly allocated and cannot be used as hash + # key index. + + # Use a Weak Ref to s in order to not leak memory + swr = StorageWeakRef(s) + if swr not in self.storage_memo: + self.storage_memo[swr] = callback( + lambda: torch.empty(s.size(), dtype=torch.uint8, device="meta") + ).untyped_storage() + return self.storage_memo[swr] + + # This function assumes that it's possible to do the conversion + # NB: name here is used in a conventional way by Dynamo; it corresponds + # precisely to the Source.name() of the tensor we're fakeifying and + # corresponds to a valid Python expression. When we construct sub-names + # as part of this process, we will maintain this invariant! (Even though + # other users of this may not need it this property to be upheld.) 
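+    # (Added note, not in the upstream file: the "valid Python expression"
+    # convention above means sub-sources are derived with AttrSource, so a
+    # tensor Dynamo names "L['x']" yields sub-names such as "L['x']._base" and
+    # "L['x'].grad"; see the AttrSource(source, "_base") and
+    # AttrSource(source, "grad") calls inside meta_tensor below.)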
+ def meta_tensor( + self, + t, + shape_env=None, + callback=lambda t: t(), + source: Optional[Source] = None, + symbolic_context: Optional["SymbolicContext"] = None, + ): + if source is None: + from torch._dynamo.source import ConstantSource + + # TODO: make a dedicated UnknownSource for this? + source = ConstantSource( + f"__meta_utils_unknown_tensor{len(self.tensor_memo)}" + ) + + # This indicates you set no_dispatch() before calling into this + # function. This is an error: we may be creating fake tensors and + # will perform operations on them which need fake tensor mode to + # be active. You will segfault if you are in a no_dispatch() block. + assert not torch._C._dispatch_tls_local_exclude_set().has( + torch._C.DispatchKey.Python + ) + arg_cnt = self.arg_cnt + self.arg_cnt += 1 + + # When we make as_strided calls, we end up generating a guard + # that the new as_strided tensor is in bounds for the old storage + # for the base (since as_strided calls can "bust" out of their + # bounding box.) This guard is unnecessary: if a user is able + # to provide us a tensor with the view base setup this way, we + # don't need to produce a guard, because the fact that they + # were able to produce the view base means its in bounds. + # + # Now, ordinarily, this guard would be harmless. However, the + # generated guard refers to variables bound on the base variable. + # At the moment, Dynamo doesn't actually guard on x._base, because + # according to Voz this results in a lot of spurious invalidations, + # and also if the user doesn't directly make use of _base, its + # pointless anyway (because programs should be parametric over + # whether or not the input tensor is a view or not--unless you're + # mutating the input, but that's a whole 'nother ballgame). So + # for expediency, we suppress these guards so we don't have to + # deal with this (yet, anyway.) + # + # NB: An old version of this code suppressed guards for ALL operations + # happening during meta conversion, not just as_strided calls. + # This is too aggressive: we do duck sizing and 0/1 simplification + # as we allocate variables, and we do need to register guards for + # these cases. + maybe_suppress = contextlib.nullcontext + if shape_env is not None: + maybe_suppress = shape_env.suppress_guards + + def sym_sizes_strides_storage_offset( + t, src, symbolic_context=symbolic_context + ) -> Tuple[Tuple[int, ...], Tuple[int, ...], int]: + if shape_env is not None: + fake_mode = torch._subclasses.fake_tensor.maybe_get_fake_mode(t) + if fake_mode is not None and fake_mode.shape_env is shape_env: + # Don't reallocate the sizes; the shape envs are the same, + # so reuse the old sizes/strides/etc + return (t.size(), t.stride(), t.storage_offset()) + else: + return shape_env.create_symbolic_sizes_strides_storage_offset( + t, + src, + symbolic_context=symbolic_context, + ) + else: + assert symbolic_context is None + return (t.size(), t.stride(), t.storage_offset()) + + def empty_create(inner_t, inner_src, symbolic_context=symbolic_context): + ( + inner_sizes, + inner_strides, + inner_storage_offset, + ) = sym_sizes_strides_storage_offset(inner_t, inner_src, symbolic_context) + return torch.empty_strided( + inner_sizes, + inner_strides, + dtype=inner_t.dtype, + device="meta", + ) + + # Creates a subclass instance with empty inner tensors according to the specified + # symbolic context. 
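+    # (Added illustration, not in the upstream file: a "traceable wrapper
+    # subclass" is one exposing the flatten/unflatten protocol that
+    # transform_subclass() relies on, roughly:
+    #
+    #   class MyWrapper(torch.Tensor):
+    #       def __tensor_flatten__(self):
+    #           return ["inner"], None  # inner attr names plus constant context
+    #       @staticmethod
+    #       def __tensor_unflatten__(inner_tensors, ctx, outer_size, outer_stride):
+    #           return MyWrapper(inner_tensors["inner"])
+    #
+    # empty_create_subclass below walks those inner attrs, allocates meta tensors
+    # for them, and rebuilds the wrapper via __tensor_unflatten__.)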
+ def empty_create_subclass( + t, + outer_size, + outer_stride, + symbolic_context=symbolic_context, + callback=callback, + source=source, + ): + from torch._dynamo.source import AttrSource + from torch.fx.experimental.symbolic_shapes import SubclassSymbolicContext + + assert symbolic_context is None or isinstance( + symbolic_context, SubclassSymbolicContext + ) + + # Note: transform_subclass will use __tensor_unflatten__ to generate + # a fresh subclass wrapper with outer sizes / strides according to the + # outer symbolic context (passed in to this function). Inner size / stride + # / storage offset symbols are allocated according to the appropriate inner + # symbolic contexts, after which the checks in transform_subclass() will + # relate them to the outer metadata as possible. + return transform_subclass( + t, + lambda attr, inner_t: callback( + lambda: empty_create( + inner_t, + AttrSource(source, attr), + symbolic_context=( + None + if symbolic_context is None + else symbolic_context.inner_contexts[attr] + ), + ) + ), + outer_size=outer_size, + outer_stride=outer_stride, + ) + + # Returns an all-dynamic symbolic context used for metafying the given tensor with + # fully dynamic dims. This is useful when fake-ifying intermediate tensors in + # closed-over ViewFunc state, as we don't have symbolic contexts for them, but we + # don't want to over-specialize during view replay. + def all_dynamic_symbolic_context(t, source, shape_env, callback): + from torch._dynamo.source import AttrSource + from torch.fx.experimental.symbolic_shapes import ( + DimDynamic, + StatelessSymbolicContext, + SubclassSymbolicContext, + SymbolicContext, + ) + + view_base_context: Optional[SymbolicContext] = None + if t._is_view(): + view_base_context = all_dynamic_symbolic_context( + t._base, AttrSource(source, "_base"), shape_env, callback + ) + + t_symbolic_context: SymbolicContext + t_dynamic_sizes = [DimDynamic.DYNAMIC] * t.dim() + if is_traceable_wrapper_subclass(t): + inner_contexts: Dict[str, SymbolicContext] = {} + attrs, _ = t.__tensor_flatten__() + for attr in attrs: + assert isinstance(attr, str) + inner = getattr(t, attr) + inner_contexts[attr] = all_dynamic_symbolic_context( + inner, AttrSource(source, attr), shape_env, callback + ) + t_symbolic_context = SubclassSymbolicContext( + dynamic_sizes=t_dynamic_sizes, + constraint_sizes=[None] * t.dim(), + inner_contexts=inner_contexts, + tensor_source=source, + view_base_context=view_base_context, + ) + else: + t_symbolic_context = StatelessSymbolicContext( + dynamic_sizes=t_dynamic_sizes, + constraint_sizes=[None] * t.dim(), + view_base_context=view_base_context, + ) + + return t_symbolic_context + + # Returns a fake-ified version of an input view tensor t, given an already fake-ified + # base. At a high level, we want two things: + # 1. fake_t should have the same view relationship to the given fake base as the + # input t has to its _base. + # 2. fake_t should have symbolic sizes / strides / storage offset according to the + # appropriate symbolic context (i.e. from the automatic dynamic algorithm). + # + # We currently take different strategies across view types: + # * For dense -> dense views, accomplish both (1) and (2) simultaneously via an + # as_strided() call on the fake-ified base, passing symbolic metadata. + # * For views involving subclasses, perform view replay using view funcs to + # achieve (1). It's necessary for (2) to swap out any closed-over state in + # the view funcs with symbolicized SymInts and fake-ified tensors. 
Doing this + # avoids specialization (and thus over-eager simplification of symbols) that + # could occur during view replay on the fake-ified base. + # + # Examples: + # * t.unsqueeze(-1) with dense t is a dense -> dense view. It can be modeled + # with an as_strided() call on the fake base passing symbolic metadata. + # * sub.select(dim=0, index=3) is a subclass -> subclass view. The index arg + # is made symbolic to avoid invalid specialization and view replay is then + # done to reconstruct the view. + # * _nested_from_jagged(values, offsets) is a dense -> subclass view + # that returns a subclass instance from a dense values tensor. The offsets + # tensor is closed over in the view func, as it can be considered view metadata. + # First, the offsets tensor is fake-ified according to the inner symbolic + # context and with the correct relationship to the outer size / stride metadata. + # Then view replay is done, swapping in the fake offsets so the view replay output + # is fully fake with no invalid specialization. + def view_from_base(base, t, source=source, shape_env=shape_env): + # fake-ify t's metadata according to the outer symbolic context + (sizes, strides, storage_offset) = sym_sizes_strides_storage_offset( + t, source + ) + if not is_traceable_wrapper_subclass( + t + ) and not is_traceable_wrapper_subclass(base): + # Dense -> Dense view case uses as_strided() to construct view relationship. + # TODO: Change this logic to use view replay for consistency? + # It's likely there is no view func available. + return base.as_strided(sizes, strides, storage_offset) + + from torch._dynamo.source import EphemeralSource + from torch.fx.experimental.symbolic_shapes import sym_eq + + def symint_visitor_fn(s): + if shape_env is None: + return s + + # NB: The symbol here is expected to be simplified out because we a priori + # allocate inner and outer symbols according to the appropriate symbolic + # contexts and prefer those over this symbol during symbol simplification + # (via usage of EphemeralSource below). This -shouldn't- happen, but if + # this symbol somehow leaks out beyond the view tensor's shape metadata, our + # assumption of it being simplified out will fail and it may be guarded on, + # which will hard error. + sym_source = EphemeralSource("symint_visitor_fn") + symbol = shape_env.create_symbol(s, sym_source) + return shape_env.create_symintnode(symbol, hint=s, source=sym_source) + + real_to_fake_mapping = {} + if is_traceable_wrapper_subclass(t): + # Fake-ify t naively here; this is only done so we can get fake-ified inner + # tensors with the correct relationships to the outer sizes / strides for use + # in view replay. It's done beforehand here because it's not easy to do when + # visiting tensors one-by-one during view replay. + # + # Example: + # Consider a Dense -> NJT view. NJT has (values, offsets) components and we + # want a view of values with the offsets closed over. As the offsets component + # is needed to describe the output view, it's important that it's fakeified + # correctly. + fake_t = empty_create_subclass( + t, outer_size=sizes, outer_stride=strides + ) + attrs, _ = fake_t.__tensor_flatten__() + for attr in attrs: + real_to_fake_mapping[getattr(t, attr)] = getattr(fake_t, attr) + + def tensor_visitor_fn( + visited_t, shape_env=shape_env, callback=callback, source=source + ): + # It's possible to close over an undefined tensor (e.g. NJT's lengths). 
+ if visited_t is None: + return None + + # Fake inner tensors of view subclasses will come from the mapping built above. + fake_visited_t = real_to_fake_mapping.get(visited_t, None) + if fake_visited_t is not None: + return fake_visited_t + + # For other closed-over tensor state, fake-ify it as all dynamic with an + # ephemeral source. This avoids invalid specialization during view replay. + # If we find that in practice the usage of ephemeral sources isn't enough + # to guarantee that we don't have guards on these symbols, we may need to + # explicitly suppress guards (as is done for _base in the dense -> dense + # view case). + temp_source = EphemeralSource("tensor_visitor_fn") + return self.meta_tensor( + visited_t, + shape_env, + callback, + source=temp_source, + symbolic_context=all_dynamic_symbolic_context( + visited_t, temp_source, shape_env, callback + ), + ) + + # Replay the view, swapping out any non-symbolic SymInts or real tensors + # for symbolic SymInts or fake tensors. + fake_t = t._view_func_unsafe(base, symint_visitor_fn, tensor_visitor_fn) + + # Ensure the output has symbolic shapes according to the outer symbolic context. + # These checks should simplify out any symbols created for closed-over view func + # SymInts. + torch._check(sym_eq(fake_t.size(), sizes)) + torch._check(sym_eq(fake_t.stride(), strides)) + torch._check(sym_eq(fake_t.storage_offset(), storage_offset)) + return fake_t + + # see expired-storages + self.check_expired_count += 1 + if self.check_expired_count >= self.check_expired_frequency: + self.check_for_expired_weak_storages() + self.check_expired_count = 0 + + if self.get_tensor_memo(t) is None: + with torch.inference_mode(t.is_inference()): + if t.is_sparse: + is_leaf = safe_is_leaf(t) + + # The lambda function below is similar to + # `t.to(device='meta')` except the latter + # preserves nnz value + r = callback( + lambda: torch.ops.aten._sparse_coo_tensor_with_dims( + t.sparse_dim(), + t.dense_dim(), + t.shape, + dtype=t.dtype, + layout=torch.sparse_coo, + device="meta", + ) + ) + assert safe_is_leaf(r), "the callback you passed in doesn't detach" + # Note [is_coalesced is dispatched] + # Strangely enough, is_coalesced() is a dispatched operator, + # which means that it will get caught by fake tensor mode. + # Ordinarily this would error, but there's some logic in + # fake tensor ensure this doesn't happen. 
+ r._coalesced_(t.is_coalesced()) + if t.requires_grad: + r.requires_grad = True + if t.requires_grad and not is_leaf: + with torch.enable_grad(): + r = r.clone() + r._coalesced_(t.is_coalesced()) + elif is_sparse_compressed(t): + is_leaf = safe_is_leaf(t) + + def mk_meta(): + nnz = 0 + batch_dim = t.ndim - t.sparse_dim() - t.dense_dim() + batch_size = t.shape[:batch_dim] + if t.layout in {torch.sparse_csr, torch.sparse_bsr}: + index_dtype = t.crow_indices().dtype + compressed_indices = torch.empty( + t.crow_indices().shape, device="meta", dtype=index_dtype + ) + plain_indices = torch.empty( + (*t.col_indices().shape[:-1], nnz), + device="meta", + dtype=index_dtype, + ) + else: + index_dtype = t.ccol_indices().dtype + compressed_indices = torch.empty( + t.ccol_indices().shape, device="meta", dtype=index_dtype + ) + plain_indices = torch.empty( + (*t.row_indices().shape[:-1], nnz), + device="meta", + dtype=index_dtype, + ) + values_shape = t.values().shape + values = torch.empty( + ( + *values_shape[:batch_dim], + nnz, + *values_shape[batch_dim + 1 :], + ), + dtype=t.dtype, + device="meta", + ) + return torch.ops.aten.sparse_compressed_tensor( + compressed_indices, + plain_indices, + values, + t.shape, + layout=t.layout, + dtype=t.dtype, + device="meta", + ) + + # `mk_meta()` is similar to `t.to(device='meta'))` + # except `to('meta')` preserves nnz value while + # `mk_meta` result has nnz == 0. + r = callback(mk_meta) + + assert safe_is_leaf(r), "the callback you passed in doesn't detach" + if t.requires_grad: + r.requires_grad = True + if t.requires_grad and not is_leaf: + with torch.enable_grad(): + r = r.clone() + elif t.is_nested and not is_traceable_wrapper_subclass(t): + # TODO: Handle this better in Dynamo? + # There are checks there now, but this can still be triggered by a dense + # tensor graph input that is a view of a strided NT. 
+ from torch._dynamo.exc import unimplemented + + unimplemented( + "strided nested tensors are not supported by meta conversion" + ) + elif t.is_mkldnn: + is_leaf = safe_is_leaf(t) + sizes, strides, _storage_offset = sym_sizes_strides_storage_offset( + t, source + ) + r = callback( + lambda: torch.empty_strided( + sizes, strides, dtype=t.dtype, device="meta" + ) + ) + assert safe_is_leaf(r), "the callback you passed in doesn't detach" + if t.requires_grad: + r.requires_grad = True + if t.requires_grad and not is_leaf: + with torch.enable_grad(): + r = r.clone() + elif is_functorch_wrapped_tensor(t): + if t._is_view(): + from torch._dynamo.exc import unimplemented + + unimplemented( + "view functorch tensors are not supported by meta conversion" + ) + + # Wraps a functorch tensor class (BatchedTensor, GradTrackingTensor) + # in a FakeTensor + def _to_fake_tensor(t): + if is_batchedtensor(t): + ft = _to_fake_tensor(get_unwrapped(t)) + lvl = maybe_get_level(t) + bdim = maybe_get_bdim(t) + r = _add_batch_dim(ft, bdim, lvl) + elif is_gradtrackingtensor(t): + disable_functorch = torch._C._DisableFuncTorch + with disable_functorch(): + ft = _to_fake_tensor(get_unwrapped(t)) + lvl = torch._C._functorch.maybe_get_level(t) + r = torch._C._functorch._wrap_for_grad(ft, lvl) + + is_leaf = safe_is_leaf(t) + if t.requires_grad and safe_is_leaf(r): + r.requires_grad = True + elif t.requires_grad and not is_leaf: + with torch.enable_grad(): + r = r.clone() + else: + sizes = t.size() + strides = t.stride() + r = callback( + lambda: torch.empty_strided( + sizes, + strides, + dtype=t.dtype, + device="meta", + ) + ) + return r + + r = _to_fake_tensor(t) + + elif t._is_view(): + # Construct views in two steps: recursively meta-fy their + # base, and then create view(s) off that. NB: doing it + # directly from storage is WRONG because this won't cause + # version counters to get shared. + assert t._is_view() + + base_symbolic_context = None + if shape_env and symbolic_context is not None: + from torch.fx.experimental.symbolic_shapes import ( + StatelessSymbolicContext, + ) + + assert isinstance(symbolic_context, StatelessSymbolicContext) + # NB: This should generally be set when the input is a view, + # but the exception right now is for fake-ifying grads, which is + # a work in progress. + if symbolic_context.view_base_context is not None: + base_symbolic_context = symbolic_context.view_base_context + + base = self.meta_tensor( + t._base, + shape_env, + callback, + source=torch._dynamo.source.AttrSource(source, "_base"), + symbolic_context=base_symbolic_context, + ) + + def is_c_of_r(complex_dtype, real_dtype): + return ( + utils.is_complex_dtype(complex_dtype) + and utils.corresponding_real_dtype(complex_dtype) + == real_dtype + ) + + # In some situations, MetaConverter may be called in a + # context where autograd is disabled. For the _is_view + # assert to pass, we have to setup the autograd view + # metadata anyway. Do this by reenabling the + # ADInplaceOrView key. This is kind of a hack. + old_exclude = torch._C._dispatch_tls_is_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView + ) + torch._C._dispatch_tls_set_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView, False + ) + try: + if base.dtype == t.dtype: + pass + elif is_c_of_r(base.dtype, t.dtype): + base = torch.view_as_real(base) + elif is_c_of_r(t.dtype, base.dtype): + base = torch.view_as_complex(base) + else: + # This is not guaranteed to succeed. 
If it fails, it + # means there is another dtype-converting view function + # that hasn't been handled here + base = base.view(t.dtype) + + # This is very tricky. Naively, you might expect this + # to hold: + # + # if t.requires_grad and not safe_is_leaf(t) + # assert t._base.requires_grad + # + # But it's not true! As you can see in the following + # program: + # + # x = torch.zeros(4) + # y = x.view(1, 4) + # y.requires_grad = True + # z = y.view(1, 1, 4) + # assert z._base is x + # + # So we may have to do *two* views out of the base to + # recreate this situation. + if safe_is_leaf(t): + # Leaf views that track view metadata are created by + # creating a view inside a no_grad block + with torch.no_grad(), maybe_suppress(): + r = view_from_base(base, t) + # As it's a leaf, we can directly assign requires_grad + r.requires_grad = t.requires_grad + else: + if t._base.requires_grad == t.requires_grad: + # Easy case, just run the view op + with torch.enable_grad(), maybe_suppress(): + r = view_from_base(base, t) + + # NB: We don't actaully faithfully replicate + # autograd connectivity, but that doesn't matter + # today. See following for more info: + # https://gist.github.com/soulitzer/e03f015b314c3f5fcf80888c69390913 + else: + # Obscure case. Create a leaf view and give it the + # correct requires_grad, then do the final view. + # NB: Can't have a non-leaf without requiring grad! + assert t.requires_grad + with torch.no_grad(): + mid = base.view(base.shape) + mid.requires_grad = t.requires_grad + with torch.enable_grad(), maybe_suppress(): + r = view_from_base(mid, t) + # The CreationMeta influences whether or not inplace + # mutation is an error or not. So we need to make + # sure we properly propagate this as well. + torch._C._autograd._set_creation_meta( + r, torch._C._autograd._get_creation_meta(t) + ) + finally: + torch._C._dispatch_tls_set_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView, old_exclude + ) + + else: + is_leaf = safe_is_leaf(t) + + ( + sizes, + strides, + storage_offset, + ) = sym_sizes_strides_storage_offset(t, source, symbolic_context) + + # If we have a subclass that desugars into dense tensors, + # perform our callback on each inner tensor. + if is_traceable_wrapper_subclass(t): + r = empty_create_subclass( + t, outer_size=sizes, outer_stride=strides + ) + else: + r = callback( + lambda: torch.empty_strided( + sizes, + strides, + dtype=t.dtype, + device="meta", + ) + ) + + assert safe_is_leaf(r), "the callback you passed in doesn't detach" + if t.requires_grad: + r.requires_grad = t.requires_grad + if not is_leaf: + # Fake up some autograd history. 
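+                    # (Added aside, not in the upstream file: cloning under
+                    # enable_grad is what gives the meta tensor a grad_fn,
+                    # mirroring a non-leaf input. Roughly:
+                    #   >>> a = torch.ones(2, device="meta", requires_grad=True)
+                    #   >>> b = a.clone()
+                    #   >>> b.is_leaf, b.grad_fn is None
+                    #   (False, False)
+                    # )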
+ with torch.enable_grad(): + # preserve_format is the default, but we want to + # emphasize how important it is to preserve + # format here + r = r.clone(memory_format=torch.preserve_format) + + # Graph-Break for wrapped tensors + if not ( + is_batchedtensor(t) or is_gradtrackingtensor(t) + ) and torch._C._functorch.is_functorch_wrapped_tensor(t): + return NotImplemented + + s = t.untyped_storage() + swr = StorageWeakRef(s) + if swr not in self.storage_memo and ( + r.is_nested + or ( + r.stride() == strides + and r.storage_offset() == storage_offset + ) + ): + # You're normal and happy, install the fresh storage into the memo + self.storage_memo[swr] = r.untyped_storage() + else: + # You're in crazy town; somehow you gave us a tensor + # that wasn't a view, but had nonzero storage offset, + # nontrivial strides (such that clone() couldn't + # preserve them), or already aliases with another + # tensor's storage. The most typical way to end + # up here is with set_. So use set_ to bludgeon this + # in. + r_s = self.meta_storage(s, callback=callback) + # NB: In principle, this should always work, but there + # is some subtle difference in the autograd metadata + # that means we will backprop the set_ call, even if + # r is declared as an input to grad. + # See https://github.com/pytorch/pytorch/issues/87956 + # for the reproducer. + # NB: The in_kernel_invocation_manager here is necessary + # for fake tensor. If we run the set_ call with fake + # tensor on, r will improperly report that it is NOT a + # meta tensor but a cpu tensor, and then the set_ call + # will fail due to device mismatch. no_dispatch() is + # not enough, because the fake tensor will still claim + # to be a CPU tensor and you'll end up in the CPU + # kernel. Arguably this is a hack; a cleaner way to + # solve this is to have a FakeStorage concept which + # would report it's CPU device--no problem now! But + # this is difficult to do because we don't have storage + # subclasses. Relevant test is + # DynamicShapesFunctionTests::test_add_dynamic_shapes in + # test/dynamo/test_dynamic_shapes.py + maybe_fake_mgr: ContextManager[None] = contextlib.nullcontext() + from torch._subclasses.fake_tensor import ( + in_kernel_invocation_manager, + maybe_get_fake_mode, + ) + + mb_fake_mode = maybe_get_fake_mode(r) + if mb_fake_mode is not None: + maybe_fake_mgr = in_kernel_invocation_manager(mb_fake_mode) + with maybe_fake_mgr, torch.no_grad(): + r.set_(r_s, storage_offset, sizes, strides) + + if safe_grad(t) is not None: + from torch._dynamo.source import AttrSource + + # TODO: Use a valid grad-specific symbolic context instead of recycling + # the one from t. This isn't correct if e.g. t._is_view() != t.grad._is_view(). + r.grad = self.meta_tensor( + safe_grad(t), + shape_env, + callback, + source=AttrSource(source, "grad"), + symbolic_context=symbolic_context, + ) + torch._C._set_conj(r, t.is_conj()) + torch._C._set_neg(r, t.is_neg()) + # This can be skipped if necessary for performance reasons + assert_metadata_eq(assert_eq, t, r, skip_symbolic=True) + self.set_tensor_memo(t, r) + + return self.get_tensor_memo(t) + + def __call__( + self, + t, + shape_env=None, + *, + callback=lambda t: t(), + source=None, + symbolic_context=None, + ): + # TODO: zero tensors? 
We appear to have eliminated them by + # excluding complex for now + + if isinstance(t, torch.Tensor) or is_traceable_wrapper_subclass(t): + if t.device.type != "xla" and any( + [ + t.is_quantized, + t._is_view() and t._base is not None and t._base.is_sparse, + torch._is_functional_tensor(t), + t.device.type in ("lazy"), + # We need a way to test if a tensor is batched but there + # is no official APi to do it + # torch._C._is_batched(t), + ] + ): + # TODO: sparse should support meta + # NB technically to('meta') does work but our logging + # instrumentation will see the meta conversions and the + # tests all break so we just exclude this. In any case + # the to conversion isn't really right anyhow. + + if torch._is_functional_tensor(t) and t.device.type != "lazy": + if t._is_view(): + raise RuntimeError( + "Cannot safely fakify a view because this process drops the view information right now." + ) + + st = peek_interpreter_stack() + assert ( + st is None or st.key() == TransformType.Functionalize + ), "Expect st to be either None or have Functionalize transform key." + if st is None: + # the case of AOTAutograd + torch._sync(t) + unwrap_t = torch._from_functional_tensor(t) + with torch._dispatch.python.suspend_functionalization(): + fake_t = self.meta_tensor( + unwrap_t, + shape_env=shape_env, + callback=callback, + source=source, + symbolic_context=symbolic_context, + ) + out = torch._to_functional_tensor(fake_t) + torch._mirror_autograd_meta_to(fake_t, out) + return out + else: + # torch.func.functionalize + reapply_views = torch._C._functionalization_reapply_views_tls() + unwrap_t = _unwrap_functional_tensor(t, reapply_views) + pop_st_ctx = ( + torch._functorch.pyfunctorch.temporarily_pop_interpreter_stack() + ) + with pop_st_ctx: + fake_t = self.meta_tensor( + unwrap_t, + shape_env=shape_env, + callback=callback, + source=source, + symbolic_context=symbolic_context, + ) + return _wrap_functional_tensor(fake_t, current_level()) + self.miss += 1 + return NotImplemented + else: + self.hit += 1 + + disable_functorch = torch._C._DisableFuncTorch + with disable_functorch(): + r = self.meta_tensor( + t, + shape_env=shape_env, + callback=callback, + source=source, + symbolic_context=symbolic_context, + ) + if type(t) is torch.nn.Parameter: + # NB: Cannot directly use Parameter constructor + # because that would force a detach, not desirable + r._is_param = True + return r + elif torch.overrides.is_tensor_like(t): + self.miss += 1 + return NotImplemented + else: + # non-Tensor types don't count as hit or miss + return t + + +import torch._prims_common as utils diff --git a/venv/lib/python3.10/site-packages/torch/_subclasses/schema_check_mode.py b/venv/lib/python3.10/site-packages/torch/_subclasses/schema_check_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..72a2082a162df138beab1fa1273c474b86e7a109 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_subclasses/schema_check_mode.py @@ -0,0 +1,198 @@ +# mypy: ignore-errors + +from collections import namedtuple +from copy import deepcopy +from itertools import combinations + +import torch +from torch.fx.operator_schemas import normalize_function +from torch.testing._internal.jit_utils import clone_inputs +from torch.utils import _pytree as pytree +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils._pytree import tree_map + +# Named Tuples used within SchemaCheckMode +Mutation = namedtuple("Mutation", ["op_name", "arg_name"]) +Aliasing = namedtuple("Aliasing", ["op_name", "arg_name", 
"output_number"]) + +# Simplified naming for C++ classes +SchemaArgument = torch._C._SchemaArgument +SchemaArgType = torch._C._SchemaArgType +SchemaInfo = torch._C._SchemaInfo + +# This TorchDispatchMode Subclass is used to verify op schemas +# This TorchDispatchMode Scubclass currently: +# - Records the called ops +# - Checks for mutations on all inputs +# - Checks for aliasing on all inputs + + +class SchemaCheckMode(TorchDispatchMode): + def __init__(self): + # Information recorded for testing purposes. For example: + # - incorrect schemas + # - overly conservative schemas + self.ops = [] + self.mutated = [] + self.aliasing = [] + + def reset_cache(self): + self.ops.clear() + self.mutated.clear() + self.aliasing.clear() + + def display_ops(self): + print(*self.ops, sep=",") + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + def bitwise_equal(lhs, rhs): + if lhs.is_quantized: + # TODO: This is only OK if can't have NaN quantized; idk if + # this is actually true + return torch.equal(lhs, rhs) + else: + return torch.allclose(lhs, rhs, equal_nan=True) + + def has_mutated(before, after, md): + are_tensors = type(before) == torch.Tensor and type(after) == torch.Tensor + if ( + are_tensors + and before.layout != torch.sparse_csr + and after.layout != torch.sparse_csr + ): + return not ( + before.size() == after.size() + and bitwise_equal(before, after) + and md[0] == after.stride() + and md[1] == after._typed_storage()._cdata + ) + return False + + def has_aliased(lhs, rhs): + try: + return torch._C._overlaps(lhs, rhs) + except Exception as exception: + if str(exception).startswith("Cannot inspect value of type "): + return False + else: + raise exception + + def standardize_name(name): + return name if name != "self" else "input" + + def unwrap(e): + if isinstance(e, torch.Tensor) and not type(e) == torch.Tensor: + try: + return e.elem + except AttributeError as t: + return e + return e + + def parse_metadata(e): + if isinstance(e, torch.Tensor): + if not type(e) == torch.Tensor: + try: + current = e.elem + return ( + deepcopy(current.stride()), + current._typed_storage()._cdata, + ) + except AttributeError as t: + return None + # Sparse CSR tensors do not have strides or storage + elif e.layout != torch.sparse_csr: + return (deepcopy(e.stride()), e._typed_storage()._cdata) + return None + + self.ops.append(func._schema.name) + + # Clone and process arguments and outputs + pre_arguments = normalize_function( + func, args, kwargs, normalize_to_only_use_kwargs=True + ).kwargs + + c_p_args = dict(zip(pre_arguments.keys(), clone_inputs(pre_arguments.values()))) + cloned_arguments = { + name: tree_map(unwrap, c_p_args.get(name)) for name in c_p_args + } + cloned_metadata = { + name: [ + parse_metadata(a) for a in pytree.tree_leaves(pre_arguments.get(name)) + ] + for name in pre_arguments + } + + out = func(*args, **kwargs) + arguments = { + name: tree_map(unwrap, pre_arguments.get(name)) for name in pre_arguments + } + tuple_out = out if isinstance(out, tuple) else (out,) + tuple_out = tree_map(unwrap, tuple_out) + + schema_info = SchemaInfo(func._schema) + schema_info.add_argument_values(pre_arguments) + + # Process arguments with outputs + for i in range(len(func._schema.arguments)): + arg = func._schema.arguments[i] + name = standardize_name(arg.name) + if arguments.get(name) is not None: + before = cloned_arguments.get(name) + md = cloned_metadata.get(name) + after = arguments.get(name) + for j in range(len(tuple_out)): + # aten::_unsafe_view is intended to have incorrect 
aliasing notation (hence unsafe) + unsafe_ops = ("aten::_unsafe_view", "aten::unsafe_split") + if ( + has_aliased(tuple_out[j], after) + and func._schema.name not in unsafe_ops + ): + if not schema_info.may_contain_alias( + SchemaArgument(SchemaArgType.output, j), + SchemaArgument(SchemaArgType.input, i), + ): + raise RuntimeError( + f"Argument {name} is not defined to alias output but was aliasing" + ) + else: + self.aliasing.append( + Aliasing(func._schema.name, name, f"output_{j}") + ) + if after is tuple_out[j] and isinstance(after, torch.Tensor): + # Only mutable ops e.g. (add_, add.out) are allowed to directly return inputs. + if not schema_info.is_mutable( + SchemaArgument(SchemaArgType.input, i) + ) and func not in [ + torch.ops.aten.lift.default, + torch.ops.aten.lift_fresh.default, + ]: + raise RuntimeError( + f"""\ +Dispatcher operators below autograd are not allowed to directly return inputs. +However, we found that `outputs[{str(j)}] is {name}""" + ) + if any( + has_mutated(a, b, c) + for a, b, c in zip( + pytree.tree_leaves(before), pytree.tree_leaves(after), md + ) + ): + if not schema_info.is_mutable( + SchemaArgument(SchemaArgType.input, i) + ): + raise RuntimeError( + f"Argument {name} is not defined as mutable but was mutated" + ) + else: + self.mutated.append(Mutation(func._schema.name, name)) + + # Aliasing between outputs + for i, j in combinations(range(len(func._schema.returns)), 2): + if has_aliased(tuple_out[i], tuple_out[j]): + if not schema_info.may_contain_alias( + SchemaArgument(SchemaArgType.output, i), + SchemaArgument(SchemaArgType.output, j), + ): + raise RuntimeError(f"Outputs {i} and {j} alias unexpectedly") + + return out diff --git a/venv/lib/python3.10/site-packages/torch/export/__init__.py b/venv/lib/python3.10/site-packages/torch/export/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..62b9ef4e4035a4d1716b0d90a1bfe8d68780c5b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/__init__.py @@ -0,0 +1,344 @@ +import builtins +import copy +import dataclasses +import inspect +import io +import os +import sys +import typing +import warnings +from enum import auto, Enum +from typing import ( + Any, + Callable, + Dict, + Iterator, + List, + Optional, + Tuple, + Type, + TYPE_CHECKING, + Union, +) + +import torch +import torch.utils._pytree as pytree +from torch.fx._compatibility import compatibility + +from torch.fx.passes.infra.pass_base import PassResult +from torch.fx.passes.infra.pass_manager import PassManager + +from torch.utils._pytree import ( + FlattenFunc, + FromDumpableContextFn, + ToDumpableContextFn, + UnflattenFunc, +) + +if TYPE_CHECKING: + # Import the following modules during type checking to enable code intelligence features, + # Do not import unconditionally, as they import sympy and importing sympy is very slow + from torch.fx.experimental.symbolic_shapes import StrictMinMaxConstraint + + +__all__ = [ + "Constraint", + "Dim", + "ExportBackwardSignature", + "ExportGraphSignature", + "ExportedProgram", + "ModuleCallEntry", + "ModuleCallSignature", + "dims", + "dynamic_dim", + "export", + "load", + "register_dataclass", + "save", + "unflatten", + "FlatArgsAdapter", + "UnflattenedModule", +] + + +from .dynamic_shapes import Constraint, Dim, dims, dynamic_dim +from .exported_program import ExportedProgram, ModuleCallEntry, ModuleCallSignature +from .graph_signature import ExportBackwardSignature, ExportGraphSignature +from .unflatten import FlatArgsAdapter, unflatten, UnflattenedModule + + 
+PassType = Callable[[torch.fx.GraphModule], Optional[PassResult]] + + +def export( + mod: torch.nn.Module, + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + *, + dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None, + strict: bool = True, + preserve_module_call_signature: Tuple[str, ...] = (), +) -> ExportedProgram: + """ + :func:`export` takes an arbitrary Python callable (an nn.Module, a function or + a method) along with example inputs, and produces a traced graph representing + only the Tensor computation of the function in an Ahead-of-Time (AOT) fashion, + which can subsequently be executed with different inputs or serialized. The + traced graph (1) produces normalized operators in the functional ATen operator set + (as well as any user-specified custom operators), (2) has eliminated all Python control + flow and data structures (with certain exceptions), and (3) records the set of + shape constraints needed to show that this normalization and control-flow elimination + is sound for future inputs. + + **Soundness Guarantee** + + While tracing, :func:`export()` takes note of shape-related assumptions + made by the user program and the underlying PyTorch operator kernels. + The output :class:`ExportedProgram` is considered valid only when these + assumptions hold true. + + Tracing makes assumptions on the shapes (not values) of input tensors. + Such assumptions must be validated at graph capture time for :func:`export` + to succeed. Specifically: + + - Assumptions on static shapes of input tensors are automatically validated without additional effort. + - Assumptions on dynamic shape of input tensors require explicit specification + by using the :func:`Dim` API to construct dynamic dimensions and by associating + them with example inputs through the ``dynamic_shapes`` argument. + + If any assumption can not be validated, a fatal error will be raised. When that happens, + the error message will include suggested fixes to the specification that are needed + to validate the assumptions. For example :func:`export` might suggest the + following fix to the definition of a dynamic dimension ``dim0_x``, say appearing in the + shape associated with input ``x``, that was previously defined as ``Dim("dim0_x")``:: + + dim = Dim("dim0_x", max=5) + + This example means the generated code requires dimension 0 of input ``x`` to be less + than or equal to 5 to be valid. You can inspect the suggested fixes to dynamic dimension + definitions and then copy them verbatim into your code without needing to change the + ``dynamic_shapes`` argument to your :func:`export` call. + + Args: + mod: We will trace the forward method of this module. + + args: Example positional inputs. + + kwargs: Optional example keyword inputs. + + dynamic_shapes: + An optional argument where the type should either be: + 1) a dict from argument names of ``f`` to their dynamic shape specifications, + 2) a tuple that specifies dynamic shape specifications for each input in original order. + If you are specifying dynamism on keyword args, you will need to pass them in the order that + is defined in the original function signature. 
+ + The dynamic shape of a tensor argument can be specified as either + (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is + not required to include static dimension indices in this dict, but when they are, + they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None, + where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions + are denoted by None. Arguments that are dicts or tuples / lists of tensors are + recursively specified by using mappings or sequences of contained specifications. + + strict: When enabled (default), the export function will trace the program through + TorchDynamo which will ensure the soundness of the resulting graph. Otherwise, the + exported program will not validate the implicit assumptions baked into the graph and + may cause behavior divergence between the original model and the exported one. This is + useful when users need to workaround bugs in the tracer, or simply want incrementally + enable safety in their models. Note that this does not affect the resulting IR spec + to be different and the model will be serialized in the same way regardless of what value + is passed here. + WARNING: This option is experimental and use this at your own risk. + + Returns: + An :class:`ExportedProgram` containing the traced callable. + + **Acceptable input/output types** + + Acceptable types of inputs (for ``args`` and ``kwargs``) and outputs include: + + - Primitive types, i.e. ``torch.Tensor``, ``int``, ``float``, ``bool`` and ``str``. + - Dataclasses, but they must be registered by calling :func:`register_dataclass` first. + - (Nested) Data structures comprising of ``dict``, ``list``, ``tuple``, ``namedtuple`` and + ``OrderedDict`` containing all above types. + + """ + from ._trace import _export + + if not isinstance(mod, torch.nn.Module): + raise ValueError( + f"Expected `mod` to be an instance of `torch.nn.Module`, got {type(mod)}." + ) + + return _export( + mod, + args, + kwargs, + dynamic_shapes, + strict=strict, + preserve_module_call_signature=preserve_module_call_signature, + ) + + +def save( + ep: ExportedProgram, + f: Union[str, os.PathLike, io.BytesIO], + *, + extra_files: Optional[Dict[str, Any]] = None, + opset_version: Optional[Dict[str, int]] = None, +) -> None: + """ + + .. warning:: + Under active development, saved files may not be usable in newer versions + of PyTorch. + + Saves an :class:`ExportedProgram` to a file-like object. It can then be + loaded using the Python API :func:`torch.export.load `. + + Args: + ep (ExportedProgram): The exported program to save. + + f (Union[str, os.PathLike, io.BytesIO): A file-like object (has to + implement write and flush) or a string containing a file name. + + extra_files (Optional[Dict[str, Any]]): Map from filename to contents + which will be stored as part of f. 
+ + opset_version (Optional[Dict[str, int]]): A map of opset names + to the version of this opset + + + Example:: + + import torch + import io + + class MyModule(torch.nn.Module): + def forward(self, x): + return x + 10 + + ep = torch.export.export(MyModule(), (torch.randn(5),)) + + # Save to file + torch.export.save(ep, 'exported_program.pt2') + + # Save to io.BytesIO buffer + buffer = io.BytesIO() + torch.export.save(ep, buffer) + + # Save with extra files + extra_files = {'foo.txt': b'bar'.decode('utf-8')} + torch.export.save(ep, 'exported_program.pt2', extra_files=extra_files) + + """ + from torch._export import save + + if not isinstance(ep, ExportedProgram): + raise TypeError( + f"The 'ep' parameter must be an instance of 'ExportedProgram', got '{type(ep).__name__}' instead." + ) + + save(ep, f, extra_files=extra_files, opset_version=opset_version) + + +def load( + f: Union[str, os.PathLike, io.BytesIO], + *, + extra_files: Optional[Dict[str, Any]] = None, + expected_opset_version: Optional[Dict[str, int]] = None, +) -> ExportedProgram: + """ + + .. warning:: + Under active development, saved files may not be usable in newer versions + of PyTorch. + + Loads an :class:`ExportedProgram` previously saved with + :func:`torch.export.save `. + + Args: + ep (ExportedProgram): The exported program to save. + + f (Union[str, os.PathLike, io.BytesIO): A file-like object (has to + implement write and flush) or a string containing a file name. + + extra_files (Optional[Dict[str, Any]]): The extra filenames given in + this map would be loaded and their content would be stored in the + provided map. + + expected_opset_version (Optional[Dict[str, int]]): A map of opset names + to expected opset versions + + Returns: + An :class:`ExportedProgram` object + + Example:: + + import torch + import io + + # Load ExportedProgram from file + ep = torch.export.load('exported_program.pt2') + + # Load ExportedProgram from io.BytesIO object + with open('exported_program.pt2', 'rb') as f: + buffer = io.BytesIO(f.read()) + buffer.seek(0) + ep = torch.export.load(buffer) + + # Load with extra files. + extra_files = {'foo.txt': ''} # values will be replaced with data + ep = torch.export.load('exported_program.pt2', extra_files=extra_files) + print(extra_files['foo.txt']) + print(ep(torch.randn(5))) + """ + from torch._export import load + + return load( + f, extra_files=extra_files, expected_opset_version=expected_opset_version + ) + + +def register_dataclass( + cls: Type[Any], + *, + serialized_type_name: Optional[str] = None, +) -> None: + """ + Registers a dataclass as a valid input/output type for :func:`torch.export.export`. + + Args: + cls: the dataclass type to register + serialized_type_name: The serialized name for the dataclass. This is + required if you want to serialize the pytree TreeSpec containing this + dataclass. 
+ + Example:: + + @dataclass + class InputDataClass: + feature: torch.Tensor + bias: int + + class OutputDataClass: + res: torch.Tensor + + torch.export.register_dataclass(InputDataClass) + torch.export.register_dataclass(OutputDataClass) + + def fn(o: InputDataClass) -> torch.Tensor: + res = res=o.feature + o.bias + return OutputDataClass(res=res) + + ep = torch.export.export(fn, (InputDataClass(torch.ones(2, 2), 1), )) + print(ep) + + """ + + from torch._export.utils import register_dataclass_as_pytree_node + + return register_dataclass_as_pytree_node( + cls, serialized_type_name=serialized_type_name + ) diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49cb598b81e9d31b585ab724b41e7748bb217691 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/export/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/_remove_auto_functionalized_pass.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_remove_auto_functionalized_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..569e86e3609d05d11425e713e8d18106c1a6b776 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_remove_auto_functionalized_pass.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/_remove_effect_tokens_pass.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_remove_effect_tokens_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50e557ef645f252cd975f28cdb213bcdcfbef891 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_remove_effect_tokens_pass.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/_safeguard.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_safeguard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50c5f1ec6abd7554a8bda068f1056a080bee7d70 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_safeguard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/_trace.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_trace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c50ca9a118b0f4885d827cbf3f2adb2eb777017b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_trace.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/_tree_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_tree_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0aa9bc3b9d1d8c1a89a3dab40abda3670e19cc4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_tree_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/_unlift.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_unlift.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d53dde4d4c8e94a14356ed7a9131d9edad2a4f51 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/export/__pycache__/_unlift.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/custom_obj.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/custom_obj.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bfc6c13453c57d2a41cb97c28ac66526b869386 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/export/__pycache__/custom_obj.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/dynamic_shapes.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/dynamic_shapes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37dbc18b37b4fe3a612b41d3558f62e09de48dbf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/export/__pycache__/dynamic_shapes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/exported_program.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/exported_program.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..372f77f0719898ee2c807230a0f7426fe329f5bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/export/__pycache__/exported_program.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/graph_signature.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/graph_signature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e588fea9eb3fc5ee850241641f02fe6f54ef1d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/export/__pycache__/graph_signature.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/__pycache__/unflatten.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/export/__pycache__/unflatten.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..237995932491fff4abf35750968de89cd114d823 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/export/__pycache__/unflatten.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/export/_remove_auto_functionalized_pass.py b/venv/lib/python3.10/site-packages/torch/export/_remove_auto_functionalized_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..76280411f2bcf6124c8b659171699806bb01fc2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/_remove_auto_functionalized_pass.py @@ -0,0 +1,93 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import operator +from typing import List + +import torch +from torch._higher_order_ops.auto_functionalize import ( + auto_functionalized, + get_mutable_arg_names, +) +from torch.export import ExportedProgram + + +def unsafe_remove_auto_functionalized_pass( + ep: ExportedProgram, +) -> ExportedProgram: + """ + This pass removes an instances of the higher order op 'auto_functionalized', + and modifies the calling EP inplace to have the original mutator op. + This pass doesn't perform safety checks to make sure that this inplace mutation is safe. 
+ """ + auto_functionalize_nodes: List[torch.fx.Node] = [] + for module in ep.graph_module.modules(): + if not isinstance(module, torch.fx.GraphModule): + continue + for node in ep.graph.nodes: + if node.op == "call_function" and node.target is auto_functionalized: + auto_functionalize_nodes.append(node) + + # Update every use of the HOP + for node in reversed(auto_functionalize_nodes): + func = node.args[0] + original_kwargs = node.kwargs + assert isinstance(func, torch._ops.OpOverload) + + with ep.graph.inserting_before(node): + # This makes the call_function refer to every arg as a kwarg, this is weird but probably fine? + new_node = ep.graph.call_function(func, kwargs=node.kwargs) + for k, v in node.meta.items(): + new_node.meta[k] = v + + # Replace auto_functionalize(func, args) with just func(args) + node.replace_all_uses_with(new_node) + + mutable_args_names = get_mutable_arg_names(new_node.target) + output_specs = ep.graph_signature.output_specs + + # update the users of the auto_func node (the getitem nodes) + for user in list(new_node.users.keys()): + assert user.target == operator.getitem + # getitem corresponding to a mutated input, just replace all uses with the original input + if user.args[1] >= len(func._schema.returns): + assert user.args[1] <= len(func._schema.returns) + len( + mutable_args_names + ) + + # If the result of getitem was used in an output node, update the output spec with the correct name + adusted_index = user.args[1] - len(func._schema.returns) + original_arg = original_kwargs[mutable_args_names[adusted_index]] + for spec in output_specs: + if spec.arg.name == user.name: + spec.arg.name = original_arg.name # pyre-ignore + break + + # This is a little fragile/implementation dependent, but the order of the mutable args is the same as the order + # of the getitem calls following the HOP. + user.replace_all_uses_with( + original_kwargs[mutable_args_names[adusted_index]] + ) + + if len(func._schema.returns) == 1: + # If the function has 1 return then it will just directly return the + # result -- we don't need a getitem. So we can replace all the + # getitem(auto_functionalized, 0) with just the note itself. 
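+        # (Added illustration, not in the upstream file; op and argument names
+        # are hypothetical. Conceptually, for an op with one return and one
+        # mutable arg, this pass rewrites
+        #
+        #   af    = auto_functionalized(some_mutating_op, x=arg0)
+        #   out   = af[0]   # the op's result
+        #   x_new = af[1]   # functionalized copy of the mutated arg "x"
+        #
+        # into
+        #
+        #   out = some_mutating_op(x=arg0)   # mutates arg0 in place again
+        #
+        # Uses of x_new were rewired back to arg0 in the loop above; uses of
+        # af[0] are redirected to the new call node just below.)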
+ for user in list(new_node.users.keys()): + if user.args[1] == 0: + user.replace_all_uses_with(new_node) + + # Same case as above, update the output spec if getitem result used in an output node + for spec in output_specs: + if spec.arg.name == user.name: + spec.arg.name = new_node.name + break + + new_node.meta["val"] = node.meta["val"][: len(func._schema.returns)] + ep.graph.erase_node(node) + + ep.graph.eliminate_dead_code() + return ep diff --git a/venv/lib/python3.10/site-packages/torch/export/_remove_effect_tokens_pass.py b/venv/lib/python3.10/site-packages/torch/export/_remove_effect_tokens_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..f9f2d01ea6be2b3943e9b84ad49a24f2ac0517f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/_remove_effect_tokens_pass.py @@ -0,0 +1,126 @@ +import operator +from typing import List + +import torch +from torch._higher_order_ops.effects import with_effects +from .exported_program import ExportedProgram +from .graph_signature import ( + InputKind, + InputSpec, + OutputKind, + OutputSpec, + TensorArgument, +) + + +def _remove_effect_tokens(ep: ExportedProgram) -> ExportedProgram: + """ + Removes the existance of tokens from the exported program, including: + - Removes the input and output tokens + - Replaces with_effects(token, func, args) with just func(args) + + This function does an inplace modification on the given ExportedProgram. + """ + num_tokens: int = 0 + input_token_names: List[str] = [] + new_input_specs: List[InputSpec] = [] + for inp in ep.graph_signature.input_specs: + if inp.kind == InputKind.TOKEN: + num_tokens += 1 + assert isinstance(inp.arg, TensorArgument) + input_token_names.append(inp.arg.name) + else: + new_input_specs.append(inp) + + num_out_tokens: int = 0 + new_output_specs: List[str] = [] + output_token_names: List[OutputSpec] = [] + for out in ep.graph_signature.output_specs: + if out.kind == OutputKind.TOKEN: + num_out_tokens += 1 + output_token_names.append(out.arg.name) + else: + new_output_specs.append(out) + + assert num_tokens == num_out_tokens + + output_node = None + with_effect_nodes: List[torch.fx.Node] = [] + for node in ep.graph.nodes: + if node.op == "output": + output_node = node + break + + if not (node.op == "call_function" and node.target is with_effects): + continue + + with_effect_nodes.append(node) + + # Remove tokens from outputs + assert output_node is not None + output_args = output_node.args[0] + assert len(output_args) >= num_tokens + out_token_nodes = output_args[:num_tokens] + output_node.args = (tuple(output_args[num_tokens:]),) + for out_token in out_token_nodes: + assert out_token.name in output_token_names + ep.graph.erase_node(out_token) + + # Replace with_effects(token, func, args) with just func(args) + for node in reversed(with_effect_nodes): + func = node.args[1] + assert isinstance(func, torch._ops.OpOverload) + + with ep.graph.inserting_before(node): + new_node = ep.graph.call_function(func, node.args[2:]) + for k, v in node.meta.items(): + new_node.meta[k] = v + + node.replace_all_uses_with(new_node) + + # Update user getitem nodes + for user in list(new_node.users.keys()): + assert user.target == operator.getitem + # getitem(with_effects, 0) == token + if user.args[1] == 0: + ep.graph.erase_node(user) + + if len(func._schema.returns) == 1: + # If the function has 1 return then it will just directly return the + # result -- we don't need a getitem. So we can replace all the + # getitem(with_effects, 1) with just the note itself. 
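+        # (Added illustration, not in the upstream file; the op name is
+        # hypothetical. Conceptually, for an op with a single return, this pass
+        # rewrites
+        #
+        #   te   = with_effects(token, some_op, x)
+        #   tok2 = te[0]   # output token (dropped)
+        #   out  = te[1]   # the op's actual result
+        #
+        # into
+        #
+        #   out = some_op(x)
+        #
+        # The token getitem was erased above; the remaining getitem(te, 1) uses
+        # are redirected to the new call node just below.)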
+ for user in list(new_node.users.keys()): + assert user.args[1] == 1 + user.replace_all_uses_with(new_node) + + new_node.meta["val"] = node.meta["val"][1] + elif len(func._schema.returns) > 1: + # If the function has more than 1 return then since we got rid of + # the 1st return value (the token), we need to bump all the other + # getitem calls by 1 down + for user in list(new_node.users.keys()): + assert user.args[1] >= 1 + user.args = (user.args[0], user.args[1] - 1) + + new_node.meta["val"] = node.meta["val"][1:] + else: + assert len(func._schema.returns) == 0 + assert len(new_node.users) == 0 + new_node.meta["val"] = None + + ep.graph.erase_node(node) + + # Remove tokens from inputs + placeholders = [node for node in ep.graph.nodes if node.op == "placeholder"] + assert len(placeholders) >= num_tokens + inp_token_nodes = placeholders[:num_tokens] + for inp_token in inp_token_nodes: + assert inp_token.name in input_token_names + ep.graph.erase_node(inp_token) + + # Update graph signature + ep.graph_signature.input_specs = new_input_specs + ep.graph_signature.output_specs = new_output_specs + + ep.graph.eliminate_dead_code() + return ep diff --git a/venv/lib/python3.10/site-packages/torch/export/_safeguard.py b/venv/lib/python3.10/site-packages/torch/export/_safeguard.py new file mode 100644 index 0000000000000000000000000000000000000000..854fff2bcca279ae4861ec9d3d4a2f7038540c12 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/_safeguard.py @@ -0,0 +1,42 @@ +import torch +from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode +from torch.overrides import TorchFunctionMode + + +class AutogradStateOpsFailSafeguard(TorchFunctionMode): + """ + Detect grad state ops during exporting the graph and fail the process by + raising an error, to avoid unexpected behavior. Those grad mode ops could be: + `torch.no_grad` + `torch.enable_grad` + `torch.set_grad_enabled` + + Export with predispatch mode is exempted. + """ + + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + unsupported_grad_mode_ops = [ + torch._C._set_grad_enabled, + ] + # It's only enabled while tracing, by confirming the torch dispatch mode is + # any active PROXY. This is to allow the autograd ops out of tracing. + current_state = torch._C.is_grad_enabled() + if func in unsupported_grad_mode_ops: + assert len(args) == 1 + changed_state = args[0] + mode = torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY) + # Intend to check if it's not the pre_dispatch mode. It's allowed to use + # autograd ops in pre_dispatch mode, e.g. `torch.no_grad` + if ( + mode + and isinstance(mode, ProxyTorchDispatchMode) + and not mode.pre_dispatch + and changed_state != current_state + ): + raise RuntimeError( + f"Encountered autograd state manager op {func} trying to change global autograd state " + "while exporting. This is unsafe because we don't capture this op in torch.export " + "today, hence we can't reflect the user intention soundly." 
+ ) + return func(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/export/_trace.py b/venv/lib/python3.10/site-packages/torch/export/_trace.py new file mode 100644 index 0000000000000000000000000000000000000000..a7170ea2a17eb0fe03c890a1fb0f278a46ef9c38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/_trace.py @@ -0,0 +1,1060 @@ +import dataclasses +import functools +import inspect +import logging +import re +import time +import warnings +from contextlib import contextmanager, nullcontext +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union + +import torch +import torch._dynamo +import torch.fx + +import torch.utils._pytree as pytree +from torch._dynamo.exc import UserError, UserErrorType +from torch._export.non_strict_utils import ( + make_constraints, + make_fake_inputs, + make_fake_params_buffers, +) +from torch._export.passes.add_runtime_assertions_for_constraints_pass import ( + _AddRuntimeAssertionsForInlineConstraintsPass, +) +from torch._export.passes.collect_tracepoints_pass import CollectTracepointsPass +from torch._export.passes.lift_constants_pass import ( + ConstantAttrMap, + lift_constants_pass, + rewrite_script_object_meta, +) +from torch._export.wrappers import _wrap_submodules +from torch._functorch.aot_autograd import aot_export_module +from torch._guards import detect_fake_mode +from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode +from torch._utils_internal import log_export_usage +from torch.export.exported_program import OutputKind +from torch.fx.experimental.symbolic_shapes import ( + ConstraintViolationError, + free_unbacked_symbols, + GuardOnDataDependentSymNode, + ShapeEnv, +) +from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo +from torch.utils._sympy.value_ranges import ValueRangeError + +from ._safeguard import AutogradStateOpsFailSafeguard + +from .dynamic_shapes import _process_constraints, Constraint +from .exported_program import ( + _disable_prexisiting_fake_mode, + ExportedProgram, + InputKind, + ModuleCallEntry, + ModuleCallSignature, +) +from .graph_signature import ( + _sig_to_specs, + ArgumentSpec, + ConstantArgument, + CustomObjArgument, + ExportGraphSignature, + SymIntArgument, + TensorArgument, +) + + +log = logging.getLogger(__name__) + + +@dataclasses.dataclass +class ExportDynamoConfig: + """ + Manage Export-specific configurations of Dynamo. 
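+
+    Illustrative sketch of how such a config is applied (this mirrors the
+    ``torch._dynamo.config.patch(dataclasses.asdict(...))`` call made later in
+    this file; the ``cfg`` value below is made up)::
+
+        import dataclasses
+
+        import torch
+
+        cfg = ExportDynamoConfig(allow_rnn=False)
+        with torch._dynamo.config.patch(dataclasses.asdict(cfg)):
+            ...  # e.g. run torch._dynamo.export under these settings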
+ """ + + allow_rnn: bool = True + reorderable_logging_functions: Set[Callable] = dataclasses.field( + default_factory=set + ) + + +DEFAULT_EXPORT_DYNAMO_CONFIG = ExportDynamoConfig() +DEFAULT_EXPORT_DYNAMO_CONFIG.reorderable_logging_functions = { + logging.critical, + logging.debug, + logging.error, + logging.exception, + logging.info, + logging.log, + logging.warning, + print, + warnings.warn, +} + + +@contextmanager +def _ignore_backend_decomps(): + orig_mkldnn_flag = torch.backends.mkldnn.set_flags(False) + orig_nnpack_flag = torch.backends.nnpack.set_flags(False) + try: + yield + finally: + torch.backends.mkldnn.set_flags(*orig_mkldnn_flag) + torch.backends.nnpack.set_flags(*orig_nnpack_flag) + + +def _convert_input_to_fake(gm, args, kwargs): + params_buffers = _get_params_buffers(gm) + fake_inps: List[torch.Tensor] = [] + for node in gm.graph.nodes: + if node.op == "placeholder" and "val" in node.meta: + fake_val = node.meta["val"] + if fake_val is not None and isinstance(fake_val, torch.Tensor): + fake_inps.append(fake_val) + + if detected_fake_mode := detect_fake_mode(fake_inps): + fake_mode = detected_fake_mode + else: + fake_mode = FakeTensorMode(shape_env=ShapeEnv()) + + if len(args) == 0 and len(kwargs) == 0: + return (), {}, params_buffers, fake_mode + + count = 0 + + def convert_to_fake(x): + nonlocal count + val = fake_inps[count] + count += 1 + return val + + fake_args = pytree.tree_map_only(torch.Tensor, convert_to_fake, args) + # TODO properly use the cached fake tensor + fake_kwargs = pytree.tree_map_only(torch.Tensor, fake_mode.from_tensor, kwargs) + fake_params_buffers = pytree.tree_map_only( + torch.Tensor, + functools.partial(fake_mode.from_tensor, static_shapes=True), + params_buffers, + ) + return fake_args, fake_kwargs, fake_params_buffers, fake_mode + + +def _replace_param_buffer_names(param_buffer_table, sig): + for spec in sig.input_specs: + if spec.kind in ( + InputKind.PARAMETER, + InputKind.BUFFER, + ): + spec.target = param_buffer_table[spec.target] + for spec in sig.output_specs: + if spec.kind in ( + OutputKind.BUFFER_MUTATION, + OutputKind.GRADIENT_TO_PARAMETER, + ): + spec.target = param_buffer_table[spec.target] + + +def _convert_to_positional_args(orig_arg_names, args, kwargs): + assert len(orig_arg_names) == len(args) + len(kwargs), ( + f"Total number of arg names is expected to be {len(orig_arg_names)} " + f"but got {len(args)} positional args, {len(kwargs)} kwargs." + ) + reordered_kwargs = [kwargs[kw_name] for kw_name in orig_arg_names[len(args) :]] + return ( + *args, + *reordered_kwargs, + ) + + +def _normalize_nn_module_stack(gm_torch_level, root_cls): + # Append a root module to every nn_module_stack. + root = "L['self']" + root_key = re.sub(r"[^a-zA-Z0-9]", "_", root) + for gm in gm_torch_level.modules(): + if not isinstance(gm, torch.fx.GraphModule): + continue + for node in gm.graph.nodes: + if node.op in ["placeholder", "output"]: + continue + add_root = True + if nn_module_stack := node.meta.get("nn_module_stack", {}): + path, ty = next(iter(nn_module_stack.values())) + # After deserializing the class `ty` might not exist anymore so + # it could be a string + if inspect.isclass(ty) and issubclass(ty, torch.nn.Module): + # TODO Figure out why sometimes we have root sometimes we don't. 
+ if path == root and ty is root_cls: + add_root = False + else: + assert isinstance(ty, str) + if add_root: + + def normalize_path(path): + try: + parts = [] + + class Path: + def __getattr__(self, name): + parts.append(name) + return self + + def __getitem__(self, idx): + parts.append(str(idx)) + return self + + eval(path, {"L": {"self": Path()}}) + return ".".join(parts) + except Exception: # TODO(zhxchen17) Remove this. + return path + + nn_module_stack = {root_key: (root, root_cls), **nn_module_stack} + node.meta["nn_module_stack"] = { + key: (normalize_path(path), ty) + for key, (path, ty) in nn_module_stack.items() + } + + +def _get_param_buffer_mapping( + original_module: torch.nn.Module, + traced_module: torch.nn.Module, +) -> Dict[str, str]: + """ + Returns a mapping of parameter/buffer names from the new module to the + original model. This is to help with restoring the FQN for parameter/buffers + of a traced module to what the original module contains. + """ + + param_lookup: Dict[int, List[str]] = {} + buffer_lookup: Dict[int, List[str]] = {} + for name, param in original_module.named_parameters(remove_duplicate=False): + param_lookup.setdefault(id(param), []).append(name) + for name, buffer in original_module.named_buffers(remove_duplicate=False): + buffer_lookup.setdefault(id(buffer), []).append(name) + + param_buffer_table: Dict[str, str] = {} + for dynamo_name, dynamo_param in traced_module.named_parameters( + remove_duplicate=False + ): + assert dynamo_name not in param_buffer_table + if id(dynamo_param) in param_lookup: + param_buffer_table[dynamo_name] = param_lookup[id(dynamo_param)].pop() + + for dynamo_name, dynamo_buffer in traced_module.named_buffers( + remove_duplicate=False + ): + assert dynamo_name not in param_buffer_table + if id(dynamo_buffer) in buffer_lookup: + param_buffer_table[dynamo_name] = buffer_lookup[id(dynamo_buffer)].pop() + + return param_buffer_table + + +def _remap_constants( + orig_constant_attrs: ConstantAttrMap, + graph_signature: ExportGraphSignature, + constants: Dict[str, Union[torch.Tensor, torch.ScriptObject]], +) -> None: + """Rewrite the graph signature and constants table to use the FQN from the original module.""" + remap_table: Dict[str, str] = {} + for name, value in constants.items(): + if value in orig_constant_attrs: + remap_table[name] = orig_constant_attrs[value] + + for spec in graph_signature.input_specs: + if spec.kind in ( + InputKind.CONSTANT_TENSOR, + InputKind.CUSTOM_OBJ, + ): + orig_target = spec.target + assert orig_target is not None + spec.target = remap_table.get(orig_target, orig_target) + + constant = constants[orig_target] + del constants[orig_target] + constants[spec.target] = constant + + +def _restore_state_dict( + original_module: torch.nn.Module, traced_module: torch.fx.GraphModule +) -> None: + """ + Restores the state dict of the traced module to that of the original module. + """ + param_buffer_table = _get_param_buffer_mapping(original_module, traced_module) + # Since the graph module is flattened (no module heirarchy), we + # need to noramlize the module by replacing "." with "_". If we + # don't, it will try to save the weight to a submodule which no + # longer exists. 
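+    # Editor's note: a minimal illustration of why the mangling is needed (the
+    # buffer name "linear.weight" below is hypothetical):
+    #
+    #   import torch
+    #   m = torch.nn.Module()
+    #   m.register_buffer("linear.weight", torch.zeros(2))  # KeyError: name can't contain "."
+    #   m.register_buffer("linear_weight", torch.zeros(2))  # flat name is accepted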
+ for name, fqn in param_buffer_table.items(): + param_buffer_table[name] = fqn.replace(".", "_") + + # Replace state dict attr names with the fqn + for name, fqn in param_buffer_table.items(): + if not hasattr(traced_module, name): + continue + + attr = getattr(traced_module, name) + if isinstance(attr, torch.Tensor) and not isinstance(attr, torch.nn.Parameter): + traced_module.register_buffer(fqn, attr) + else: + setattr(traced_module, fqn, attr) + delattr(traced_module, name) + + # Replace graph getattr nodes with the correct name + for node in traced_module.graph.nodes: + if node.op == "get_attr": + attr_name = node.target + if attr_name in param_buffer_table: + node.target = param_buffer_table[attr_name] + + traced_module.recompile() + + +def _export_to_torch_ir( + f: Callable, + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + constraints: Optional[List[Constraint]] = None, + *, + preserve_module_call_signature: Tuple[str, ...] = (), + disable_constraint_solver: bool = False, + restore_fqn: bool = True, + _log_export_usage: bool = True, +) -> torch.fx.GraphModule: + """ + Traces either an nn.Module's forward function or just a callable with PyTorch + operations inside and produce a torch.fx.GraphModule in torch IR. + """ + + if _log_export_usage: + log_export_usage(event="export.private_api", flags={"_export_to_torch_ir"}) + + kwargs = kwargs or {} + + if not isinstance(args, tuple): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expecting `args` to be a tuple of example positional inputs, got {type(args)}", + ) + + with torch._dynamo.config.patch(dataclasses.asdict(DEFAULT_EXPORT_DYNAMO_CONFIG)): + try: + module_call_specs: Dict[str, Dict[str, pytree.TreeSpec]] = {} + with _wrap_submodules( + f, preserve_module_call_signature, module_call_specs + ), _ignore_backend_decomps(): + gm_torch_level, _ = torch._dynamo.export( + f, + constraints=constraints, # type: ignore[arg-type] + assume_static_by_default=True, + tracing_mode="symbolic", + disable_constraint_solver=disable_constraint_solver, + _log_export_usage=_log_export_usage, + )( + *args, + **kwargs, + ) + except (ConstraintViolationError, ValueRangeError) as e: + raise UserError(UserErrorType.CONSTRAINT_VIOLATION, str(e)) # noqa: TRY200 + except GuardOnDataDependentSymNode as e: + raise UserError( # noqa: TRY200 + UserErrorType.ANTI_PATTERN, + f"Consider annotating your code using torch._constrain_as_*(). {str(e)}", + case_name="constrain_as_size_example", + ) + + gm_torch_level.meta["module_call_specs"] = module_call_specs + + if isinstance(f, torch.nn.Module) and restore_fqn: + _restore_state_dict(f, gm_torch_level) + + return gm_torch_level + + +def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap: + """Search the module hierarchy, gathering up all tensor and ScriptObject constants. + + Returns a dictionary mapping hash(value) to the name of the constant. We + have to abuse `hash` here unfortunately, see: [ScriptObject hash]. + """ + constants = ConstantAttrMap() + buffers_parameters = set(m.buffers()) + buffers_parameters.update(m.parameters()) + + def inner(m: torch.nn.Module, prefix_atoms: List[str], constants): + for k, v in m.__dict__.items(): + if isinstance(v, (torch.Tensor, torch.ScriptObject)): + if v in buffers_parameters: + # filter out buffers and parameters, leaving only constants + continue + + fqn = ".".join(prefix_atoms + [k]) + if v in constants: + raise ValueError( + f"Duplicate reference to constant attribute found: '{constants[v]}' and '{fqn}'." 
+ ) + + constants[v] = fqn + for k, v in m.named_children(): + inner(v, prefix_atoms + [k], constants) + + inner(m, [], constants) + return constants + + +def _export_non_strict( + mod: torch.nn.Module, + fake_args, + fake_kwargs, + fake_params_buffers, + constant_attrs: ConstantAttrMap, + *, + transform=lambda x: x, # TODO(zhxchen17) Revisit if this is needed later. + pre_dispatch=False, +): + # [NOTE] If the user is exporting under training mode, we want to detect if there is any + # state change in the autograd global state and error. If the user is exporting under inference + # mode, we don't care. + is_grad_enabled = torch._C.is_grad_enabled() + grad_safe_guard = ( + AutogradStateOpsFailSafeguard() if is_grad_enabled else nullcontext() + ) + + @contextmanager + def _compiling_state_context(): + old_value = torch.compiler._is_compiling_flag + try: + torch.compiler._is_compiling_flag = True + yield + finally: + torch.compiler._is_compiling_flag = old_value + + # This _reparametrize_module makes sure inputs and module.params/buffers have the same fake_mode, + # otherwise aot_export_module will error out because it sees a mix of fake_modes. + # And we want aot_export_module to use the fake_tensor mode in dynamo to keep the pipeline easy to reason about. + with torch.nn.utils.stateless._reparametrize_module( + mod, fake_params_buffers + ), grad_safe_guard, _ignore_backend_decomps(), _compiling_state_context(): # type: ignore[attr-defined] + gm, graph_signature = transform(aot_export_module)( + mod, + fake_args, + trace_joint=False, + pre_dispatch=pre_dispatch, + kwargs=fake_kwargs, + ) + # TODO unfortunately preserving graph-level metadata is not + # working well with aot_export. So we manually copy it. + # (The node-level meta is addressed above.) + if isinstance(mod, torch.fx.GraphModule) and hasattr(mod, "meta"): + gm.meta.update(mod.meta) + + if pre_dispatch: + from torch._export.passes.replace_set_grad_with_hop_pass import ( + replace_set_grad_with_hop_pass, + ) + + gm = replace_set_grad_with_hop_pass(gm) + + # NOTE: aot_export adds symint metadata for placeholders with int values; + # since these become specialized, we replace such metadata with the original values + flat_args = pytree.tree_leaves((fake_args, fake_kwargs)) + index = 0 + total_non_user_inputs = ( + len(graph_signature.parameters) + + len(graph_signature.buffers) + + len(graph_signature.input_tokens) + ) + for node in gm.graph.nodes: + if node.op == "placeholder": + if index >= total_non_user_inputs: + user_arg = flat_args[index - total_non_user_inputs] + if not isinstance(user_arg, torch.Tensor): + node.meta["val"] = user_arg + index += 1 + + is_joint = graph_signature.backward_signature is not None + + def make_argument_spec(node) -> ArgumentSpec: + if isinstance(node, (int, bool, float, type(None))): + # For const outputs we just directly return this + return ConstantArgument(value=node) + + assert ( + "val" in node.meta + ), f"{node} is not a constant or a node with a 'val' metadata field" + val = node.meta["val"] + if isinstance(val, FakeTensor): + return TensorArgument(name=node.name) + elif isinstance(val, torch.SymInt): + return SymIntArgument(name=node.name) + elif isinstance(val, torch.ScriptObject): + return CustomObjArgument( + name=node.name, class_fqn=val._type().qualified_name() # type: ignore[attr-defined] + ) + else: + # TODO: this branch is likely wrong, all permissible ConstantArgument type + # should have been handled already + return ConstantArgument(value=val) + + input_specs, output_specs = 
_sig_to_specs( + user_inputs=set(graph_signature.user_inputs), + inputs_to_parameters=graph_signature.inputs_to_parameters, # type: ignore[arg-type] + inputs_to_buffers=graph_signature.inputs_to_buffers, # type: ignore[arg-type] + user_outputs=set(graph_signature.user_outputs), # type: ignore[arg-type] + buffer_mutations=graph_signature.buffers_to_mutate, # type: ignore[arg-type] + user_input_mutations=graph_signature.user_inputs_to_mutate, # type: ignore[arg-type] + grad_params=graph_signature.backward_signature.gradients_to_parameters if is_joint else {}, # type: ignore[arg-type, union-attr] + grad_user_inputs=graph_signature.backward_signature.gradients_to_user_inputs if is_joint else {}, # type: ignore[arg-type, union-attr] + loss_output=graph_signature.backward_signature.loss_output if is_joint else None, # type: ignore[arg-type, union-attr] + inputs=[ + make_argument_spec(node) + for node in gm.graph.nodes + if node.op == "placeholder" + ], + outputs=[ + make_argument_spec(node) + for node in pytree.tree_leaves(next(iter(reversed(gm.graph.nodes))).args) + ], + input_tokens=graph_signature.input_tokens, + output_tokens=graph_signature.output_tokens, + ) + export_graph_signature = ExportGraphSignature( + input_specs=input_specs, output_specs=output_specs + ) + + constants = rewrite_script_object_meta(gm) + constants.update(lift_constants_pass(gm, export_graph_signature, constant_attrs)) + + @dataclasses.dataclass + class _ExportedProgramNonStrict: + gm: torch.fx.GraphModule + sig: ExportGraphSignature + constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] + + return _ExportedProgramNonStrict( + gm, + export_graph_signature, + constants, + ) + + +def _get_params_buffers(mod: torch.nn.Module) -> Dict[str, torch.Tensor]: + params_buffers: Dict[str, torch.Tensor] = {} + for name, param in mod.named_parameters(remove_duplicate=False): + params_buffers[name] = param + + for name, buffer in mod.named_buffers(remove_duplicate=False): + params_buffers[name] = buffer + return params_buffers + + +def _rewrite_dynamo_tensor_constants( + orig_mod_buffers: Set[torch.Tensor], + traced_mod_buffers: Dict[str, torch.Tensor], + graph_signature: ExportGraphSignature, + constants: Dict[str, Union[torch.Tensor, torch.ScriptObject]], +): + """Dynamo erroneously marks tensor attributes on modules as a buffers. + + Rewrite them to be tensor constants. + """ + for spec in graph_signature.input_specs: + if spec.kind == InputKind.BUFFER: + assert spec.target is not None + value = traced_mod_buffers[spec.target] + if value not in orig_mod_buffers: + # This was a tensor constant erroneously marked as a buffer. + # Convert it int oa constant in the graph signature, and add its + # value to the constants table. + spec.kind = InputKind.CONSTANT_TENSOR + constants[spec.target] = value + + +def _rewrite_non_persistent_buffers( + orig_mod: torch.nn.Module, + graph_signature: ExportGraphSignature, + constants: Dict[str, Union[torch.Tensor, torch.ScriptObject]], +): + """Dynamo erroneously drops the persistent flag on buffers. + + Rewrite non-persistent buffers to reflect the original module. 
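+
+    The distinction this relies on, in isolation (illustrative; ``cache`` is a
+    made-up buffer name)::
+
+        import torch
+
+        class M(torch.nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.register_buffer("cache", torch.zeros(1), persistent=False)
+
+        assert "cache" not in M().state_dict()  # so its value must come from `constants`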
+ """ + state_dict = orig_mod.state_dict() + for spec in graph_signature.input_specs: + if spec.kind == InputKind.BUFFER: + assert spec.target is not None + if spec.target not in state_dict: + assert spec.target not in constants + spec.persistent = False + constants[spec.target] = orig_mod.get_buffer(spec.target) + + +def get_ep_stats(ep: ExportedProgram) -> Dict[str, Any]: + op_count = 0 + op_set = set() + for m in ep.graph_module.modules(): + if not isinstance(m, torch.fx.GraphModule): + continue + for node in m.graph.nodes: + if node.op != "call_function": + continue + op_count += 1 + assert hasattr(node.target, "__module__") + assert hasattr(node.target, "__name__") + op_set.add(f"{node.target.__module__}.{node.target.__name__}") + return {"op_count": op_count, "op_set": op_set} + + +_EXPORT_FLAGS: Optional[Set[str]] = None + + +def _log_export_wrapper(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + global _EXPORT_FLAGS + try: + start = time.time() + ep = fn(*args, **kwargs) + end = time.time() + log_export_usage( + event="export.time", + metrics=end - start, + flags=_EXPORT_FLAGS, + **get_ep_stats(ep), + ) + except Exception as e: + t = type(e) + error_type = t.__module__ + "." + t.__qualname__ + log_export_usage( + event="export.error", + type=error_type, + message=str(e), + flags=_EXPORT_FLAGS, + ) + raise e + finally: + _EXPORT_FLAGS = None + + return ep + + return wrapper + + +@_log_export_wrapper +@_disable_prexisiting_fake_mode +def _export( + mod: torch.nn.Module, + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None, + *, + strict: bool = True, + preserve_module_call_signature: Tuple[str, ...] = (), + pre_dispatch: bool = False, +) -> ExportedProgram: + """ + Traces either an nn.Module's forward function or just a callable with PyTorch + operations inside and produce a ExportedProgram. + + Args: + f: the `nn.Module` to trace. + + args: example positional inputs. + + kwargs: optional example keyword inputs. + + dynamic_shapes: + An optional argument where the type should either be: + 1) a dict from argument names of ``f`` to their dynamic shape specifications, + 2) a tuple that specifies dynamic shape specifications for each input in original order. + If you are specifying dynamism on keyword args, you will need to pass them in the order that + is defined in the original function signature. + + The dynamic shape of a tensor argument can be specified as either + (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is + not required to include static dimension indices in this dict, but when they are, + they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None, + where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions + are denoted by None. Arguments that are dicts or tuples / lists of tensors are + recursively specified by using mappings or sequences of contained specifications. + + preserve_module_call_signature: A list of submodule paths for which the original + calling conventions are preserved as metadata. + + Returns: + An ExportedProgram containing the traced method. 
+ """ + from .dynamic_shapes import _process_dynamic_shapes + + global _EXPORT_FLAGS + flags = set() + flags.add("strict" if strict else "non_strict") + flags.add("pre_dispatch" if pre_dispatch else "aot_dispatch") + log_export_usage(event="export.enter", flags=flags) + _EXPORT_FLAGS = flags + + constraints = _process_dynamic_shapes(mod, args, kwargs, dynamic_shapes) or [] + + kwargs = kwargs or {} + + constant_attrs = _gather_constant_attrs(mod) + + flat_args, orig_in_spec = pytree.tree_flatten((args, kwargs)) + + if not strict: + out_spec = None + + module_call_specs: Dict[str, Dict[str, pytree.TreeSpec]] = {} + + def strip_root(x): + if isinstance(x, str) and x.startswith("_export_root"): + stripped = x[len("_export_root") :] + return stripped[1:] if stripped.startswith(".") else stripped + return x + + def fixup_key(x): + return "L__self__" + strip_root(x) + + def _tuplify_outputs(aot_export): + def _aot_export_non_strict(mod, args, kwargs=None, **flags): + kwargs = kwargs or {} + + class Wrapper(torch.nn.Module): + def __init__(self, mod): + super().__init__() + self._export_root = mod + + def forward(self, *args, **kwargs): + nonlocal out_spec + if isinstance(self._export_root, torch.fx.GraphModule): + with torch.fx.traceback.preserve_node_meta(): + tree_out = torch.fx.Interpreter(self._export_root).run( + *args, **kwargs + ) + else: + tree_out = self._export_root(*args, **kwargs) + flat_outs, out_spec = pytree.tree_flatten(tree_out) + return tuple(flat_outs) + + wrapped_mod = Wrapper(mod) + # Patch export_root to the signatures so that wrapper module correctly populates the + # in/out spec + new_preserved_call_signatures = [ + "_export_root." + i for i in preserve_module_call_signature + ] + with _wrap_submodules( + wrapped_mod, new_preserved_call_signatures, module_call_specs + ): + gm, sig = aot_export(wrapped_mod, args, kwargs=kwargs, **flags) + + sig.parameters = pytree.tree_map(strip_root, sig.parameters) + sig.buffers = pytree.tree_map(strip_root, sig.buffers) + sig.inputs_to_buffers = pytree.tree_map( + strip_root, sig.inputs_to_buffers + ) + sig.inputs_to_parameters = pytree.tree_map( + strip_root, sig.inputs_to_parameters + ) + sig.buffers_to_mutate = pytree.tree_map( + strip_root, sig.buffers_to_mutate + ) + for node in gm.graph.nodes: + if "nn_module_stack" in node.meta: + nn_module_stack = node.meta["nn_module_stack"] + node.meta["nn_module_stack"] = { + fixup_key(key): val + for key, val in pytree.tree_map( + strip_root, nn_module_stack + ).items() + } + + return gm, sig + + return _aot_export_non_strict + + ( + fake_mode, + fake_args, + fake_kwargs, + equalities_inputs, + original_signature, + ) = make_fake_inputs(mod, args, kwargs, constraints) + + fake_params_buffers = make_fake_params_buffers( + fake_mode, _get_params_buffers(mod) + ) + with fake_mode: + ep_non_strict = _export_non_strict( + mod, + fake_args, + fake_kwargs, + fake_params_buffers, + constant_attrs, + pre_dispatch=pre_dispatch, + transform=_tuplify_outputs, + ) + try: + range_constraints = make_constraints( + fake_mode, + equalities_inputs, + original_signature, + ep_non_strict.gm, + ) + except (ConstraintViolationError, ValueRangeError) as e: + raise UserError(UserErrorType.CONSTRAINT_VIOLATION, str(e)) # noqa: TRY200 + + assert out_spec is not None + + gm = ep_non_strict.gm + + module_call_signatures = { + strip_root(fqn): ModuleCallSignature(inputs=[], outputs=[], **specs) + for fqn, specs in module_call_specs.items() + } + + if len(preserve_module_call_signature) > 0: + for node in gm.graph.nodes: 
+ if node.target == torch.ops.higher_order._export_tracepoint: + if "path" in node.kwargs: + path = strip_root(node.kwargs["path"]) + with gm.graph.inserting_before(node): + new_node = gm.graph.create_node( + "call_function", + torch.ops.higher_order._export_tracepoint, + args=node.args, + kwargs={ + "path": path, + "kind": node.kwargs["kind"], + }, + ) + node.replace_all_uses_with(new_node) + gm.graph.erase_node(node) + + res = CollectTracepointsPass(module_call_signatures, ep_non_strict.sig)(gm) + assert res is not None + gm = res.graph_module + + _rewrite_non_persistent_buffers(mod, ep_non_strict.sig, ep_non_strict.constants) + return ExportedProgram( + root=gm, + graph=gm.graph, + graph_signature=ep_non_strict.sig, + state_dict=mod.state_dict(keep_vars=True), + range_constraints=range_constraints, + module_call_graph=[ + ModuleCallEntry( + "", + ModuleCallSignature( + inputs=[], outputs=[], in_spec=orig_in_spec, out_spec=out_spec + ), + ) + ] + + [ + ModuleCallEntry(fqn, sig) for fqn, sig in module_call_signatures.items() + ], + example_inputs=(args, kwargs), + constants=ep_non_strict.constants, + ) + + gm_torch_level = _export_to_torch_ir( + mod, + args, + kwargs, + constraints, + preserve_module_call_signature=preserve_module_call_signature, + restore_fqn=False, # don't need to restore because we will do it later + _log_export_usage=False, + ) + + # We detect the fake_mode by looking at gm_torch_level's placeholders, this is the fake_mode created in dynamo. + ( + fake_args, + fake_kwargs, + fake_params_buffers, + dynamo_fake_mode, + ) = _convert_input_to_fake(gm_torch_level, args, kwargs) + + # First, we want to pass through the graph to try populating + # val field for getattr if there is anything missing. + # This can happen when quantization adds extra params and forgets + # to update "val" + for node in gm_torch_level.graph.nodes: + if node.op == "get_attr" and "val" not in node.meta: + attr = getattr(gm_torch_level, node.target) + # Checks if it is not a HigherOrderOp branch or a module + if not isinstance(attr, torch.nn.Module): + assert ( + dynamo_fake_mode is not None + ), "Cannot find dynamo_fake_mode. This could be due to the exported graph module have no placeholders." + node.meta["val"] = dynamo_fake_mode.from_tensor( + attr, static_shapes=True + ) + + # When aot_export lifts the params, we lose the nn_module_stack + # and source_fn from the param nodes as they are treated as fresh inputs + # Therefore, we manually extract them before calling into aot_export + params_buffers_to_node_meta = {} + for node in gm_torch_level.graph.nodes: + target = node.target + meta = node.meta + if node.op == "call_module": + submodule = getattr(gm_torch_level, target) + if isinstance(submodule, torch.nn.Module): + for name, _ in submodule.named_parameters( + recurse=True, remove_duplicate=False + ): + params_buffers_to_node_meta[target + "." + name] = meta + + for name, _ in submodule.named_buffers( + recurse=True, remove_duplicate=False + ): + params_buffers_to_node_meta[target + "." + name] = meta + + if node.op == "get_attr": + submodule = getattr(gm_torch_level, target) + if not isinstance(submodule, torch.fx.GraphModule): + params_buffers_to_node_meta[target] = meta + + # If the call_function uses param as input, we also need to update params' meta + # with this call_function node's meta. 
+ # This is basically the same flow as torch.fx.traceback.preserve_meta() + if node.op == "call_function" and not isinstance( + node.target, torch._ops.HigherOrderOperator + ): + for arg in node._input_nodes: + if arg.op == "get_attr": + for entry in torch.fx.proxy._COPY_META_FIELDS: + if entry in meta: + params_buffers_to_node_meta[arg.target][entry] = meta[entry] + + # Fix the graph output signature to be tuple if scalar + out_spec = orig_out_spec = gm_torch_level._out_spec + assert out_spec is not None + # aot_export expect the return type to always be a tuple. + if out_spec.type not in (list, tuple): + out_spec = pytree.TreeSpec(tuple, None, [out_spec]) + + orig_arg_names = gm_torch_level.graph._codegen.pytree_info.orig_args # type: ignore[attr-defined] + + gm_torch_level.graph._codegen = _PyTreeCodeGen( + _PyTreeInfo( + orig_arg_names, + gm_torch_level._in_spec, + out_spec, + ) + ) + gm_torch_level.recompile() + + _normalize_nn_module_stack(gm_torch_level, type(mod)) + + # NOTE: graph module expects only positional args + ep_non_strict = _export_non_strict( + gm_torch_level, + _convert_to_positional_args(orig_arg_names, fake_args, fake_kwargs), + {}, + fake_params_buffers, + constant_attrs, + pre_dispatch=pre_dispatch, + ) + + gm = ep_non_strict.gm + export_graph_signature = ep_non_strict.sig + constants = ep_non_strict.constants + + # After aot_export, set the param/buffer metadata back into placeholders + # Technically, users can still construct this data from param names + # without relying on this metadata + for node in gm.graph.nodes: + if node.op == "placeholder": + if node.target in export_graph_signature.inputs_to_parameters: + param_name = export_graph_signature.inputs_to_parameters[node.target] + if param_name in params_buffers_to_node_meta: + for k, v in params_buffers_to_node_meta[param_name].items(): + node.meta[k] = v + if node.target in export_graph_signature.inputs_to_buffers: + buffer_name = export_graph_signature.inputs_to_buffers[node.target] + if buffer_name in params_buffers_to_node_meta: + for k, v in params_buffers_to_node_meta[buffer_name].items(): + node.meta[k] = v + + # The unbacked symint symbols are updated in aot_export + # so we serialize them here instead of inside dynamo + + gm.meta["inline_constraints"] = { + k: v + for k, v in dynamo_fake_mode.shape_env.var_to_range.items() + if free_unbacked_symbols(k) + } + + num_lifted = next( + ( + i + for i, s in enumerate(export_graph_signature.input_specs) + if s.kind == InputKind.USER_INPUT + ), + len(export_graph_signature.input_specs), + ) + range_constraints = _process_constraints( + dynamo_fake_mode, + gm, + num_lifted, + flat_args, + ) + + # Do some cleanups on the graph module to restore the state dict to the + # expected form. Each of these steps should probably get fixed upstream. + # 1. Remove tensor constants that were added as buffers. + _rewrite_dynamo_tensor_constants( + orig_mod_buffers=set(mod.buffers()), + traced_mod_buffers=dict(gm_torch_level.named_buffers()), + graph_signature=ep_non_strict.sig, + constants=ep_non_strict.constants, + ) + # 2. Restore FQN of param/buffers + param_buffer_table: Dict[str, str] = _get_param_buffer_mapping(mod, gm_torch_level) + _replace_param_buffer_names(param_buffer_table, export_graph_signature) + + # 3. Remove non-persistent buffers from the graph signature + _rewrite_non_persistent_buffers(mod, ep_non_strict.sig, ep_non_strict.constants) + + # 4. Rewrite constants to have the same FQN as the original module. 
+ _remap_constants(constant_attrs, export_graph_signature, constants) + + module_call_signatures = { + fqn: ModuleCallSignature(inputs=[], outputs=[], **specs) + for fqn, specs in gm_torch_level.meta["module_call_specs"].items() + } + + if len(preserve_module_call_signature) > 0: + res = CollectTracepointsPass(module_call_signatures, export_graph_signature)(gm) + assert res is not None + gm = res.graph_module + + assert orig_out_spec is not None + exported_program = ExportedProgram( + root=gm, + graph=gm.graph, + graph_signature=export_graph_signature, + state_dict=mod.state_dict(keep_vars=True), + range_constraints=range_constraints, + module_call_graph=[ + ModuleCallEntry( + "", + ModuleCallSignature( + inputs=[], outputs=[], in_spec=orig_in_spec, out_spec=orig_out_spec + ), + ) + ] + + [ModuleCallEntry(fqn, sig) for fqn, sig in module_call_signatures.items()], + example_inputs=(args, kwargs), + constants=constants, + ) + log.debug("Exported program from AOTAutograd:\n%s", exported_program) + + if len(range_constraints) > 0: + exported_program = exported_program._transform_do_not_use( + _AddRuntimeAssertionsForInlineConstraintsPass(range_constraints) + ) + + return exported_program diff --git a/venv/lib/python3.10/site-packages/torch/export/_tree_utils.py b/venv/lib/python3.10/site-packages/torch/export/_tree_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a1615ebd5f586ce5216c65d40162a74ffb7bc5d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/_tree_utils.py @@ -0,0 +1,64 @@ +from typing import Any, Callable, Dict, Optional + +from torch.utils._pytree import Context, TreeSpec + + +def reorder_kwargs(user_kwargs: Dict[str, Any], spec: TreeSpec) -> Dict[str, Any]: + """Reorder user-provided kwargs to match the order in `spec`. `spec` is + expected to be the in_spec of an exported program, i.e. the spec that + results from flattening `(args, kwargs)`. + + We need this to provide consistent input ordering, such so that users can + pass in foo(a=a, b=b) OR foo(b=b, a=a) and receive the same result. + """ + # Make sure that the spec is actually shaped like (args, kwargs) + assert spec.type is tuple + assert spec.num_children == 2 + kwargs_spec = spec.children_specs[1] + assert kwargs_spec.type is dict + + if set(user_kwargs) != set(kwargs_spec.context): + raise ValueError( + f"kwarg key mismatch: " + f"Got {list(user_kwargs)} but expected {kwargs_spec.context}" + ) + + reordered_kwargs = {} + for kw in kwargs_spec.context: + reordered_kwargs[kw] = user_kwargs[kw] + + return reordered_kwargs + + +def is_equivalent( + spec1: TreeSpec, + spec2: TreeSpec, + equivalence_fn: Callable[[Optional[type], Context, Optional[type], Context], bool], +) -> bool: + """Customizable equivalence check for two TreeSpecs. + + Arguments: + spec1: The first TreeSpec to compare + spec2: The second TreeSpec to compare + equivalence_fn: A function to determine the equivalence of two + TreeSpecs by examining their types and contexts. It will be called like: + + equivalence_fn(spec1.type, spec1.context, spec2.type, spec2.context) + + This function will be applied recursively to all children. + + Returns: + True if the two TreeSpecs are equivalent, False otherwise. 
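+
+    Example (illustrative)::
+
+        import torch.utils._pytree as pytree
+
+        _, spec1 = pytree.tree_flatten({"a": 1, "b": 2})
+        _, spec2 = pytree.tree_flatten({"a": 10, "b": 20})
+        same_types = lambda t1, c1, t2, c2: t1 == t2  # compare types, ignore contexts
+        assert is_equivalent(spec1, spec2, same_types)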
+ """ + if not equivalence_fn(spec1.type, spec1.context, spec2.type, spec2.context): + return False + + # Recurse on children + if len(spec1.children_specs) != len(spec2.children_specs): + return False + + for child_spec1, child_spec2 in zip(spec1.children_specs, spec2.children_specs): + if not is_equivalent(child_spec1, child_spec2, equivalence_fn): + return False + + return True diff --git a/venv/lib/python3.10/site-packages/torch/export/_unlift.py b/venv/lib/python3.10/site-packages/torch/export/_unlift.py new file mode 100644 index 0000000000000000000000000000000000000000..bede2986d4eab0a4996c13f6f6f3d2f68f55c9dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/_unlift.py @@ -0,0 +1,314 @@ +import copy +from itertools import chain +from typing import Any, Dict, List, Optional, Tuple + +import torch +import torch.utils._pytree as pytree +from torch._export.utils import _check_input_constraints_for_graph +from torch.export.unflatten import _assign_attr, _AttrKind +from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo +from ._remove_effect_tokens_pass import _remove_effect_tokens + +from .exported_program import ( + ExportedProgram, + ExportGraphSignature, + InputKind, + OutputKind, +) + + +@torch._dynamo.disable +def _check_input_constraints_pre_hook(self, *args, **kwargs): + flat_args_with_path, received_spec = pytree.tree_flatten_with_path(args) + + if received_spec != self._in_spec: + raise ValueError( # noqa: TRY200 + "Trying to flatten user inputs with exported input tree spec: \n" + f"{self._in_spec}\n" + "but actually got inputs with tree spec of: \n" + f"{received_spec}" + ) + + return _check_input_constraints_for_graph( + [node for node in self.graph.nodes if node.op == "placeholder"], + flat_args_with_path, + self.range_constraints, + ) + + +def _unlift_inputs_as_getattr( + gm: torch.fx.GraphModule, + lifted_inputs: List[Optional[str]], +) -> Tuple[Dict[str, torch.fx.Node], Dict[str, torch.fx.Node]]: + """ + Unlift inputs referring to params/buffers/constants as getattr nodes in the + graph + """ + unlifted_name_to_node = {} + input_name_to_node = {} + + placeholder_nodes = [node for node in gm.graph.nodes if node.op == "placeholder"] + assert len(lifted_inputs) == len(placeholder_nodes) + for input_node, lifted_node in zip(placeholder_nodes, lifted_inputs): + if lifted_node is None: + input_name_to_node[input_node.name] = input_node + + else: + with gm.graph.inserting_after(input_node): + getattr_node = gm.graph.get_attr(lifted_node) + input_node.replace_all_uses_with(getattr_node) + metadata = input_node.meta + gm.graph.erase_node(input_node) + getattr_node.meta = metadata + unlifted_name_to_node[lifted_node] = getattr_node + + return unlifted_name_to_node, input_name_to_node + + +def _insert_copy_for_mutations( + gm: torch.fx.GraphModule, + mutated_outputs: List[Optional[str]], + unlifted_name_to_node: Dict[str, torch.fx.Node], + input_name_to_node: Dict[str, torch.fx.Node], +) -> None: + """ + Find the all the buffers and inputs that were mutated and insert copy_ + operators to reflect mutations. 
+ """ + output_node = None + for node in gm.graph.nodes: + if node.op == "output": + output_node = node + break + assert output_node is not None + outputs = pytree.tree_flatten(output_node.args)[0] + assert len(outputs) == len(mutated_outputs) + + user_output_nodes = [] + for return_node, mutated_node_name in zip(outputs, mutated_outputs): + if mutated_node_name is None: + user_output_nodes.append(return_node) + continue + + if mutated_node_name in unlifted_name_to_node: + mutated_node = unlifted_name_to_node[mutated_node_name] + elif mutated_node_name in input_name_to_node: + mutated_node = input_name_to_node[mutated_node_name] + else: + raise RuntimeError( + f"Could not find {mutated_node_name} in either buffer or input nodes" + ) + + with gm.graph.inserting_before(output_node): + _ = gm.graph.call_function( + torch.ops.aten.copy_.default, (mutated_node, return_node) + ) + + with gm.graph.inserting_before(output_node): + # Only return user outputs + new_output = gm.graph.output(tuple(user_output_nodes)) + output_node.replace_all_uses_with(new_output) + gm.graph.erase_node(output_node) + + +def _get_codegen( + in_spec: pytree.TreeSpec, + out_spec: Optional[pytree.TreeSpec], +) -> _PyTreeCodeGen: + """ + Create the codegen for the graph module based on the in/out specs + """ + if ( + in_spec.type == tuple + and in_spec.num_children == 2 + and in_spec.children_specs[0].type == tuple + and in_spec.children_specs[1].type == dict + ): + # if in_spec contains the args (tuple) and kwargs (dict) + names = [f"arg_{i}" for i in range(in_spec.children_specs[0].num_children)] + # add kwarg names + names.extend(in_spec.children_specs[1].context) + else: + names = [f"arg_{i}" for i in range(in_spec.num_children)] + + return _PyTreeCodeGen( + _PyTreeInfo( + names, + in_spec, + out_spec, + ) + ) + + +def _unlift( + gm: torch.fx.GraphModule, + lifted_inputs: List[Optional[str]], + mutated_outputs: List[Optional[str]], + in_spec: pytree.TreeSpec, + out_spec: Optional[pytree.TreeSpec], + state_dict: Dict[str, Any], + constants: Dict[str, Any], +): + """ + Args: + lifted_inputs: A list matching the graph module's input nodes. For + an input node that is referring to a lifted parameter/buffer, this + list will contain the fqn the corresponding attribute. Otherwise, this + list will contain None. This is used to unlift the lifted parameters as + get_attr nodes. + + mutated_outputs: A list matching the graph module's output nodes. For + an output node that is referring to a mutated buffer or user input, this + list will contain the name of the corresponding buffer or user input + that needs to be mutated. Otherwise, this list will contain None. This + is used to re-insert an inplace copy_ operator to copy the mutated + values back to the original node. 
+ """ + unlifted_name_to_node, input_name_to_node = _unlift_inputs_as_getattr( + gm, lifted_inputs + ) + _insert_copy_for_mutations( + gm, mutated_outputs, unlifted_name_to_node, input_name_to_node + ) + gm.graph._codegen = _get_codegen(in_spec, out_spec) + gm.graph.lint() + gm.graph.eliminate_dead_code() + gm.recompile() + return gm + + +def _register_attrs_to_new_gm( + new_gm: torch.fx.GraphModule, + graph_signature: ExportGraphSignature, + state_dict: Dict[str, Any], + constants: Dict[str, Any], +) -> None: + non_persistent_buffers = set(graph_signature.non_persistent_buffers) + for name in graph_signature.buffers: + if name in non_persistent_buffers: + persistent = False + value = constants[name] + else: + persistent = True + value = state_dict[name] + _assign_attr( + value, new_gm, name, attr_kind=_AttrKind.BUFFER, persistent=persistent + ) + for name in graph_signature.parameters: + value = state_dict[name] + _assign_attr( + value, + new_gm, + name, + attr_kind=_AttrKind.PARAMETER, + ) + + for name in chain( + graph_signature.lifted_custom_objs, graph_signature.lifted_tensor_constants + ): + value = constants[name] + _assign_attr( + value, + new_gm, + name, + attr_kind=_AttrKind.CONSTANT, + ) + + +class _StatefulGraphModuleFactory(type): + """ + Metaclass that ensures a private constructor for _StatefulGraphModule + """ + + def __call__(cls, *args, **kwargs): + raise TypeError( + f"{cls.__module__}.{cls.__qualname__} has no public constructor. " + ) + + def _create(cls, root, graph, range_constraints=None): + return super().__call__( + root, + graph, + range_constraints=range_constraints, + ) + + +class _StatefulGraphModule(torch.fx.GraphModule, metaclass=_StatefulGraphModuleFactory): + def __init__(self, root, graph, range_constraints=None): + super().__init__(root, graph) + # Need to fix up non-persistent buffers. + self.range_constraints = range_constraints or [] + + +def _create_stateful_graph_module( + plain_graph_module: torch.fx.GraphModule, + range_constraints, + # TODO(suo) this should not be optional, but is since we still ahve + # capture_pre_autograd_graph grr + graph_signature: Optional[ExportGraphSignature] = None, +): + stateful_gm = _StatefulGraphModule._create( + plain_graph_module, + plain_graph_module.graph, + range_constraints=range_constraints, + ) + stateful_gm.register_forward_pre_hook( + _check_input_constraints_pre_hook, with_kwargs=True + ) + + if graph_signature is None: + return stateful_gm + # Fix up non-persistent buffers. torch.fx does not distinguish between + # persistent and non-persistent buffers, so we must restore that distinction + # here. 
+ for buffer in graph_signature.non_persistent_buffers: + _assign_attr( + plain_graph_module.get_buffer(buffer), + stateful_gm, + buffer, + attr_kind=_AttrKind.BUFFER, + persistent=False, + ) + + return stateful_gm + + +def _unlift_exported_program_lifted_states(ep: ExportedProgram) -> torch.nn.Module: + ep = _remove_effect_tokens(ep) + new_gm = torch.fx.GraphModule(ep.graph_module, copy.deepcopy(ep.graph)) + _register_attrs_to_new_gm(new_gm, ep.graph_signature, ep.state_dict, ep.constants) + + lifted_inputs: List[Optional[str]] = [ + in_spec.target + if in_spec.kind + in ( + InputKind.BUFFER, + InputKind.CONSTANT_TENSOR, + InputKind.PARAMETER, + InputKind.CUSTOM_OBJ, + ) + else None + for in_spec in ep.graph_signature.input_specs + ] + + mutated_outputs: List[Optional[str]] = [ + out_spec.target + if out_spec.kind in (OutputKind.BUFFER_MUTATION, OutputKind.USER_INPUT_MUTATION) + else None + for out_spec in ep.graph_signature.output_specs + ] + + new_gm = _unlift( + new_gm, + lifted_inputs, + mutated_outputs, + ep.call_spec.in_spec, + ep.call_spec.out_spec, + ep.state_dict, + ep.constants, + ) + unlift_gm = _create_stateful_graph_module( + new_gm, ep.range_constraints, ep.graph_signature + ) + unlift_gm.meta.update(ep.graph_module.meta) + return unlift_gm diff --git a/venv/lib/python3.10/site-packages/torch/export/custom_obj.py b/venv/lib/python3.10/site-packages/torch/export/custom_obj.py new file mode 100644 index 0000000000000000000000000000000000000000..8e7f2080a4ee705a2621386c9b69a089d507544a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/custom_obj.py @@ -0,0 +1,16 @@ +from dataclasses import dataclass + + +__all__ = ["ScriptObjectMeta"] + + +@dataclass +class ScriptObjectMeta: + """ + Metadata which is stored on nodes representing ScriptObjects. + """ + + # Key into constants table to retrieve the real ScriptObject. + constant_name: str + + class_fqn: str diff --git a/venv/lib/python3.10/site-packages/torch/export/dynamic_shapes.py b/venv/lib/python3.10/site-packages/torch/export/dynamic_shapes.py new file mode 100644 index 0000000000000000000000000000000000000000..73d64b8f5d07a481968bd90caaef2ac4b2e8382f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/dynamic_shapes.py @@ -0,0 +1,876 @@ +import builtins +import dataclasses +import inspect +import math +import sys +import weakref +from collections import defaultdict +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, TYPE_CHECKING, Union + +import torch +from torch._subclasses.fake_tensor import FakeTensor +from torch.utils._pytree import SUPPORTED_NODES + +from .exported_program import ExportedProgram + +if TYPE_CHECKING: + from sympy import Symbol + + from torch._guards import Source + + from ..fx.experimental.symbolic_shapes import ShapeEnv, StrictMinMaxConstraint + +__all__ = ["Constraint", "Dim", "dims", "dynamic_dim"] + + +class _Dim(type): + """ + Metaclass for :func:`Dim` types. + """ + + @staticmethod + def readable(name, min_, max_): + if min_ == 2: + min_ = None + if max_ == sys.maxsize - 1: + max_ = None + if min_ is None and max_ is None: + return f"Dim('{name}')" + if min_ is None: + return f"Dim('{name}', max={max_})" + if max_ is None: + return f"Dim('{name}', min={min_})" + return f"Dim('{name}', min={min_}, max={max_})" + + def __add__(cls, other): + # e.g., dim + 1 + if type(other) is not int: + raise NotImplementedError( + f"Attempted to add {other} to {cls.__name__}, where an integer was expected. 
" + "(Only increasing linear operations with integer coefficients are supported.)" + ) + return cls._derive(lambda x: x + other) + + def __radd__(cls, other): + return cls + other + + def __sub__(cls, other): + # e.g., dim - 1 + if type(other) is not int: + raise NotImplementedError( + f"Attempted to subtract {other} from {cls.__name__}, where an integer was expected. " + "(Only increasing linear operations with integer coefficients are supported.)" + ) + return cls._derive(lambda x: x - other) + + def __rsub__(cls, other): + raise NotImplementedError( + f"Attempted to negate {cls.__name__}. " + "(Only increasing linear operations with integer coefficients are supported.)" + ) + + def __mul__(cls, other): + # e.g., dim * 2 + if type(other) is not int or other <= 0: + raise NotImplementedError( + f"Attempted to multiply {other} with {cls.__name__}, where a positive integer was expected. " + "(Only increasing linear operations with integer coefficients are supported.)" + ) + return cls._derive(lambda x: x * other) + + def __rmul__(cls, other): + return cls * other + + def _derived_name(cls, fn): + from sympy import sympify + + return str(fn(sympify(cls.__name__))) + + def _derive(cls, fn): + return _DerivedDim(cls._derived_name(fn), (int,), {"root": cls, "fn": fn}) + + +class _DerivedDim(_Dim): + """ + Metaclass for derived :func:`Dim` types. + + Currently we only support increasing linear expressions with integer coefficients. + In other words, a derived Dim can always be written in the form Ax + B, where + x is a regular Dim (i.e., non-derived Dim), A and B are integers, and A is positive. + (In particular, the latter ensures that x < y => Ax + B < Ay + B.) + These restrictions on the form of derived Dims makes the metatheory simpler: e.g., + it simplifies computing ranges for derived Dims, solving for underlying regular Dims, + deciding equalities between derived Dims, and so on. + + The function lambda x: Ax + B is expressed by `fn`, where x is a normal Dim, `root`. + The range of a derived Dim is computed by mapping `fn` over the range of its `root`. + """ + + @property + def min(self): + # assume that self.fn is an increasing function + # TODO(avik): use sympy value range analysis instead? + from sympy import Integer + + _min_symint = self.fn(Integer(self.root.min)) # type: ignore[attr-defined] + assert _min_symint >= 2, ( + f"Expected derived min value of {self.__name__} to be >= 2. " + f"Please specify an appropriate min value for {self.root.__name__} " # type: ignore[attr-defined] + f"(currently {self.root.min})." # type: ignore[attr-defined] + ) + return int(_min_symint) + + @property + def max(self): + # assume that self.fn is an increasing function + # TODO(avik): use sympy value range analysis instead? + from sympy import Integer + + _max_symint = self.fn(Integer(self.root.max)) # type: ignore[attr-defined] + assert _max_symint <= sys.maxsize - 1, ( + f"Expected derived max value of {self.__name__} to be <= {sys.maxsize - 1}. " + f"Please specify an appropriate max value for {self.root.__name__} " # type: ignore[attr-defined] + f"(currently {self.root.max})." # type: ignore[attr-defined] + ) + return int(_max_symint) + + def _derive(self, fn): + # We support nesting, e.g., 2*dim + 1. + # This is implemented by composing operations on the same root. + # As a consequence, roots are always regular Dims (i.e., not derived Dims). 
+ return _DerivedDim( + self._derived_name(fn), + (int,), + {"root": self.root, "fn": lambda x: fn(self.fn(x))}, # type: ignore[attr-defined] + ) + + +def Dim(name: str, *, min: Optional[int] = None, max: Optional[int] = None): + """ + :func:`Dim` constructs a type analogous to a named symbolic integer with a range. + It can be used to describe multiple possible values of a dynamic tensor dimension. + Note that different dynamic dimensions of the same tensor, or of different tensors, + can be described by the same type. + + Args: + name (str): Human-readable name for debugging. + min (Optional[int]): Minimum possible value of given symbol (inclusive) + max (Optional[int]): Maximum possible value of given symbol (inclusive) + + Returns: + A type that can be used in dynamic shape specifications for tensors. + """ + _min = 2 if min is None else builtins.max(min, 2) + _max = sys.maxsize - 1 if max is None else builtins.min(max, sys.maxsize - 1) + assert _max > _min, f"Cannot create Dim with inconsistent min={min}, max={max}" + dim = _Dim(name, (int,), {"min": _min, "max": _max}) + dim.__module__ = getattr( + inspect.getmodule(inspect.stack()[1][0]), "__name__", "__main__" + ) + return dim + + +def dims(*names: str, min: Optional[int] = None, max: Optional[int] = None): + """ + Util to create multiple :func:`Dim` types. + """ + return tuple(Dim(name, min=min, max=max) for name in names) + + +@dataclasses.dataclass +class _ConstraintTarget: + """ + This represents input tensor dimensions. Don't create this + class directly; instead, use :func:`dynamic_dim`. + """ + + w_tensor: Any # weakref to torch.Tensor + # TODO: We don't need t_id; we can get it off of w_tensor + t_id: int + dim: int + + +class _ConstraintFactory(type): + """ + Metaclass that ensures a private constructor for :class:`_Constraint` + """ + + def __call__(cls, *args, **kwargs): + raise TypeError( + f"{cls.__module__}.{cls.__qualname__} has no public constructor. " + f"Please use torch.export.dynamic_dim() to create one" + ) + + def _create( + cls, w_tensor, t_id, dim, constraint_range, shared=None, debug_name=None + ): + return super().__call__( + w_tensor, t_id, dim, constraint_range, shared, debug_name + ) + + +def _create_constraint( + w_tensor, t_id, dim, constraint_range, shared=None, debug_name=None +): + return _Constraint._create( + w_tensor, t_id, dim, constraint_range, shared, debug_name + ) + + +@dataclasses.dataclass +class _Constraint(_ConstraintTarget, metaclass=_ConstraintFactory): + """ + + .. warning:: + Do not construct :class:`_Constraint` directly, use :func:`dynamic_dim` instead. + + This represents constraints on input tensor dimensions, e.g., requiring + them to be fully polymorphic or within some range. + + """ + + # NOTE(avik): In the future, this could be Union[StrictMinMaxConstraint, ] + constraint_range: "StrictMinMaxConstraint" + # Represent that `constraint_range` is shared with another _ConstraintTarget, which + # typically arises because of a specified equality with another dynamic dimension. 
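+    # For example (tensors below are hypothetical):
+    #
+    #   x, y = torch.randn(4, 8), torch.randn(4, 8)
+    #   c = dynamic_dim(x, 0) == dynamic_dim(y, 0)
+    #   c.shared  # _ConstraintTarget pointing at dimension 0 of `y`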
+ shared: Optional[_ConstraintTarget] = None + debug_name: Optional[str] = None + + def _clone_with_range(self, lower=2, upper=math.inf): + # Import sympy locally + from torch.fx.experimental.symbolic_shapes import StrictMinMaxConstraint + from torch.utils._sympy.value_ranges import ValueRanges + + constraint_range = StrictMinMaxConstraint( + vr=self.constraint_range.vr & ValueRanges(lower=lower, upper=upper), + warn_only=False, + ) + return _create_constraint( + self.w_tensor, + self.t_id, + self.dim, + constraint_range, + self.shared, + self.debug_name, + ) + + def __ge__(self, lower): + return self._clone_with_range(lower=lower) + + def __gt__(self, lower): + return self._clone_with_range(lower=lower + 1) + + def __le__(self, upper): + return self._clone_with_range(upper=upper) + + def __lt__(self, upper): + return self._clone_with_range(upper=upper - 1) + + def __bool__(self): + # NOTE(avik): We do not support compound expressions like a <= x <= b. + # This is because Python implicitly desugars them into bool(a <= x) and bool(x <= b), + # and moreover, enforces that any overload of __bool__ must return True or False. + # FWIW, sympy also raises TypeError in this case. + raise TypeError( + "Cannot determine truth value of _Constraint. " + "If you are trying to combine _Constraint's with logical connectives, " + "you can specify them separately instead." + ) + + @property + def serializable_spec(self): + # We need a serialization compatible format of the constraint so that it + # can be savedin the graph module w/o breaking the module serialization. + # The saved constraints will be used directly for the post-exporting pass + # that converts constraints to runtime assertion. The saved constraints + # will not be saved in the serialized module. + # TODO: A better way is needed. Currently we use 't_id' to map the constraint, + # which is not reliable + return { + "t_id": self.t_id, + "dim": self.dim, + "min": self.constraint_range.vr.lower, + "max": self.constraint_range.vr.upper, + } + + def __eq__(self, other): + if not isinstance(other, _Constraint): + raise TypeError( + "A dynamic dim can be specified equal only to another dynamic dim. " + f"Equality with {type(other)} is not supported." + ) + + # import sympy locally + from torch.fx.experimental.symbolic_shapes import StrictMinMaxConstraint + + constraint_range = StrictMinMaxConstraint( + vr=self.constraint_range.vr & other.constraint_range.vr, + warn_only=False, + ) + if self.debug_name is None: + debug_name = other.debug_name + else: + assert other.debug_name is None or self.debug_name == other.debug_name + debug_name = self.debug_name + return _create_constraint( + self.w_tensor, + self.t_id, + self.dim, + constraint_range, + shared=_ConstraintTarget(other.w_tensor, other.t_id, other.dim), + debug_name=debug_name, + ) + + +@dataclasses.dataclass +class _PhantomRoot: + """ + This represents the root of a derived Dim where the root does not directly + specify the shape of any input dimension, but the derived Dim does. + + e.g., the input shapes 2*dim and dim + 1 are related via a "phantom" dim. + + The fields `name`, `constraint_range`, and `val` carried by a phantom root + help create a symbol for it. Any derived dims with this phantom root are + backed by expressions over this symbol. 
+ """ + + name: str + constraint_range: "StrictMinMaxConstraint" + val: int + + +@dataclasses.dataclass +class _DerivedConstraint(_ConstraintTarget): + """ + This represents a derived Dim, whose root is either a regular constraint target + (which directly specifies the shape of some input dimension) or a phantom root + (which does so indirectly). + """ + + # NOTE: This is not currently a subclass of _Constraint because we do not support + # `shared` for derived `Dim`s. Indeed, sharing is a necessary concept only for + # legacy constraints based on `dynamic_dim`: equality can be expressed simply by + # reusing the same (derived or normal) `Dim`. + root: Union[_ConstraintTarget, _PhantomRoot] + fn: Callable + constraint_range: "StrictMinMaxConstraint" + debug_name: Optional[str] = None + + @property + def shared(self): + # Some code paths expect a union of _Constraint and _DerivedConstraint. + # Thus we expose a `shared` field that is always None. + # TODO(avik): clean this up + return None + + @property + def serializable_spec(self): + # same as _Constraint.serializable_spec + return { + "t_id": self.t_id, + "dim": self.dim, + "min": self.constraint_range.vr.lower, + "max": self.constraint_range.vr.upper, + } + + +Constraint = Union[_Constraint, _DerivedConstraint] + + +def dynamic_dim(t: torch.Tensor, index: int, debug_name: Optional[str] = None): + """ + .. warning:: + (This feature is DEPRECATED. See :func:`Dim` instead.) + + :func:`dynamic_dim` constructs a :class:`_Constraint` object that describes the dynamism of + a dimension ``index`` of tensor ``t``. :class:`_Constraint` objects should be passed to + ``constraints`` argument of :func:`export`. + + Args: + t (torch.Tensor): Example input tensor that have dynamic dimension size(s) + index (int): Index of dynamic dimension + + Returns: + A :class:`_Constraint` object that describes shape dynamism. It can be passed to :func:`export` so + that :func:`export` does not assume static size of specified tensor, i.e. keeping it dynamic + as a symbolic size rather than specializing according to size of example tracing input. + + Specifically :func:`dynamic_dim` can be used to express following types of dynamism. 
+ + - Size of a dimension is dynamic and unbounded:: + + t0 = torch.rand(2, 3) + t1 = torch.rand(3, 4) + + # First dimension of t0 can be dynamic size rather than always being static size 2 + constraints = [dynamic_dim(t0, 0)] + ep = export(fn, (t0, t1), constraints=constraints) + + - Size of a dimension is dynamic with a lower bound:: + + t0 = torch.rand(10, 3) + t1 = torch.rand(3, 4) + + # First dimension of t0 can be dynamic size with a lower bound of 5 (inclusive) + # Second dimension of t1 can be dynamic size with a lower bound of 2 (exclusive) + constraints = [ + dynamic_dim(t0, 0) >= 5, + dynamic_dim(t1, 1) > 2, + ] + ep = export(fn, (t0, t1), constraints=constraints) + + - Size of a dimension is dynamic with an upper bound:: + + t0 = torch.rand(10, 3) + t1 = torch.rand(3, 4) + + # First dimension of t0 can be dynamic size with a upper bound of 16 (inclusive) + # Second dimension of t1 can be dynamic size with a upper bound of 8 (exclusive) + constraints = [ + dynamic_dim(t0, 0) <= 16, + dynamic_dim(t1, 1) < 8, + ] + ep = export(fn, (t0, t1), constraints=constraints) + + - Size of a dimension is dynamic and it is always equal to size of another dynamic dimension:: + + t0 = torch.rand(10, 3) + t1 = torch.rand(3, 4) + + # Sizes of second dimension of t0 and first dimension are always equal + constraints = [ + dynamic_dim(t0, 1) == dynamic_dim(t1, 0), + ] + ep = export(fn, (t0, t1), constraints=constraints) + + - Mix and match all types above as long as they do not express conflicting requirements + + """ + from torch._dynamo.exc import UserError, UserErrorType + + if not isinstance(t, torch.Tensor): + raise UserError( + UserErrorType.DYNAMIC_DIM, + f"Expected tensor as input to dynamic_dim but got {type(t)}", + ) + + if t.dim() < 1: + raise UserError( + UserErrorType.DYNAMIC_DIM, "Cannot mark 0-dimension tensors to be dynamic" + ) + + if index >= t.dim(): + raise UserError( + UserErrorType.DYNAMIC_DIM, + f"Expected the dimension passed to dynamic_dim to be in the range [0:{t.dim()-1}]" + f" but got {index}, which is out of bounds for the given tensor.", + ) + + # Import sympy locally + import sympy + + from torch.fx.experimental.symbolic_shapes import StrictMinMaxConstraint + from torch.utils._sympy.value_ranges import ValueRanges + + return _create_constraint( + weakref.ref(t), + id(t), + index, + StrictMinMaxConstraint( + vr=ValueRanges(lower=2, upper=sympy.oo), warn_only=False + ), + debug_name=debug_name, + ) + + +def _process_equalities( + constraint: Constraint, + get_sources: Callable[[int, int], List["Source"]], + shape_env: "ShapeEnv", + source_pairs: List[Tuple["Source", "Source"]], + derived_equalities: List[Tuple["Source", Union["Source", "Symbol"], Callable]], + phantom_symbols: Dict[str, "Symbol"], +): + """ + Updates `source_pairs`, `derived_equalities`, and `phantom_symbols` (which become + fields of `EqualityConstraint`) based on a given input `constraint`. + """ + + source, *other_sources = get_sources(constraint.t_id, constraint.dim) + # When t.size()[dim] maps to src0, src1, ..., srcN, we add + # constraints that make src0 "equal" to src1, ..., srcN. + source_pairs.extend((source, other_source) for other_source in other_sources) + if not isinstance(constraint, _DerivedConstraint): + if constraint.shared is not None: + # Moreover, when t.size()[dim] is specified equal to t'.size()[dim'] + # and t'.size()[dim'] maps to src1', ..., srcN', we add + # constraints that also make src0 "equal" to src1', ..., srcN'. 
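+            # (For example, when dynamic_dim(t, dim) == dynamic_dim(t', dim') was
+            # specified, `constraint.shared` points at (t', dim'), so the sources of
+            # t'.size()[dim'] are paired with src0 below.)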
+ other_sources = get_sources(constraint.shared.t_id, constraint.shared.dim) + source_pairs.extend( + (source, other_source) for other_source in other_sources + ) + else: + # branch based on the root of the _DerivedConstraint + if not isinstance(constraint.root, _PhantomRoot): + # either root points to an input source + root = get_sources(constraint.root.t_id, constraint.root.dim)[0] # type: ignore[assignment] + else: + # or root points to a phantom symbol + if constraint.root.name in phantom_symbols: + root = phantom_symbols[constraint.root.name] # type: ignore[assignment] + else: + # create a phantom symbol in the shape env based on the _PhantomRoot + root = shape_env.create_symbol( + val=constraint.root.val, + source=torch._dynamo.source.ConstantSource(constraint.root.name), + dynamic_dim=torch.fx.experimental.symbolic_shapes.DimDynamic.DYNAMIC, + constraint_dim=constraint.root.constraint_range, + ) + phantom_symbols[constraint.root.name] = root # type: ignore[assignment] + + fn = constraint.fn + # A derived equality (source, root, fn) informally corresponds to source = fn(root). + # Here source describes an input and root might describe another input or a phantom symbol. + derived_equalities.append((source, root, fn)) + + +def _process_dynamic_shapes( + f: Callable, + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None, +) -> Optional[List[Constraint]]: + from collections import defaultdict + from collections.abc import Mapping, Sequence + + from torch._dynamo.exc import UserError, UserErrorType + + if dynamic_shapes is None or len(dynamic_shapes) == 0: + return None + + kwargs = kwargs if kwargs is not None else {} + + def tree_zip(combined_args, dynamic_shapes): + if isinstance(combined_args, (tuple, list)): + if not isinstance(dynamic_shapes, Sequence): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected dynamic_shapes of a {type(combined_args)} to be a Sequence, " + f"got {dynamic_shapes} instead", + ) + if len(combined_args) != len(dynamic_shapes): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected {dynamic_shapes} to have {len(combined_args)} items", + ) + for i, shape in enumerate(dynamic_shapes): + yield from tree_zip(combined_args[i], shape) + elif isinstance(combined_args, dict): + if not isinstance(dynamic_shapes, Mapping): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected dynamic_shapes of a {type(combined_args)} to be a Mapping, " + f"got {dynamic_shapes} instead", + ) + if len(combined_args) != len(dynamic_shapes): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected {dynamic_shapes} to have {len(combined_args)} items", + ) + for k, shape in dynamic_shapes.items(): + yield from tree_zip(combined_args[k], shape) + elif type(combined_args) in SUPPORTED_NODES: + if not isinstance(dynamic_shapes, Sequence): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected dynamic_shapes of a user-registered class (e.g., " + f"{type(combined_args)}) to be a Sequence that matches the " + f"flattened structure, but got {dynamic_shapes} instead", + ) + yield from tree_zip( + SUPPORTED_NODES[type(combined_args)].flatten_fn(combined_args)[0], + dynamic_shapes, + ) + elif isinstance(combined_args, torch.Tensor): + yield (combined_args, dynamic_shapes) + else: + if dynamic_shapes is not None: + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected dynamic_shapes of a {type(combined_args)} to be None, " + f"got {dynamic_shapes} instead", + ) + 
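+    # For example (hypothetical user code): for a callable f(x, y) called with example
+    # tensors x and y and dynamic_shapes = ({0: batch}, {0: batch}), where batch is a
+    # Dim, tree_zip yields the pairs (x, {0: batch}) and (y, {0: batch}), i.e. each
+    # example tensor matched with its per-dimension spec.
+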
+ # map of Dim names representing input shape dimensions to constraints on them + symbols: Dict[str, List[Constraint]] = defaultdict(list) + # track roots that do not directly represent input shape dimensions + phantom_roots: Dict[str, _PhantomRoot] = {} + derived_constraints_with_phantom_root: List[_DerivedConstraint] = [] + + def to_constraint(dim, tensor, i): + import sympy + + from torch.fx.experimental.symbolic_shapes import StrictMinMaxConstraint + from torch.utils._sympy.solve import try_solve + from torch.utils._sympy.value_ranges import ValueRanges + + def root_value(): + # given tensor.shape[i] is the value of dim = fn(root), + # find the value of root + symbol = sympy.Symbol(dim.root.__name__, integer=True) + expr = dim.fn(symbol) + solution = try_solve(sympy.Eq(expr, tensor.shape[i]), symbol) + if solution is not None: + return int(solution[1]) # type: ignore[call-overload] + else: + raise UserError( # noqa: TRY200 + UserErrorType.CONSTRAINT_VIOLATION, + f"Expected shape[{i}] = {tensor.shape[i]} of input Tensor to be " + f"of the form {expr}, where {symbol} is an integer", + ) + + if isinstance(dim, _DerivedDim): + # generate a _DerivedConstraint where the root is: + # - either a _ConstraintTarget (if dim.root directly describes an input shape) + # - or a _PhantomRoot (otherwise) + dim_root = dim.root # type: ignore[attr-defined] + if dim_root.__name__ in symbols: + # root represents an input shape dimension + root_constraint = symbols[dim_root.__name__][0] + root = _ConstraintTarget( + root_constraint.w_tensor, + root_constraint.t_id, + root_constraint.dim, + ) + elif dim_root.__name__ not in phantom_roots: + # create a phantom root + root = _PhantomRoot( # type: ignore[assignment] + name=dim_root.__name__, + constraint_range=StrictMinMaxConstraint( + vr=ValueRanges(lower=dim_root.min, upper=dim_root.max), + warn_only=False, + ), + val=root_value(), + ) + phantom_roots[dim_root.__name__] = root # type: ignore[assignment] + else: + root = phantom_roots[dim_root.__name__] # type: ignore[assignment] + constraint = _DerivedConstraint( + weakref.ref(tensor), + id(tensor), + i, + root, + dim.fn, # type: ignore[attr-defined] + StrictMinMaxConstraint( + vr=ValueRanges(lower=dim.min, upper=dim.max), + warn_only=False, + ), + debug_name=dim.__name__, + ) + if isinstance(root, _PhantomRoot): + # NOTE(avik): since we have not processed all inputs yet, we may replace this + # with a root that does represent an input shape dimension later (see below) + derived_constraints_with_phantom_root.append(constraint) + else: + constraint = dynamic_dim(tensor, i, debug_name=dim.__name__) + if dim.min != 2: + constraint = constraint >= dim.min + if dim.max != sys.maxsize - 1: + constraint = constraint <= dim.max + return constraint + + bounds: Dict[str, Tuple[int, int]] = {} + + def check_same_bounds(dim): + if dim.__name__ in symbols: + min_, max_ = bounds[dim.__name__] + if dim.min != min_ or dim.max != max_: + this_ = _Dim.readable(dim.__name__, min_, max_) + that_ = _Dim.readable(dim.__name__, dim.min, dim.max) + raise UserError( + UserErrorType.INVALID_INPUT, + f"Found different definitions {this_} and {that_} " + f"for the same symbolic dimension {dim}!", + ) + + else: + bounds[dim.__name__] = (dim.min, dim.max) + + def update_symbols(tensor, shape): + if isinstance(shape, dict): + for i, dim in shape.items(): + if isinstance(dim, _Dim): + check_same_bounds(dim) + constraint = to_constraint(dim, tensor, i) + symbols[dim.__name__].append(constraint) + else: + if dim is not None: + raise 
UserError( + UserErrorType.INVALID_INPUT, + f"Unexpected item #{i} ({dim}) in dynamic_shape {shape} of Tensor, " + "try None instead", + ) + elif isinstance(shape, (tuple, list)): + for i, dim in enumerate(shape): + if isinstance(dim, _Dim): + check_same_bounds(dim) + constraint = to_constraint(dim, tensor, i) + symbols[dim.__name__].append(constraint) + else: + if dim is not None: + raise UserError( + UserErrorType.INVALID_INPUT, + f"Unexpected item #{i} ({dim}) in dynamic_shape {shape} of Tensor, " + "try None instead", + ) + else: + if shape is not None: + raise UserError( + UserErrorType.INVALID_INPUT, + f"Unexpected dynamic_shape {shape} of Tensor, " "try None instead", + ) + + import inspect + + if isinstance(f, ExportedProgram): + f = f.module() + signature = ( + inspect.signature(f.forward) + if isinstance(f, torch.nn.Module) + else inspect.signature(f) + ) + combined_args = signature.bind(*args, **kwargs).arguments + + # This means user didn't specify dynamic shapes with argument names. + combined_args = combined_args if isinstance(dynamic_shapes, Mapping) else list(combined_args.values()) # type: ignore[assignment] + for tensor, shape in tree_zip(combined_args, dynamic_shapes): + update_symbols(tensor, shape) + + constraints = [] + for derived_constraint_with_phantom_root in derived_constraints_with_phantom_root: + phantom_root_name = derived_constraint_with_phantom_root.root.name # type: ignore[union-attr] + if phantom_root_name in symbols: + # We found an input shape dimension corresponding to this name, so we + # do not need a phantom symbol for it after all. + # NOTE(avik): Overall we want to maintain the invariant that roots that + # are phantom symbols are really "phantom," i.e., they cannot be represented + # by any input source. This is important when we are deciding derived equalities, + # since we can focus our attention exclusively on input sources: deciding + # derived equalities involving phantom symbols are, in comparison, trivial. + derived_constraint_with_phantom_root.root = symbols[phantom_root_name][0] + + for dynamic_dims in symbols.values(): + if all( + isinstance(dynamic_dim, _DerivedConstraint) for dynamic_dim in dynamic_dims + ): + constraints.extend(dynamic_dims) + else: + primary, *others = dynamic_dims + if others: + for other in others: + constraints.append(primary == other) # type: ignore[arg-type] + else: + constraints.append(primary) + + return constraints # type: ignore[return-value] + + +def _process_constraints( + fake_mode, + graph_module: torch.fx.GraphModule, + num_lifted_params_buffers: int, + example_inputs: List[torch.Tensor], +) -> Dict: + """ + Process the constraints stored in the graph module to return something more readable. + + Args: + graph_module (torch.fx.GraphModule): GraphModule returned from + dynamo.export, which contains the "input_shape_constraints" and + "inline_constraints" metadata + + example_inputs: Flattened list of example inputs used to export the graph module + + Returns: + range_constraints (Dict[sympy.Symbol, ValueRanges]): Mapping of + symbols (from SymInts) appearing in the fake tensors in + node.meta["val"] to their range constraints, which are a tuple + containing (lower, upper) constraints. 
+ """ + from torch._export.passes.add_runtime_assertions_for_constraints_pass import ( + InputDim, + ) + + # Import sympy locally + from torch.fx.experimental.symbolic_shapes import SymInt + from torch.utils._sympy.value_ranges import ValueRanges + + input_shape_constraints = graph_module.meta.get("input_shape_constraints", []) + inline_constraints = graph_module.meta.get("inline_constraints", []) + + # Create dict mapping tensor_id to node names + tensor_id_to_nodes: Dict[int, List[str]] = defaultdict(list) + # Create dict mapping placeholder node names to their nodes + placeholder_nodes: Dict[str, torch.fx.Node] = {} + for i, node in enumerate(graph_module.graph.nodes): + if node.op != "placeholder": + # All placeholder nodes should be together in the beginning of the + # graph + break + if i >= num_lifted_params_buffers: + example_input = example_inputs[i - num_lifted_params_buffers] + tensor_id_to_nodes[id(example_input)].append(node.name) + placeholder_nodes[node.name] = node + + # Create dict mapping (node name, dim) a list of range (lower, upper) + # constraints + multi_range_constraints: Dict[InputDim, List[ValueRanges]] = defaultdict(list) + for constraint in input_shape_constraints: + for node in tensor_id_to_nodes[constraint["t_id"]]: + node_dim = InputDim(node, constraint["dim"]) + + # Accumulate range constraints + multi_range_constraints[node_dim].append( + ValueRanges(constraint["min"], constraint["max"]) + ) + + # Create dict mapping symbol to a singular range (lower, upper) + range_constraints: Dict[Any, ValueRanges] = {} + + # Add inline constraints to range_constraints + range_constraints = { + symbol: inline_constraints[symbol] for symbol in inline_constraints + } + + free_symbols: Set["Symbol"] = set() + # Add input range constraints to range_constraints + for input_dim, multi_range_constraint in multi_range_constraints.items(): # type: ignore[assignment] + # Simplify the range constraints into a single range constraint + # Ex. ranges [2, 10] and [3, 11] would get merged to [3, 10] + min_vals = [rc.lower for rc in multi_range_constraint] + max_vals = [rc.upper for rc in multi_range_constraint] + min_val = max(min_vals) # type: ignore[type-var] + max_val = min(max_vals) # type: ignore[type-var] + assert min_val <= max_val # type: ignore[operator] + + # Add input node range constraints + val = placeholder_nodes[input_dim.input_name].meta["val"] + assert isinstance(val, FakeTensor) + symint = val.shape[input_dim.dim] + assert isinstance( + symint, SymInt + ), f"Expected SymInt but got {symint}: {type(symint)}" + symbol = symint.node.expr + range_constraints[symbol] = ValueRanges(min_val, max_val) + free_symbols.update(symbol.free_symbols) + + for symbol in free_symbols: + if symbol not in range_constraints: + # Placeholders can have symbolic shapes that are derived expressions. + # The above code will record direct range constraints for them + # so that we can do runtime assertions. In addition, for serde checks + # we want to record range constraints for their root symbols. 
+ range_constraints[symbol] = fake_mode.shape_env.var_to_range[symbol] + + return range_constraints diff --git a/venv/lib/python3.10/site-packages/torch/export/exported_program.py b/venv/lib/python3.10/site-packages/torch/export/exported_program.py new file mode 100644 index 0000000000000000000000000000000000000000..8d1d2740a437015875d2d2b85895f7ed00e67f26 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/exported_program.py @@ -0,0 +1,745 @@ +import copy +import dataclasses +import functools +import types +import warnings +from collections import namedtuple +from typing import ( + Any, + Callable, + Dict, + Iterator, + List, + Optional, + Tuple, + Type, + TYPE_CHECKING, + Union, +) + +from torch.fx.immutable_collections import immutable_dict, immutable_list + +if TYPE_CHECKING: + # Import the following modules during type checking to enable code intelligence features, + # such as auto-completion in tools like pylance, even when these modules are not explicitly + # imported in user code. + + import sympy + + from torch.utils._sympy.value_ranges import ValueRanges + +import torch +import torch.utils._pytree as pytree +from torch.export._tree_utils import is_equivalent, reorder_kwargs +from torch.fx._compatibility import compatibility +from torch.fx.experimental.proxy_tensor import maybe_disable_fake_tensor_mode + +from torch.fx.passes.infra.pass_base import PassResult +from torch.fx.passes.infra.pass_manager import PassManager + +from .graph_signature import ( # noqa: F401 + _sig_to_specs, + ArgumentSpec, + ConstantArgument, + CustomObjArgument, + ExportGraphSignature, + InputKind, + InputSpec, + OutputKind, + OutputSpec, + SymIntArgument, + TensorArgument, +) + + +__all__ = [ + "ExportedProgram", + "ModuleCallEntry", + "ModuleCallSignature", +] + + +PassType = Callable[[torch.fx.GraphModule], Optional[PassResult]] + + +@dataclasses.dataclass +class ModuleCallSignature: + inputs: List[ArgumentSpec] + outputs: List[ArgumentSpec] + in_spec: pytree.TreeSpec + out_spec: pytree.TreeSpec + + +@dataclasses.dataclass +class ModuleCallEntry: + fqn: str + signature: Optional[ModuleCallSignature] = None + + +def _disable_prexisiting_fake_mode(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + with maybe_disable_fake_tensor_mode(): + return fn(*args, **kwargs) + + return wrapper + + +def _fx_collection_equivalence_fn( + spec1_type: Optional[type], + spec1_context: pytree.Context, + spec2_type: Optional[type], + spec2_context: pytree.Context, +) -> bool: + """Treat containers and their immutable variants as the same type. Otherwise + compare as normal. + """ + if spec1_type is None or spec2_type is None: + return spec1_type is spec2_type and spec1_context == spec2_context + + if issubclass(spec1_type, (dict, immutable_dict)) and issubclass( + spec2_type, (dict, immutable_dict) + ): + return spec1_context == spec2_context + + if issubclass(spec1_type, (list, immutable_list)) and issubclass( + spec2_type, (list, immutable_list) + ): + return spec1_context == spec2_context + + return spec1_type is spec2_type and spec1_context == spec2_context + + +class ExportedProgram: + """ + Package of a program from :func:`export`. It contains + an :class:`torch.fx.Graph` that represents Tensor computation, a state_dict containing + tensor values of all lifted parameters and buffers, and various metadata. + + You can call an ExportedProgram like the original callable traced by + :func:`export` with the same calling convention. 
+ + To perform transformations on the graph, use ``.module`` property to access + an :class:`torch.fx.GraphModule`. You can then use + `FX transformation `_ + to rewrite the graph. Afterwards, you can simply use :func:`export` + again to construct a correct ExportedProgram. + """ + + def __init__( + self, + root: Union[torch.nn.Module, Dict[str, Any]], + graph: torch.fx.Graph, + graph_signature: ExportGraphSignature, + state_dict: Dict[str, Union[torch.Tensor, torch.nn.Parameter]], + range_constraints: "Dict[sympy.Symbol, Any]", + module_call_graph: List[ModuleCallEntry], + example_inputs: Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]] = None, + verifier: Optional[Type[Any]] = None, # TODO Change typing hint to Verifier. + tensor_constants: Optional[ + Dict[str, torch.Tensor] + ] = None, # TODO: deprecate this + constants: Optional[ + Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] + ] = None, + ): + # Remove codegen related things from the graph. It should just be a flat graph. + graph._codegen = torch.fx.graph.CodeGen() + self._graph_module = _create_graph_module_for_export(root, graph) + if isinstance(root, torch.fx.GraphModule): + self._graph_module.meta.update(root.meta) + + self._graph_signature: ExportGraphSignature = graph_signature + self._state_dict: Dict[str, Any] = state_dict + self._range_constraints: "Dict[sympy.Symbol, ValueRanges]" = range_constraints + assert module_call_graph is not None + self._module_call_graph: List[ModuleCallEntry] = module_call_graph + self._example_inputs = example_inputs + + self._constants = tensor_constants or constants or {} + assert self._constants is not None + + from torch._export.verifier import Verifier + + if verifier is None: + verifier = Verifier + assert issubclass(verifier, Verifier) + self._verifier = verifier + # Validate should be always the last step of the constructor. + self.verifier().check(self) + + @property + @compatibility(is_backward_compatible=False) + def graph_module(self): + return self._graph_module + + @property + @compatibility(is_backward_compatible=False) + def graph(self): + return self.graph_module.graph + + @property + @compatibility(is_backward_compatible=False) + def graph_signature(self): + return self._graph_signature + + @property + @compatibility(is_backward_compatible=False) + def state_dict(self): + return self._state_dict + + @compatibility(is_backward_compatible=False) + def parameters(self) -> Iterator[torch.nn.Parameter]: + """ + Returns an iterator over original module's parameters. + """ + for _, param in self.named_parameters(): + yield param + + @compatibility(is_backward_compatible=False) + def named_parameters(self) -> Iterator[Tuple[str, torch.nn.Parameter]]: + """ + Returns an iterator over original module parameters, yielding + both the name of the parameter as well as the parameter itself. + """ + for param_name in self.graph_signature.parameters: + yield param_name, self.state_dict[param_name] + + @compatibility(is_backward_compatible=False) + def buffers(self) -> Iterator[torch.Tensor]: + """ + Returns an iterator over original module buffers. + """ + for _, buf in self.named_buffers(): + yield buf + + @compatibility(is_backward_compatible=False) + def named_buffers(self) -> Iterator[Tuple[str, torch.Tensor]]: + """ + Returns an iterator over original module buffers, yielding + both the name of the buffer as well as the buffer itself. 
+ """ + non_persistent_buffers = set(self.graph_signature.non_persistent_buffers) + for buffer_name in self.graph_signature.buffers: + if buffer_name in non_persistent_buffers: + yield buffer_name, self.constants[buffer_name] + else: + yield buffer_name, self.state_dict[buffer_name] + + @property + @compatibility(is_backward_compatible=False) + def range_constraints(self): + return self._range_constraints + + @property + @compatibility(is_backward_compatible=False) + def module_call_graph(self): + return self._module_call_graph + + @property + @compatibility(is_backward_compatible=False) + def example_inputs(self): + return self._example_inputs + + @property + @compatibility(is_backward_compatible=False) + def call_spec(self): + CallSpec = namedtuple("CallSpec", ["in_spec", "out_spec"]) + + if len(self.module_call_graph) == 0: + return CallSpec(in_spec=None, out_spec=None) + assert self.module_call_graph[0].fqn == "" + return CallSpec( + in_spec=self.module_call_graph[0].signature.in_spec, + out_spec=self.module_call_graph[0].signature.out_spec, + ) + + @property + @compatibility(is_backward_compatible=False) + def verifier(self) -> Any: + return self._verifier + + @property + @compatibility(is_backward_compatible=False) + def dialect(self) -> str: + return self._verifier.dialect + + @property + @compatibility(is_backward_compatible=False) + def tensor_constants(self): + return self._constants + + @property + @compatibility(is_backward_compatible=False) + def constants(self): + return self._constants + + def _get_flat_args_with_check(self, args, kwargs): + """Flatten args, kwargs using pytree, then, check specs. + + Args: + args: List[Any] original args passed to __call__ + kwargs: Dict[str, Any] original kwargs passed to __call + + Returns: + A tuple of (flat_args, received_spec) + flat_args is flattend args / kwargs + received_spec is the pytree spec produced while flattening the + tuple (args, kwargs) + """ + in_spec = self.call_spec.in_spec + if in_spec is not None: + kwargs = reorder_kwargs(kwargs, in_spec) + flat_args_with_path, received_spec = pytree.tree_flatten_with_path( + (args, kwargs) + ) # type: ignore[possibly-undefined] + self._check_input_constraints(flat_args_with_path) + flat_args = tuple(x[1] for x in flat_args_with_path) + return flat_args, received_spec + + def _graph_module_flat_inputs(self, args: Any, kwargs: Any) -> Any: + """Transform args, kwargs of __call__ to args for graph_module. + + self.graph_module takes stuff from state dict as inputs. + The invariant is for ep: ExportedProgram is + ep(args, kwargs) == + ep.postprocess(ep.graph_module(ep.graph_module_flat_inputs(args, kwargs))) + """ + + in_spec = self.call_spec.in_spec + flat_args, received_spec = self._get_flat_args_with_check(args, kwargs) + if in_spec is not None and not is_equivalent( + received_spec, in_spec, _fx_collection_equivalence_fn + ): + raise ValueError( + "Trying to flatten user inputs with exported input tree spec: \n" + f"{in_spec}\n" + "but actually got inputs with tree spec of: \n" + f"{received_spec}" + ) + + additional_inputs = [] + for input_ in self.graph_signature.input_specs: + if input_.kind == InputKind.USER_INPUT: + continue + elif input_.kind in ( + InputKind.PARAMETER, + InputKind.BUFFER, + ): + if input_.persistent is False: + # This is a non-persistent buffer, grab it from our + # constants instead of the state dict. 
+ additional_inputs.append(self.constants[input_.target]) + else: + additional_inputs.append(self.state_dict[input_.target]) + elif input_.kind in ( + InputKind.CONSTANT_TENSOR, + InputKind.CUSTOM_OBJ, + ): + additional_inputs.append(self.constants[input_.target]) + additional_inputs = tuple(additional_inputs) + + # NOTE: calling convention is first params, then buffers, then args as user supplied them. + # See: torch/_functorch/aot_autograd.py#L1034 + return additional_inputs + flat_args + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + raise RuntimeError( + "Unable to call ExportedProgram directly. " + "You should use `exported_program.module()` instead." + ) + + def _postprocess_graph_module_outputs(self, res, orig_args, orig_kwargs): + """Process potential mutations to the input. + + Because self.graph_module is functional, so mutations has to be written + back after execution of graph_module. + """ + import torch._export.error as error + + flat_args, _ = self._get_flat_args_with_check(orig_args, orig_kwargs) + if self.call_spec.out_spec is not None: + buffer_mutation = self.graph_signature.buffers_to_mutate + user_input_mutation = self.graph_signature.user_inputs_to_mutate + num_mutated = len(buffer_mutation) + len(user_input_mutation) + mutated_values = res[:num_mutated] + + # Exclude dependency token from final result. + assertion_dep_token = self.graph_signature.assertion_dep_token + if assertion_dep_token is not None: + assertion_dep_token_index = next(iter(assertion_dep_token.keys())) + res = res[:assertion_dep_token_index] + + res = res[num_mutated:] + try: + res = pytree.tree_unflatten(res, self.call_spec.out_spec) + except Exception: + _, received_spec = pytree.tree_flatten(res) + raise error.InternalError( # noqa: TRY200 + "Trying to flatten user outputs with exported output tree spec: \n" + f"{self.call_spec.out_spec}\n" + "but actually got outputs with tree spec of: \n" + f"{received_spec}" + ) + finally: + user_inputs = [ + spec + for spec in self.graph_signature.input_specs + if spec.kind == InputKind.USER_INPUT + ] + for i, value in enumerate(mutated_values): + output_spec = self.graph_signature.output_specs[i] + if output_spec.kind == OutputKind.BUFFER_MUTATION: + assert output_spec.target is not None + self.state_dict[output_spec.target] = value + elif output_spec.kind == OutputKind.USER_INPUT_MUTATION: + assert output_spec.target is not None + index = next( + i + for i, spec in enumerate(user_inputs) + if spec.arg.name == output_spec.target + ) + flat_args[index].copy_(value) + else: + raise AssertionError(f"Unexpected kind: {output_spec.kind}") + return res + + def __str__(self) -> str: + graph_module = self.graph_module.print_readable(print_output=False).replace( + "\n", "\n " + ) + string = ( + "ExportedProgram:\n" + f" {graph_module}\n" + f"Graph signature: {self.graph_signature}\n" + f"Range constraints: {self.range_constraints}\n" + ) + return string + + def module(self) -> torch.nn.Module: + """ + Returns a self contained GraphModule with all the parameters/buffers inlined. 
+ """ + from ._unlift import _unlift_exported_program_lifted_states + + module = _unlift_exported_program_lifted_states(self) + + def _train(self, mode: bool = True): + raise NotImplementedError("Calling train() is not supported yet.") + + def _eval(self, mode: bool = True): + raise NotImplementedError("Calling eval() is not supported yet.") + + module.train = types.MethodType(_train, module) # type: ignore[method-assign] + module.eval = types.MethodType(_eval, module) # type: ignore[method-assign] + return module + + @_disable_prexisiting_fake_mode + def run_decompositions( + self, decomp_table: Optional[Dict[torch._ops.OperatorBase, Callable]] = None + ) -> "ExportedProgram": + """ + Run a set of decompositions on the exported program and returns a new + exported program. By default we will run the Core ATen decompositions to + get operators in the + `Core ATen Operator Set `_. + + For now, we do not decompose joint graphs. + """ + from torch._decomp import core_aten_decompositions + from torch._export.passes.add_runtime_assertions_for_constraints_pass import ( + _AddRuntimeAssertionsForInlineConstraintsPass, + ) + from torch._export.passes.lift_constants_pass import ( + ConstantAttrMap, + lift_constants_pass, + ) + from torch._export.passes.replace_sym_size_ops_pass import ( + _replace_sym_size_ops_pass, + ) + from torch._functorch.aot_autograd import aot_export_module + + def _get_placeholders(gm): + placeholders = [] + for node in gm.graph.nodes: + if node.op != "placeholder": + break + placeholders.append(node) + return placeholders + + decomp_table = decomp_table or core_aten_decompositions() + + old_placeholders = _get_placeholders(self.graph_module) + fake_args = [node.meta["val"] for node in old_placeholders] + + buffers_to_remove = [name for name, _ in self.graph_module.named_buffers()] + for name in buffers_to_remove: + delattr(self.graph_module, name) + # TODO(zhxhchen17) Return the new graph_signature directly. 
+ gm, graph_signature = aot_export_module( + self.graph_module, fake_args, decompositions=decomp_table, trace_joint=False + ) + + # Update the signatures with the new placeholder names in case they + # changed when calling aot_export + def update_arg(old_arg, new_ph): + if isinstance(old_arg, ConstantArgument): + return old_arg + elif isinstance(old_arg, TensorArgument): + return TensorArgument(name=new_ph.name) + elif isinstance(old_arg, SymIntArgument): + return SymIntArgument(name=new_ph.name) + raise RuntimeError(f"Type of old_arg not supported: {type(old_arg)}") + + new_placeholders = _get_placeholders(gm) + new_outputs = list(gm.graph.nodes)[-1].args[0] + + # To match the output target with correct input for input mutations + # need to find the old to new placeholder map + old_new_placeholder_map = { + spec.arg.name: new_placeholders[i].name + for i, spec in enumerate(self.graph_signature.input_specs) + if not isinstance(spec.arg, ConstantArgument) + } + + input_specs = [ + InputSpec( + spec.kind, + update_arg(spec.arg, new_placeholders[i]), + spec.target, + spec.persistent, + ) + for i, spec in enumerate(self.graph_signature.input_specs) + ] + output_specs = [ + OutputSpec( + spec.kind, + update_arg(spec.arg, new_outputs[i]), + old_new_placeholder_map.get(spec.target, spec.target), + ) + for i, spec in enumerate(self.graph_signature.output_specs) + ] + + assert len(new_placeholders) == len(old_placeholders) + + new_graph_signature = ExportGraphSignature( + input_specs=input_specs, output_specs=output_specs + ) + # NOTE: aot_export adds symint metadata for placeholders with int + # values; since these become specialized, we replace such metadata with + # the original values. + # Also, set the param/buffer metadata back to the placeholders. + for old_node, new_node in zip(old_placeholders, new_placeholders): + if not isinstance(old_node.meta["val"], torch.Tensor): + new_node.meta["val"] = old_node.meta["val"] + + if ( + new_node.target in new_graph_signature.inputs_to_parameters + or new_node.target in new_graph_signature.inputs_to_buffers + ): + for k, v in old_node.meta.items(): + new_node.meta[k] = v + + # TODO unfortunately preserving graph-level metadata is not + # working well with aot_export. So we manually copy it. + # (The node-level meta is addressed above.) + gm.meta.update(self.graph_module.meta) + + new_range_constraints = _get_updated_range_constraints(gm) + + constants = lift_constants_pass(gm, new_graph_signature, ConstantAttrMap()) + for k, v in constants.items(): + assert k not in self.constants + self.constants[k] = v + + _replace_sym_size_ops_pass(gm) + exported_program = ExportedProgram( + root=gm, + graph=gm.graph, + graph_signature=new_graph_signature, + state_dict=self.state_dict, + range_constraints=new_range_constraints, + module_call_graph=copy.deepcopy(self.module_call_graph), + example_inputs=self.example_inputs, + verifier=self.verifier, + constants=self.constants, + ) + + if len(new_range_constraints) > 0: + exported_program = exported_program._transform_do_not_use( + _AddRuntimeAssertionsForInlineConstraintsPass(new_range_constraints) + ) + + return exported_program + + def _transform_do_not_use(self, *passes: PassType) -> "ExportedProgram": + pm = PassManager(list(passes)) + res = pm(self.graph_module) + transformed_gm = res.graph_module if res is not None else self.graph_module + assert transformed_gm is not None + + if transformed_gm is self.graph_module and not res.modified: + return self + + # TODO(zhxchen17) Remove this. 
+ def _get_updated_graph_signature( + old_signature: ExportGraphSignature, + new_gm: torch.fx.GraphModule, + ) -> ExportGraphSignature: + """ + Update the graph signature's user_input/user_outputs. + """ + new_input_specs = [] + for i, node in enumerate(new_gm.graph.nodes): + if node.op != "placeholder": + break + + assert i < len( + old_signature.input_specs + ), "Number of inputs changed after transformation" + old_input_spec = old_signature.input_specs[i] + arg = ( + old_input_spec.arg + if isinstance( + old_input_spec.arg, (ConstantArgument, CustomObjArgument) + ) + else type(old_input_spec.arg)(node.name) + ) + new_input_specs.append( + InputSpec( + old_input_spec.kind, + arg, + old_input_spec.target, + old_input_spec.persistent, + ) + ) + + output_node = list(new_gm.graph.nodes)[-1] + assert output_node.op == "output" + + new_output_specs = [] + for i, node in enumerate(output_node.args[0]): + assert i < len( + old_signature.output_specs + ), "Number of outputs changed after transformation" + old_output_spec = old_signature.output_specs[i] + arg = ( + old_output_spec.arg + if isinstance( + old_output_spec.arg, (ConstantArgument, CustomObjArgument) + ) + else type(old_output_spec.arg)(node.name) + ) + new_output_specs.append( + OutputSpec(old_output_spec.kind, arg, old_output_spec.target) + ) + + new_signature = ExportGraphSignature( + input_specs=new_input_specs, output_specs=new_output_specs + ) + return new_signature + + transformed_ep = ExportedProgram( + root=transformed_gm, + graph=transformed_gm.graph, + graph_signature=_get_updated_graph_signature( + self.graph_signature, transformed_gm + ), + state_dict=self.state_dict, + range_constraints=_get_updated_range_constraints(transformed_gm), + module_call_graph=copy.deepcopy(self._module_call_graph), + example_inputs=self.example_inputs, + verifier=self.verifier, + constants=self.constants, + ) + transformed_ep.graph_module.meta.update(self.graph_module.meta) + transformed_ep.graph_module.meta.update(res.graph_module.meta) + return transformed_ep + + def _check_input_constraints(self, flat_args_with_path): + from torch._export.utils import _check_input_constraints_for_graph + + placeholders = [p for p in self.graph.nodes if p.op == "placeholder"] + input_placeholders = [ + p + for p, s in zip(placeholders, self.graph_signature.input_specs) + if s.kind == InputKind.USER_INPUT + ] + _check_input_constraints_for_graph( + input_placeholders, flat_args_with_path, self.range_constraints + ) + + def _validate(self): + self.verifier().check(self) + + # TODO(zhxchen17) Formalize this. 
+ def _update( + self, graph_module, graph_signature, state_dict=None + ) -> "ExportedProgram": + return ExportedProgram( + root=graph_module, + graph=graph_module.graph, + graph_signature=graph_signature, + state_dict=state_dict or self.state_dict, + range_constraints=copy.deepcopy(self.range_constraints), + module_call_graph=copy.deepcopy(self._module_call_graph), + example_inputs=self.example_inputs, + verifier=self.verifier, + tensor_constants=self.tensor_constants, + ) + + +def _get_updated_range_constraints( + gm: torch.fx.GraphModule, +) -> "Dict[sympy.Symbol, Any]": + def get_shape_env(gm): + vals = [ + node.meta["val"] + for node in gm.graph.nodes + if node.meta.get("val", None) is not None + ] + from torch._guards import detect_fake_mode + + fake_mode = detect_fake_mode(vals) + if fake_mode is not None: + return fake_mode.shape_env + for v in vals: + if isinstance(v, torch.SymInt): + return v.node.shape_env + + shape_env = get_shape_env(gm) + if shape_env is None: + return {} + range_constraints = { + k: v + for k, v in shape_env.var_to_range.items() + if k not in shape_env.replacements + } + # Only when we have an unbacked symint, and it's used as constructor inputs, + # runtime_var_to_range will make a difference compated to var_to_range. + # e.g. [2, oo) -> [0, oo) + for k, v in shape_env.var_to_range.items(): + if k not in shape_env.replacements: + range_constraints[k] = v + return range_constraints + + +def _create_graph_module_for_export(root, graph): + try: + gm = torch.fx.GraphModule(root, graph) + except SyntaxError: + # If custom objects stored in memory are being used in the graph, + # the generated python code will result in a syntax error on the custom + # object, since it is unable to parse the in-memory object. However + # we can still run the graph eagerly through torch.fx.Interpreter, + # so we will bypass this error. + warnings.warn( + "Unable to execute the generated python source code from " + "the graph. The graph module will no longer be directly callable, " + "but you can still run the ExportedProgram, and if needed, you can " + "run the graph module eagerly using torch.fx.Interpreter." 
+ ) + gm = torch.fx.GraphModule(root, torch.fx.Graph()) + gm._graph = graph + + return gm diff --git a/venv/lib/python3.10/site-packages/torch/export/graph_signature.py b/venv/lib/python3.10/site-packages/torch/export/graph_signature.py new file mode 100644 index 0000000000000000000000000000000000000000..9829fece8d785d2664f8be13c4f831e364047ad1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/graph_signature.py @@ -0,0 +1,504 @@ +import dataclasses +from enum import auto, Enum +from typing import Collection, Dict, List, Mapping, Optional, Set, Tuple, Union + + +__all__ = [ + "ConstantArgument", + "CustomObjArgument", + "ExportBackwardSignature", + "ExportGraphSignature", + "InputKind", + "InputSpec", + "OutputKind", + "OutputSpec", + "SymIntArgument", + "TensorArgument", +] + + +@dataclasses.dataclass +class TensorArgument: + name: str + + +@dataclasses.dataclass +class SymIntArgument: + name: str + + +@dataclasses.dataclass +class CustomObjArgument: + name: str + class_fqn: str + + +@dataclasses.dataclass +class ConstantArgument: + value: Union[int, float, bool, None] + + +ArgumentSpec = Union[ + TensorArgument, SymIntArgument, ConstantArgument, CustomObjArgument +] + + +class InputKind(Enum): + USER_INPUT = auto() + PARAMETER = auto() + BUFFER = auto() + CONSTANT_TENSOR = auto() + CUSTOM_OBJ = auto() + TOKEN = auto() + + +@dataclasses.dataclass +class InputSpec: + kind: InputKind + arg: ArgumentSpec + target: Optional[str] + persistent: Optional[bool] = None + + def __post_init__(self): + if self.kind == InputKind.BUFFER: + assert ( + self.persistent is not None + ), "Failed to specify persistent flag on BUFFER." + assert isinstance( + self.arg, + (TensorArgument, SymIntArgument, ConstantArgument, CustomObjArgument), + ), f"got {type(self.arg)}" + + +class OutputKind(Enum): + USER_OUTPUT = auto() + LOSS_OUTPUT = auto() + BUFFER_MUTATION = auto() + GRADIENT_TO_PARAMETER = auto() + GRADIENT_TO_USER_INPUT = auto() + USER_INPUT_MUTATION = auto() + TOKEN = auto() + + +@dataclasses.dataclass +class OutputSpec: + kind: OutputKind + arg: ArgumentSpec + target: Optional[str] + + def __post_init__(self): + assert isinstance(self.arg, (TensorArgument, SymIntArgument, ConstantArgument)) + + +def _sig_to_specs( + *, + user_inputs: Set[str], + inputs_to_parameters: Mapping[str, str], + inputs_to_buffers: Mapping[str, str], + user_outputs: Set[str], + buffer_mutations: Mapping[str, str], + user_input_mutations: Mapping[str, str], + grad_params: Mapping[str, str], + grad_user_inputs: Mapping[str, str], + loss_output: Optional[str], + inputs: List[ArgumentSpec], + outputs: List[ArgumentSpec], + input_tokens: List[str], + output_tokens: List[str], +) -> Tuple[List[InputSpec], List[OutputSpec]]: + def to_input_spec(inp: ArgumentSpec) -> InputSpec: + if not isinstance(inp, TensorArgument): + return InputSpec(kind=InputKind.USER_INPUT, arg=inp, target=None) + name = inp.name + if name in user_inputs: + return InputSpec(kind=InputKind.USER_INPUT, arg=inp, target=None) + elif name in inputs_to_parameters: + return InputSpec( + kind=InputKind.PARAMETER, + arg=inp, + target=inputs_to_parameters[name], + ) + elif name in inputs_to_buffers: + return InputSpec( + kind=InputKind.BUFFER, + arg=inp, + target=inputs_to_buffers[name], + # Mark as True for now; we will fix this up to distinguish + # persistent from non-persistent later in tracing. + # See: rewrite_non_persistent_buffers() + # TODO(suo): this is horrible. 
+ persistent=True, + ) + elif name in input_tokens: + return InputSpec(kind=InputKind.TOKEN, arg=inp, target=None) + else: + raise AssertionError(f"Unknown tensor input kind: {name}") + + def to_output_spec(idx: int, o: ArgumentSpec) -> OutputSpec: + if not isinstance(o, TensorArgument): + return OutputSpec(kind=OutputKind.USER_OUTPUT, arg=o, target=None) + name = o.name + if idx < len(buffer_mutations) + len(user_input_mutations) + len(output_tokens): + if name in buffer_mutations: + return OutputSpec( + kind=OutputKind.BUFFER_MUTATION, + arg=o, + target=buffer_mutations[name], + ) + elif name in user_input_mutations: + return OutputSpec( + kind=OutputKind.USER_INPUT_MUTATION, + arg=o, + target=user_input_mutations[name], + ) + elif name in output_tokens: + return OutputSpec(kind=OutputKind.TOKEN, arg=o, target=None) + else: + raise AssertionError(f"Unknown tensor mutation kind: {name}") + else: + if name in user_outputs: + return OutputSpec(kind=OutputKind.USER_OUTPUT, arg=o, target=None) + + elif name in grad_params: + return OutputSpec( + kind=OutputKind.GRADIENT_TO_PARAMETER, + arg=o, + target=grad_params[name], + ) + elif name in grad_user_inputs: + return OutputSpec( + kind=OutputKind.GRADIENT_TO_USER_INPUT, + arg=o, + target=grad_user_inputs[name], + ) + elif name == loss_output: + return OutputSpec(kind=OutputKind.LOSS_OUTPUT, arg=o, target=None) + + else: + raise AssertionError(f"Unknown tensor output kind: {name}") + + input_specs = [to_input_spec(inp) for inp in inputs] + output_specs = [to_output_spec(idx, o) for idx, o in enumerate(outputs)] + return input_specs, output_specs + + +@dataclasses.dataclass +class ExportBackwardSignature: + gradients_to_parameters: Dict[str, str] + gradients_to_user_inputs: Dict[str, str] + loss_output: str + + +@dataclasses.dataclass +class ExportGraphSignature: + """ + :class:`ExportGraphSignature` models the input/output signature of Export Graph, + which is a fx.Graph with stronger invariants gurantees. + + Export Graph is functional and does not access "states" like parameters + or buffers within the graph via ``getattr`` nodes. Instead, :func:`export` + gurantees that parameters, buffers, and constant tensors are lifted out of + the graph as inputs. Similarly, any mutations to buffers are not included + in the graph either, instead the updated values of mutated buffers are + modeled as additional outputs of Export Graph. + + The ordering of all inputs and outputs are:: + + Inputs = [*parameters_buffers_constant_tensors, *flattened_user_inputs] + Outputs = [*mutated_inputs, *flattened_user_outputs] + + e.g. 
If following module is exported:: + + class CustomModule(nn.Module): + def __init__(self): + super(CustomModule, self).__init__() + + # Define a parameter + self.my_parameter = nn.Parameter(torch.tensor(2.0)) + + # Define two buffers + self.register_buffer('my_buffer1', torch.tensor(3.0)) + self.register_buffer('my_buffer2', torch.tensor(4.0)) + + def forward(self, x1, x2): + # Use the parameter, buffers, and both inputs in the forward method + output = (x1 + self.my_parameter) * self.my_buffer1 + x2 * self.my_buffer2 + + # Mutate one of the buffers (e.g., increment it by 1) + self.my_buffer2.add_(1.0) # In-place addition + + return output + + Resulting Graph would be:: + + graph(): + %arg0_1 := placeholder[target=arg0_1] + %arg1_1 := placeholder[target=arg1_1] + %arg2_1 := placeholder[target=arg2_1] + %arg3_1 := placeholder[target=arg3_1] + %arg4_1 := placeholder[target=arg4_1] + %add_tensor := call_function[target=torch.ops.aten.add.Tensor](args = (%arg3_1, %arg0_1), kwargs = {}) + %mul_tensor := call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, %arg1_1), kwargs = {}) + %mul_tensor_1 := call_function[target=torch.ops.aten.mul.Tensor](args = (%arg4_1, %arg2_1), kwargs = {}) + %add_tensor_1 := call_function[target=torch.ops.aten.add.Tensor](args = (%mul_tensor, %mul_tensor_1), kwargs = {}) + %add_tensor_2 := call_function[target=torch.ops.aten.add.Tensor](args = (%arg2_1, 1.0), kwargs = {}) + return (add_tensor_2, add_tensor_1) + + Resulting ExportGraphSignature would be:: + + ExportGraphSignature( + input_specs=[ + InputSpec(kind=, arg=TensorArgument(name='arg0_1'), target='my_parameter'), + InputSpec(kind=, arg=TensorArgument(name='arg1_1'), target='my_buffer1'), + InputSpec(kind=, arg=TensorArgument(name='arg2_1'), target='my_buffer2'), + InputSpec(kind=, arg=TensorArgument(name='arg3_1'), target=None), + InputSpec(kind=, arg=TensorArgument(name='arg4_1'), target=None) + ], + output_specs=[ + OutputSpec(kind=, arg=TensorArgument(name='add_2'), target='my_buffer2'), + OutputSpec(kind=, arg=TensorArgument(name='add_1'), target=None) + ] + ) + """ + + input_specs: List[InputSpec] + output_specs: List[OutputSpec] + + # A list of parameters uniquely identified by mangled fully qualified name + @property + def parameters(self) -> Collection[str]: + # TODO Make this tuple. + return [ + s.target + for s in self.input_specs + if s.kind == InputKind.PARAMETER + if isinstance(s.target, str) + ] + + # A list of buffers uniquely identified by mangled fully qualified name + @property + def buffers(self) -> Collection[str]: + # TODO Make this tuple. + return [ + s.target + for s in self.input_specs + if s.kind == InputKind.BUFFER + if isinstance(s.target, str) + ] + + @property + def non_persistent_buffers(self) -> Collection[str]: + return [ + s.target + for s in self.input_specs + if s.kind == InputKind.BUFFER + if s.persistent is False + if isinstance(s.target, str) + ] + + # A list of lifted constant tensors + @property + def lifted_tensor_constants(self) -> Collection[str]: + # TODO Make this tuple. + return [ + s.target + for s in self.input_specs + if s.kind == InputKind.CONSTANT_TENSOR + if isinstance(s.target, str) + ] + + @property + def lifted_custom_objs(self) -> Collection[str]: + # TODO Make this tuple. 
+ return [ + s.target + for s in self.input_specs + if s.kind == InputKind.CUSTOM_OBJ + if isinstance(s.target, str) + ] + + # Graph node names of pytree-flattened inputs of original program + @property + def user_inputs(self) -> Collection[Union[int, float, bool, None, str]]: + user_inputs: List[Union[int, float, bool, None, str]] = [] + for s in self.input_specs: + if s.kind != InputKind.USER_INPUT: + continue + + if isinstance(s.arg, (TensorArgument, SymIntArgument, CustomObjArgument)): + user_inputs.append(s.arg.name) + elif isinstance(s.arg, ConstantArgument): + user_inputs.append(s.arg.value) + else: + raise RuntimeError(f"{s.arg} is not a valid user inputs") + return tuple(user_inputs) + + # Graph node names of pytree-flattened outputs of original program + @property + def user_outputs(self) -> Collection[Union[int, float, bool, None, str]]: + user_outputs: List[Union[int, float, bool, None, str]] = [] + for s in self.output_specs: + if s.kind != OutputKind.USER_OUTPUT: + continue + + if isinstance(s.arg, (TensorArgument, SymIntArgument)): + user_outputs.append(s.arg.name) + elif isinstance(s.arg, ConstantArgument): + user_outputs.append(s.arg.value) + else: + raise RuntimeError(f"{s.arg} is not a valid user output") + return tuple(user_outputs) + + # A dictionary mapping graph input node names to parameters. If a graph input + # name is found in this dictionary, it is guranteed to be a lifted parameter. + @property + def inputs_to_parameters(self) -> Mapping[str, str]: + return { + s.arg.name: s.target + for s in self.input_specs + if s.kind == InputKind.PARAMETER + and isinstance(s.arg, TensorArgument) + and isinstance(s.target, str) + } + + # A dictionary mapping graph input node names to buffers. If a graph input + # name is found in this dictionary, it is guranteed to be a lifted buffer. + @property + def inputs_to_buffers(self) -> Mapping[str, str]: + return { + s.arg.name: s.target # type: ignore[union-attr, misc] + for s in self.input_specs + if s.kind == InputKind.BUFFER + and isinstance(s.arg, TensorArgument) + and isinstance(s.target, str) + } + + # A dictionary mapping graph output node names to buffers that are mutated in the + # original program. Buffers that are not mutated will not be found in this dictionary. + @property + def buffers_to_mutate(self) -> Mapping[str, str]: + return { + s.arg.name: s.target + for s in self.output_specs + if s.kind == OutputKind.BUFFER_MUTATION + and isinstance(s.arg, TensorArgument) + and isinstance(s.target, str) + } + + @property + def user_inputs_to_mutate(self) -> Mapping[str, str]: + return { + s.arg.name: s.target + for s in self.output_specs + if s.kind == OutputKind.USER_INPUT_MUTATION + and isinstance(s.arg, TensorArgument) + and isinstance(s.target, str) + } + + # A dictionary mapping graph input node names to lifted tensor constants. 
+ @property + def inputs_to_lifted_tensor_constants(self) -> Mapping[str, str]: + return { + s.arg.name: s.target + for s in self.input_specs + if s.kind == InputKind.CONSTANT_TENSOR + and isinstance(s.arg, TensorArgument) + and isinstance(s.target, str) + } + + @property + def inputs_to_lifted_custom_objs(self) -> Mapping[str, str]: + return { + s.arg.name: s.target + for s in self.input_specs + if s.kind == InputKind.CUSTOM_OBJ + and isinstance(s.arg, CustomObjArgument) + and isinstance(s.target, str) + } + + @property + def backward_signature(self) -> Optional[ExportBackwardSignature]: + loss_output = None + gradients_to_parameters: Dict[str, str] = {} + gradients_to_user_inputs: Dict[str, str] = {} + for spec in self.output_specs: + if spec.kind == OutputKind.LOSS_OUTPUT: + assert loss_output is None + assert isinstance(spec.arg, TensorArgument) + loss_output = spec.arg.name + elif spec.kind == OutputKind.GRADIENT_TO_PARAMETER: + assert isinstance(spec.target, str) + assert isinstance(spec.arg, TensorArgument) + gradients_to_parameters[spec.arg.name] = spec.target + elif spec.kind == OutputKind.GRADIENT_TO_USER_INPUT: + assert isinstance(spec.target, str) + assert isinstance(spec.arg, TensorArgument) + gradients_to_user_inputs[spec.arg.name] = spec.target + + if loss_output is None: + return None + + return ExportBackwardSignature( + loss_output=loss_output, + gradients_to_parameters=gradients_to_parameters, + gradients_to_user_inputs=gradients_to_user_inputs, + ) + + # Map from assertion dependency token index to assertion dep token output + # name in output. The shape of output after aot_autograd will be like: + # (updated_inputs, user_outputs, dep_token). + @property + def assertion_dep_token(self) -> Optional[Mapping[int, str]]: + return None + + @property + def input_tokens(self) -> List[str]: + input_tokens = [] + for s in self.input_specs: + if s.kind == InputKind.TOKEN: + assert isinstance(s.arg, TensorArgument) + input_tokens.append(s.arg.name) + return input_tokens + + @property + def output_tokens(self) -> List[str]: + output_tokens = [] + for s in self.output_specs: + if s.kind == OutputKind.TOKEN: + assert isinstance(s.arg, TensorArgument) + output_tokens.append(s.arg.name) + return output_tokens + + def __post_init__(self) -> None: + assertion_dep_token = self.assertion_dep_token + if assertion_dep_token is None: + return + assert len(assertion_dep_token) == 1 + assertion_dep_token_index = next(iter(assertion_dep_token.keys())) + assert ( + len(self.user_outputs) + len(self.buffers_to_mutate) + == assertion_dep_token_index + ) + + def replace_all_uses(self, old: str, new: str): + """ + Replace all uses of the old name with new name in the signature. 
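+
+        For example (hypothetical node names), renaming a placeholder after a
+        graph transformation::
+
+            signature.replace_all_uses("arg0_1", "arg0_1_new")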
+ """ + assert isinstance(old, str) + assert isinstance(new, str) + arg_types = (TensorArgument, SymIntArgument, CustomObjArgument) + for o in self.output_specs: + if isinstance(o.arg, arg_types): + if o.arg.name == old: + o.arg.name = new + for i in self.input_specs: + if isinstance(i.arg, arg_types): + if i.arg.name == old: + i.arg.name = new + + def get_replace_hook(self): + def _(old, new, user): + if user.op in ("output", "input"): + self.replace_all_uses(old.name, new) + + return _ diff --git a/venv/lib/python3.10/site-packages/torch/export/unflatten.py b/venv/lib/python3.10/site-packages/torch/export/unflatten.py new file mode 100644 index 0000000000000000000000000000000000000000..b16febd462194334d6f03ab7a7ffc97e5e4ac723 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/export/unflatten.py @@ -0,0 +1,860 @@ +import abc +import copy +import operator +from copy import deepcopy +from enum import Enum +from itertools import chain +from typing import Any, cast, Dict, List, Optional, Union + +import torch +import torch.fx._pytree as fx_pytree +import torch.utils._pytree as pytree +from torch.export._tree_utils import reorder_kwargs +from torch.export.exported_program import ( + ConstantArgument, + ExportedProgram, + ModuleCallSignature, + SymIntArgument, + TensorArgument, +) +from torch.fx._symbolic_trace import is_fx_tracing +from torch.utils._pytree import GetAttrKey, SequenceKey + +__all__ = ["InterpreterModule", "UnflattenedModule", "unflatten", "FlatArgsAdapter"] + + +class _AttrKind(Enum): + PARAMETER = "parameter" + BUFFER = "buffer" + CONSTANT = "constant" + + +# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module +# This installs empty Modules where none exist yet if they are subpaths of target +def _assign_attr( + from_obj: Union[torch.Tensor, torch.ScriptObject], + to_module: torch.nn.Module, + target: str, + attr_kind: _AttrKind, + persistent: bool = True, +): + *prefix, field = target.split(".") + for item in prefix: + t = getattr(to_module, item, None) + + if t is None: + t = torch.nn.Module() + setattr(to_module, item, t) + to_module = t + + if attr_kind == _AttrKind.PARAMETER: + assert isinstance(from_obj, torch.nn.Parameter) + to_module.register_parameter(field, from_obj) + elif attr_kind == _AttrKind.BUFFER: + assert isinstance(from_obj, torch.Tensor) + to_module.register_buffer(field, from_obj, persistent=persistent) + elif attr_kind == _AttrKind.CONSTANT: + assert isinstance(from_obj, (torch.Tensor, torch.ScriptObject)) + setattr(to_module, field, from_obj) + + +class InterpreterModule(torch.nn.Module): + """A module that uses torch.fx.Interpreter to execute instead of the usual + codegen that GraphModule uses. This provides better stack trace information + and makes it easier to debug execution. + """ + + def __init__( + self, + graph: torch.fx.Graph, + ): + super().__init__() + self.graph = graph + self.graph.owning_module = self + + def forward(self, *args, **kwargs): + assert self.graph_module is not None, "Didn't finalize this InterpreterModule" + if torch.compiler.is_dynamo_compiling(): + # Dynamo cannot trace through torch.fx.Interpreter, so fall back to + # GraphModule codegen in this instance. + return self.graph_module(*args, **kwargs) + else: + if kwargs: + # Handle **kwargs. FX only natively supports positional + # arguments (through placeholders). So in order to pass in + # kwargs, we must correspond the names of the placeholders with + # the keys in the kwarg dict. 
+ arg_list = list(args) + kwarg_names = self.arg_names[len(arg_list) :] + for kwarg_name in kwarg_names: + if kwarg_name in kwargs: + arg_list.append(kwargs[kwarg_name]) + + # Assert that the kwargs passed in exactly match the positional + # arguments specified by the GraphModule. This should be + # guaranteed by the unflattening process. + assert len(kwarg_names) == len(kwargs) + assert len(arg_list) == len(self.arg_names) + args = tuple(arg_list) + + return torch.fx.Interpreter(self, graph=self.graph).run( + *args, enable_io_processing=False + ) + + def finalize(self): + # We need to "finalize" because GraphModule populates its own state_dict + # based on the get_attrs observed in the graph. So we need to fully + # construct the graph and call _sink_params before generating this + # GraphModule. + + # need to set `graph_module` directly on the dict to avoid it getting + # registered as a submodule. + self.__dict__["graph_module"] = torch.fx.GraphModule(self, self.graph) + self.graph.lint() + + # Cache arg names for kwarg handling (see forward()) + self.arg_names = [] + for node in self.graph.nodes: + if node.op == "placeholder": + self.arg_names.append(node.target) + + +class FlatArgsAdapter(abc.ABC): + """ + Adapts input arguments with ``input_spec`` to align ``target_spec``. + """ + + @abc.abstractmethod + def adapt( + self, + target_spec: pytree.TreeSpec, + input_spec: pytree.TreeSpec, + input_args: List[Any], + ) -> List[Any]: + """NOTE: This adapter may mutate given ``input_args_with_path``.""" + ... + + +class UnflattenedModule(torch.nn.Module): + def __init__( + self, + export_module: ExportedProgram, + flat_args_adapter: Optional[FlatArgsAdapter] = None, + ): + super().__init__() + if export_module.graph_signature.backward_signature is not None: + raise ValueError("Unflattening on JointExportModule NYI") + + export_graph = deepcopy(export_module.graph) + self.graph_signature = deepcopy(export_module.graph_signature) + self.graph = torch.fx.Graph() + self.module_call_graph = deepcopy(export_module.module_call_graph) + self.flat_args_adapter = flat_args_adapter + # Flag to indicate whether args have been adapted. 
+ self.adapted = False + + _inplace_buffer_mutations(export_graph, self.graph_signature) + _outline_submodules(export_graph, self) + + self.range_constraints = export_module.range_constraints + self.equality_constraints: List = [] + + state_dict = export_module.state_dict + for name in self.graph_signature.parameters: + cloned = torch.nn.Parameter(state_dict[name].clone()) + _assign_attr( + cloned, + self, + name, + attr_kind=_AttrKind.PARAMETER, + ) + + non_persistent_buffers = set(self.graph_signature.non_persistent_buffers) + for name in self.graph_signature.buffers: + if name in non_persistent_buffers: + persistent = False + cloned = export_module.constants[name].clone() + else: + persistent = True + cloned = state_dict[name].clone() + + _assign_attr( + cloned, + self, + name, + attr_kind=_AttrKind.BUFFER, + persistent=persistent, + ) + + for fqn in chain( + self.graph_signature.lifted_tensor_constants, + self.graph_signature.lifted_custom_objs, + ): + constant = export_module.constants[fqn] + if isinstance(constant, torch.Tensor): + constant = constant.clone() + _assign_attr( + constant, + self, + fqn, + attr_kind=_AttrKind.CONSTANT, + ) + + inputs_to_state: Dict[str, str] = { + **self.graph_signature.inputs_to_parameters, + **self.graph_signature.inputs_to_buffers, + **self.graph_signature.inputs_to_lifted_tensor_constants, + **self.graph_signature.inputs_to_lifted_custom_objs, + } + + _sink_params(self, inputs_to_state, []) + # Check all input nodes has been processed. + for module in self.modules(): + if not isinstance(module, torch.fx.GraphModule): + continue + for node in module.graph.nodes: + if node.op != "placeholder": + continue + assert node.name not in inputs_to_state + + # Cache so we don't have to compute this every time. + # NOTE: this needs to be kept in sync with the placeholders in + # self.graph, but currently we have no way to guarantee that. + self.input_placeholders = [ + node for node in self.graph.nodes if node.op == "placeholder" + ] + self.check_input_constraints = True + assert self.module_call_graph[0].fqn == "" + + def forward(self, *args, **kwargs): + signature = self.module_call_graph[0].signature + + reordered_kwargs = reorder_kwargs(kwargs, signature.in_spec) + + flat_args_with_path, in_spec = pytree.tree_flatten_with_path( + (args, reordered_kwargs) + ) + flat_args = [x[1] for x in flat_args_with_path] + if is_fx_tracing(): + return_val = torch.fx.Interpreter(self, graph=self.graph).run( + *flat_args, enable_io_processing=False + ) + # For scalar return value, fx.Graph wraps in a tuple + if isinstance(return_val, tuple) and len(return_val) == 1: + return return_val[0] + return return_val + + if in_spec != signature.in_spec: + if not self.adapted: + print( + "Input treespec does not match with exported module's: \n" + f"Input treespec: {in_spec}. ", + f"Exported module treespec: {signature.in_spec}", + ) + if self.flat_args_adapter is None: + raise TypeError( + "There is no flat args adapter sepcified. " + "Are you sure you are calling this with the right arguments? 
" + ) + else: + if not self.adapted: + print("Adapting flat arg to match exported module's treespec") + flat_args = self.flat_args_adapter.adapt( + target_spec=signature.in_spec, + input_spec=in_spec, + input_args=flat_args, + ) + self.adapted = True + if len(flat_args) != signature.in_spec.num_leaves: + raise TypeError( + f"Flat args adaption failed, number of args mismatch " + f"Adatped: {len(flat_args)} \n" + f"Exported module: {signature.in_spec.num_leaves}" + ) + + if self.check_input_constraints: + # Import here to avoid an unfortunate circular dependency. + # TODO(suo): untangle this. + from torch._export.utils import _check_input_constraints_for_graph + + if self.adapted is True: + # TODO(suo): The FlatArgsAdapter returns a list of flat args, + # which we don't have keypaths for. For now, just create a dummy + # keypath to associate with the arg. + new_flat_args_with_path = [ # type: ignore[var-annotated] + ((SequenceKey(idx=0), GetAttrKey(name="")), arg) + for arg in flat_args + ] + else: + new_flat_args_with_path = flat_args_with_path # type: ignore[assignment] + + _check_input_constraints_for_graph( + self.input_placeholders, new_flat_args_with_path, self.range_constraints + ) + tree_out = torch.fx.Interpreter(self, graph=self.graph).run( + *flat_args, enable_io_processing=False + ) + return pytree.tree_unflatten(tree_out, signature.out_spec) + + +def unflatten( + module: ExportedProgram, flat_args_adapter: Optional[FlatArgsAdapter] = None +) -> UnflattenedModule: + """Unflatten an ExportedProgram, producing a module with the same module + hierarchy as the original eager module. This can be useful if you are trying + to use :mod:`torch.export` with another system that expects a module + hierachy instead of the flat graph that :mod:`torch.export` usually produces. + + .. note:: The args/kwargs of unflattened modules will not necessarily match + the eager module, so doing a module swap (e.g. :code:`self.submod = + new_mod`) will not necessarily work. If you need to swap a module out, you + need to set the :code:`preserve_module_call_signature` parameter of + :func:`torch.export.export`. + + Args: + module (ExportedProgram): The ExportedProgram to unflatten. + flat_args_adapter (Optional[FlatArgsAdapter]): Adapt flat args if input TreeSpec does not match with exported module's. + + Returns: + An instance of :class:`UnflattenedModule`, which has the same module + hierarchy as the original eager module pre-export. + """ + return UnflattenedModule(module, flat_args_adapter) + + +def _inplace_buffer_mutations(graph: torch.fx.Graph, graph_signature) -> None: + """Transform buffer mutations from their functionalized form into a copy_ + node in the graph. + + Functionalization represents buffer mutation by passing the buffer as an input and output. 
So for example, the eager code: + def forward(self, x): + self.buffer += x + return x * x + + Will become a graph that looks like: + def forward(self, buffer, x): + mutated_buffer = aten.add(buffer, x) + mul = aten.mul(x, x) + return (mutated_buffer, mul) + + We want to inplace this into something that looks like the original eager code: + def forward(self, buffer, x): + mutated_buffer = aten.add(buffer, x) + buffer.copy_(mutated_buffer) + mul = aten.mul(x, x) + return (mul,) + """ + output_node = next(iter(reversed(graph.nodes))) + assert output_node.op == "output" and len(output_node.args) == 1 + return_args = output_node.args[0] + + mutation_node_to_buffer = graph_signature.buffers_to_mutate + mutations = return_args[: len(mutation_node_to_buffer)] + buffers_to_inputs = {v: k for k, v in graph_signature.inputs_to_buffers.items()} + input_name_to_node = { + node.name: node for node in graph.nodes if node.op == "placeholder" + } + + for mutation in mutations: + buffer_name = mutation_node_to_buffer[mutation.name] + input_name = buffers_to_inputs[buffer_name] + input_node = input_name_to_node[input_name] + + with graph.inserting_after(mutation): + new_node = graph.create_node( + "call_function", torch.ops.aten.copy_, (input_node, mutation) + ) + for k, v in mutation.meta.items(): + new_node.meta[k] = v + # Replace all uses of the previously functional mutation with our copy_ output. + mutation.replace_all_uses_with(new_node, lambda x: x is not new_node) + + # Remove the mutated buffer from the graph outputs, since we don't need to + # thread it through anymore. We don't need to handle the inputs, which will + # be handled by _sink_params. + user_outputs = tuple( + return_args[len(mutation_node_to_buffer) :], + ) + output_node.args = ((user_outputs),) + + +def _is_prefix(candidate, target): + """Check whether `candidate` is a prefix of `target`.""" + return len(candidate) < len(target) and target[: len(candidate)] == candidate + + +def _compute_accessor(parent_fqn: str, child_fqn: str) -> str: + if parent_fqn == "": + # Handle the root module correctly. 
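+        # e.g. _compute_accessor("", "foo.bar") == "foo.bar"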
+ return child_fqn + + parent_split = parent_fqn.split(".") + child_split = child_fqn.split(".") + + assert ( + child_split[: len(parent_split)] == parent_split + ), f"Child module '{child_fqn}' is not a descendant of parent module '{parent_fqn}'" + return ".".join(child_split[len(parent_split) :]) + + +def _verify_graph_equivalence(x: torch.nn.Module, y: torch.nn.Module): + def graph_dump(graph: torch.fx.Graph) -> str: + ret = [] + nodes_idx: Dict[int, int] = {} + + def arg_dump(arg) -> str: + if isinstance(arg, torch.fx.Node): + return "%" + str(nodes_idx[id(arg)]) + return str(arg) + + for i, node in enumerate(graph.nodes): + args_dump = [str(arg) for arg in pytree.tree_map(arg_dump, node.args)] + args_dump += [ + f"{key}={value}" + for key, value in pytree.tree_map(arg_dump, node.kwargs).items() + ] + target = node.target if node.op == "call_function" else "" + ret.append(f"{i}: {node.op}[{target}]({', '.join(args_dump)})") + nodes_idx[id(node)] = i + return "\n".join(ret) + + assert graph_dump(x.graph) == graph_dump(y.graph) + + +def _add_spec(gm: torch.nn.Module, spec) -> str: + i = 0 + while hasattr(gm, f"_spec_{i}"): + i += 1 + name = f"_spec_{i}" + setattr(gm, name, spec) + return name + + +def _generate_flatten(gm: torch.nn.Module, node, spec) -> torch.fx.Node: + name = _add_spec(gm, spec) + spec_node = gm.graph.get_attr(name) + return gm.graph.call_function(fx_pytree.tree_flatten_spec, (node, spec_node)) + + +def _generate_unflatten(gm: torch.nn.Module, nodes, spec) -> torch.fx.Node: + name = _add_spec(gm, spec) + spec_node = gm.graph.get_attr(name) + return gm.graph.call_function(pytree.tree_unflatten, (nodes, spec_node)) + + +def _add_submodule(mod: torch.nn.Module, target: str, module_to_add: torch.nn.Module): + *prefix, field = target.split(".") + + for item in prefix: + submod = getattr(mod, item, None) + + if submod is None: + submod = torch.nn.Module() + setattr(mod, item, submod) + + if not isinstance(submod, torch.nn.Module): + return False + + mod = submod + + mod.add_module(field, module_to_add) + + +class _ModuleFrame: + def __init__( + self, + flat_graph, + nodes, + seen_nodes, + seen_modules, + parent, + module_stack, + module_id, + module_call_graph: Dict[str, ModuleCallSignature], + module: Optional[torch.nn.Module] = None, + ): + self.flat_graph = flat_graph + self.nodes = nodes + self.seen_nodes = seen_nodes + self.seen_modules = seen_modules + self.parent = parent + self.module_stack = module_stack + self.module_id = module_id + + self.module_call_graph = module_call_graph + self.verbose = False + + self.fqn = self.module_stack[-1] + if module is not None: + self.module = module + else: + self.module = InterpreterModule(torch.fx.Graph()) + if self.module_id in self.seen_modules: + self.cached_graph_module = self.seen_modules[self.module_id] + else: + self.cached_graph_module = None + self.seen_modules[self.module_id] = self.module + + self.graph = self.module.graph + + # Mapping of nodes in the flat graph to nodes in this graph. 
+ self.node_map: Dict[torch.fx.Node, torch.fx.Node] = {} + self.node_to_placeholder = {} + + self.parent_call_module: Optional[torch.fx.Node] = None + if parent is not None: + accessor = _compute_accessor(parent.fqn, self.fqn) + _add_submodule( + parent.module, + accessor, + self.module + if self.cached_graph_module is None + else self.cached_graph_module, + ) + self.parent_call_module = parent.graph.call_module(accessor) + + signature = module_call_graph.get(self.fqn) + if signature is not None and self.parent is not None: + assert signature.in_spec.num_children == 2 + args_spec = signature.in_spec.children_specs[0] + kwargs_spec = signature.in_spec.children_specs[1] + assert args_spec.context is None + assert kwargs_spec.context is not None + + with self.graph.inserting_after(None): + arg_nodes = [] + for idx in range(args_spec.num_children): + arg_nodes.append(self.graph.placeholder(f"_positional_arg_{idx}")) + kwarg_nodes = {} + for name in kwargs_spec.context: + kwarg_nodes[name] = self.graph.placeholder(name) + flat_args = _generate_flatten( + self.module, + (tuple(arg_nodes), kwarg_nodes), + signature.in_spec, + ) + for idx, arg in enumerate(signature.inputs): + flat_arg_node = self.graph.create_node( + op="call_function", + target=operator.getitem, + args=(flat_args, idx), + name=arg.name + if not isinstance(arg, ConstantArgument) + else f"_constant_{idx}", + ) + if isinstance(arg, ConstantArgument): + continue + flat_arg_node.meta = copy.copy(self.seen_nodes[arg.name].meta) + self.node_to_placeholder[self.seen_nodes[arg.name]] = flat_arg_node + + with self.parent.graph.inserting_before(self.parent_call_module): + input_nodes: List[Optional[torch.fx.Node]] = [] + for input in signature.inputs: + if isinstance(input, ConstantArgument) and input.value is None: + input_nodes.append(None) + else: + assert isinstance(input, (TensorArgument, SymIntArgument)) + input_nodes.append( + self.parent.remap_input(self.seen_nodes[input.name]) + ) + + inputs_node = _generate_unflatten( + self.parent.module, + input_nodes, + signature.in_spec, + ) + + args_node = self.parent.graph.call_function( + operator.getitem, (inputs_node, 0) + ) + kwargs_node = self.parent.graph.call_function( + operator.getitem, (inputs_node, 1) + ) + arg_nodes = [ + self.parent.graph.call_function(operator.getitem, (args_node, i)) + for i in range(args_spec.num_children) + ] + kwarg_nodes = { + k: self.parent.graph.call_function( + operator.getitem, (kwargs_node, k) + ) + for k in kwargs_spec.context + } + assert self.parent_call_module is not None + self.parent_call_module.args = tuple(arg_nodes) + self.parent_call_module.kwargs = kwarg_nodes + + def add_placeholder(self, x): + assert x.graph is self.flat_graph + # x is not in subgraph, create a new placeholder for subgraph + with self.graph.inserting_before(None): + placeholder_node = self.graph.placeholder(x.name, type_expr=x.type) + # copy all meta fields, even if some fields might be irrelvant for + # the placeholder node + placeholder_node.meta = copy.copy(x.meta) + self.node_to_placeholder[x] = placeholder_node + + def remap_input(self, x): + assert x.graph is self.flat_graph + if x in self.node_map: + return self.node_map[x] + if x not in self.node_to_placeholder: + self.add_placeholder(x) + if self.parent_call_module is not None: + # Important to *prepend* the output to match how we are + # inserting placeholder nodes. 
+ self.parent_call_module.insert_arg(0, self.parent.remap_input(x)) + return self.node_to_placeholder[x] + + def finalize_outputs(self): + orig_outputs = [] + + signature = self.module_call_graph.get(self.fqn) + if signature is not None and self.parent is not None: + for output in signature.outputs: + if isinstance(output, (TensorArgument, SymIntArgument)): + orig_outputs.append(self.seen_nodes[output.name]) + else: + raise RuntimeError( + f"Unsupported data type for output node: {output}" + ) + + tree_out_node = _generate_unflatten( + self.module, + tuple( + self.node_map[self.seen_nodes[output.name]] + for output in orig_outputs + ), + signature.out_spec, + ) + parent_out: Optional[torch.fx.Node] = _generate_flatten( + self.parent.module, self.parent_call_module, signature.out_spec + ) + graph_outputs: Union[torch.fx.Node, List[torch.fx.Node]] = tree_out_node + else: + graph_outputs = [] + # Iterate through nodes we have copied into self.graph. + for orig_node in self.node_map.keys(): + for user_node in orig_node.users: + if user_node.name not in self.seen_nodes: + # external user node, need to expose as an output + orig_outputs.append(orig_node) + graph_outputs.append(self.node_map[orig_node]) + break + + parent_out = self.parent_call_module + if len(graph_outputs) == 1: + graph_outputs = graph_outputs[0] + + assert isinstance(graph_outputs, (list, torch.fx.Node)) + + self.graph.output(graph_outputs) + + # Rewrite outputs in parent module + if parent_out is None: + return + + parent_out.meta["val"] = ( + graph_outputs.meta.get("val") + if isinstance(graph_outputs, torch.fx.Node) + else [o.meta.get("val") for o in graph_outputs] + ) + + if len(orig_outputs) == 1 and signature is None: + self.parent.node_map[orig_outputs[0]] = parent_out + else: + for i, orig_output in enumerate(orig_outputs): + # Use Proxy to record getitem access. + proxy_out = torch.fx.Proxy(parent_out)[i].node # type: ignore[index] + proxy_out.meta["val"] = orig_output.meta.get("val") + self.parent.node_map[orig_output] = proxy_out + + if self.cached_graph_module is not None: + _verify_graph_equivalence(self.cached_graph_module, self.module) + + def copy_node(self, node): + self.print("copying", node.format_node()) + self.node_map[node] = self.graph.node_copy(node, self.remap_input) + self.seen_nodes[node.name] = node + + def run_outer(self): + i = 0 + for node in self.flat_graph.nodes: + self.print(i, node.meta.get("nn_module_stack"), node.format_node()) + i += 1 + + # Copy all graph inputs + node_idx: int = 0 + node = self.nodes[node_idx] + while node.op == "placeholder": + self.copy_node(node) + node_idx += 1 + node = self.nodes[node_idx] + + self.run_from(node_idx) + + # Copy graph outputs + for node in self.flat_graph.nodes: + if node.op == "output": + self.copy_node(node) + + def print(self, *args, **kwargs): + if self.verbose: + print(*args, **kwargs) + + def run_from(self, node_idx): + module_idx = 0 + # Walk through the graph, building up a new graph with the right submodules + while node_idx < len(self.nodes): + node = self.nodes[node_idx] + assert node.op != "placeholder" + + self.print() + self.print("STEP", node_idx, node.format_node()) + self.print(self.module_stack) + if node.op == "output": + if len(self.module_stack) == 1: + # We want the output node of the original graph to be handled + # specially by the outermost stack frame (in run_outer). So + # skip finalization here. + return node_idx + + # We've reached the end of the graph. Wrap up all the existing stack frames. 
+ self.finalize_outputs() + return node_idx + + node_module_stack = ( + [path for path, ty in node.meta["nn_module_stack"].values()] + if "nn_module_stack" in node.meta + else self.module_stack + ) + if node_module_stack[: len(self.module_stack)] != self.module_stack: + # This means that the current module is done executing and the + # current node is the beginning of a new module. + # + # In this case, we should finalize this module and return without + # incrementing the node counter. + self.finalize_outputs() + self.print("outlining", self.fqn) + self.print(self.graph) + return node_idx + + assert node_module_stack is not None + + if _is_prefix(self.module_stack, node_module_stack): + # This means that the current node represents the execution of a new + # module. + next_module = node_module_stack[len(self.module_stack)] + self.print("Creating new stack frame for", next_module) + # Run a nested version of module outliner from the current node + # counter. Once it is complete, continue from that point. + node_idx = _ModuleFrame( + self.flat_graph, + self.nodes, + self.seen_nodes, + self.seen_modules, + self, + self.module_stack + [next_module], + list(node.meta["nn_module_stack"].keys())[len(self.module_stack)], + self.module_call_graph, + ).run_from(node_idx) + module_idx += 1 + continue + + # The only remaining possibility is that we are in the right stack + # frame. Copy the node into this frame's graph and increment the node counter. + assert node_module_stack == self.module_stack + self.copy_node(node) + node_idx += 1 + + +def _outline_submodules(orig_graph: torch.fx.Graph, root_module: UnflattenedModule): + seen_nodes: Dict[str, torch.fx.Node] = {} + seen_modules: Dict[int, torch.nn.Module] = {} + _ModuleFrame( + orig_graph, + tuple(orig_graph.nodes), + seen_nodes, + seen_modules, + None, + [""], + "", + { + entry.fqn: entry.signature + for entry in root_module.module_call_graph + if entry.signature + }, + module=root_module, + ).run_outer() + + +def _sink_params( + module: torch.nn.Module, + inputs_to_state: Dict[str, str], + scope: List[str], +): + """Sink params, buffers, and constants from graph inputs into get_attr nodes. + + Exported modules are purely functional, so they pass their parameters and + buffers in as inputs to the graph. + + To replicate eager's semantics, we need to get them from the module state + via get_attr instead. + + module: GraphModule, potentially containining nested submodules. + inputs_to_state: mapping graph input names to the corresponding key in the state_dict. + scope: tracks where we are in the module hierarchy, so that we can emit the + right `getattr(self, "foo.bar")` calls, etc. + """ + # We need to use _modules here instead of named_children(), because we + # explicitly want duplicate modules to show up in the traversal. 
+ for name, submodule in module._modules.items(): + _sink_params(cast(torch.nn.Module, submodule), inputs_to_state, scope + [name]) + + if not hasattr(module, "graph"): + # Not all modules have graphs defined, if they are empty modules with no operations (like ParameterList) + return + + graph = module.graph + inputs = list(filter(lambda n: n.op == "placeholder", graph.nodes)) + the_last_input = inputs[-1] + + # Also remove from call_module nodes + call_module_nodes = filter(lambda n: n.op == "call_module", graph.nodes) + for node in call_module_nodes: + node.args = tuple(filter(lambda n: n.name not in inputs_to_state, node.args)) + + for node in inputs: + if node.name not in inputs_to_state: + continue + + if len(node.users) > 0: + state_name = inputs_to_state[node.name].split(".") + # If there's a mismatch beteewn scope name and state name, then there must be multuple scopes + # pointing to the same state name, meaning some modules are shared. In such case, we can simply + # skip updating the current node because another later iteration will take care of this input + # node when the unique match between scope and state name occurs. + # To make sure this always happen, we should enforce the invariant that no placeholder node + # in the unflattened graph appears in inputs_to_state dict, which means all the extra input + # nodes have been handled. + if state_name[: len(scope)] != scope: + continue + attr_path = state_name[len(scope) :] + state_attr = _recursive_getattr(module, attr_path) + assert isinstance(state_attr, (torch.Tensor, torch.ScriptObject)) + + # Make sure the newly created get_attr node is placed after the last placeholder node + with graph.inserting_after(the_last_input): + new_node = graph.create_node("get_attr", ".".join(attr_path)) + + node.replace_all_uses_with(new_node, propagate_meta=True) + graph.erase_node(node) + if isinstance(module, InterpreterModule): + module.finalize() + + +def _recursive_getattr(obj, attr_path): + for attr in attr_path: + obj = getattr(obj, attr) + + return obj diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/channel/cma/factory.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/channel/cma/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..eb71e1496af576f443c91f5ff335b83041a9ddaa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/channel/cma/factory.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace channel { +namespace cma { + +std::shared_ptr create(); + +} // namespace cma +} // namespace channel +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/buffer.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..f91cea36c4a09d29289cbb698aed343c13ce2373 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/buffer.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace tensorpipe { + +class Buffer { + class AbstractBufferWrapper { + public: + virtual Device device() const = 0; + virtual void copyConstructInto(void* ptr) const = 0; + virtual void moveConstructInto(void* ptr) = 0; + virtual ~AbstractBufferWrapper() = default; + }; + + template + class BufferWrapper : public AbstractBufferWrapper { + static_assert( + std::is_trivially_copyable::value, + "wrapping non-trivially copyable class"); + + public: + TBuffer buffer; + + explicit BufferWrapper(TBuffer buffer) : buffer(std::move(buffer)) {} + + Device device() const override { + return buffer.getDevice(); + } + + void copyConstructInto(void* ptr) const override { + new (ptr) BufferWrapper(*this); + } + + void moveConstructInto(void* ptr) override { + new (ptr) BufferWrapper(std::move(*this)); + } + }; + + public: + template + /* implicit */ Buffer(TBuffer b) { + static_assert( + sizeof(BufferWrapper) <= kStructSize, "kStructSize too small"); + static_assert( + alignof(BufferWrapper) <= kStructAlign, + "kStructAlign too small"); + new (&raw_) BufferWrapper(std::move(b)); + } + + Buffer() : Buffer(CpuBuffer{}) {} + + Buffer(const Buffer& other) { + other.ptr()->copyConstructInto(&raw_); + } + + Buffer& operator=(const Buffer& other) { + if (this != &other) { + ptr()->~AbstractBufferWrapper(); + other.ptr()->copyConstructInto(&raw_); + } + return *this; + } + + Buffer(Buffer&& other) noexcept { + other.ptr()->moveConstructInto(&raw_); + } + + Buffer& operator=(Buffer&& other) { + if (this != &other) { + ptr()->~AbstractBufferWrapper(); + other.ptr()->moveConstructInto(&raw_); + } + return *this; + } + + ~Buffer() { + ptr()->~AbstractBufferWrapper(); + } + + template + TBuffer& unwrap() { + BufferWrapper* wrapperPtr = + dynamic_cast*>(ptr()); + if (wrapperPtr == nullptr) { + throw std::runtime_error("Invalid unwrapping of tensorpipe::Buffer"); + } + return wrapperPtr->buffer; + } + + template + const TBuffer& unwrap() const { + const BufferWrapper* wrapperPtr = + dynamic_cast*>(ptr()); + if (wrapperPtr == nullptr) { + throw std::runtime_error("Invalid unwrapping of tensorpipe::Buffer"); + } + return wrapperPtr->buffer; + } + + Device device() const { + return ptr()->device(); + } + + private: + static constexpr int kStructSize = 32; + static constexpr int kStructAlign = 8; + std::aligned_storage::type raw_{}; + + const AbstractBufferWrapper* ptr() const { + // FIXME: Once we go C++17, use std::launder on the returned pointer. + return reinterpret_cast(&raw_); + } + + AbstractBufferWrapper* ptr() { + // FIXME: Once we go C++17, use std::launder on the returned pointer. + return reinterpret_cast(&raw_); + } +}; + +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/cpu_buffer.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/cpu_buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..2b5fe49235374a40ac0a8a8e2dc6689664d52cb4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/cpu_buffer.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + +#include + +namespace tensorpipe { + +struct CpuBuffer { + void* ptr{nullptr}; + + Device getDevice() const { + return Device{kCpuDeviceType, 0}; + } +}; + +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/cuda_buffer.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/cuda_buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..e772304b3ffdd97a39026f018d40ed7877f39e60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/cuda_buffer.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { + +struct CudaBuffer { + void* ptr{nullptr}; + cudaStream_t stream{cudaStreamDefault}; + + Device getDevice() const; +}; + +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/device.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/device.h new file mode 100644 index 0000000000000000000000000000000000000000..d7ff69bf2ac8b770c6329e4fb1107dabb5d630dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/device.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +namespace tensorpipe { + +const std::string kCpuDeviceType{"cpu"}; +const std::string kCudaDeviceType{"cuda"}; + +struct Device { + std::string type; + int index; + + // This pointless constructor is needed to work around a bug in GCC 5.5 (and + // possibly other versions). It appears to be needed in the nop types that + // are used inside nop::Optional. + Device() {} + + Device(std::string type, int index) : type(std::move(type)), index(index) {} + + std::string toString() const { + std::stringstream ss; + ss << type << ":" << index; + return ss.str(); + } + + bool operator==(const Device& other) const { + return type == other.type && index == other.index; + } +}; + +} // namespace tensorpipe + +namespace std { + +template <> +struct hash<::tensorpipe::Device> { + size_t operator()(const ::tensorpipe::Device& device) const noexcept { + return std::hash{}(device.toString()); + } +}; + +template <> +struct hash> { + size_t operator()(const std::pair<::tensorpipe::Device, ::tensorpipe::Device>& + p) const noexcept { + size_t h1 = std::hash<::tensorpipe::Device>{}(p.first); + size_t h2 = std::hash<::tensorpipe::Device>{}(p.second); + // Shifting one hash to avoid collisions between (a, b) and (b, a). + return h1 ^ (h2 << 1); + } +}; + +} // namespace std diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/error.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/error.h new file mode 100644 index 0000000000000000000000000000000000000000..7dc997cb2f594126fcd433169de992b4cb0aef78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/error.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include + +namespace tensorpipe { + +// Base class for actual errors. +class BaseError { + public: + virtual ~BaseError() = default; + + // Returns an explanatory string. + // Like `std::exception` but returns a `std::string`. + virtual std::string what() const = 0; +}; + +// Wrapper class for errors. +// +// Background: we wish to not use exceptions yet need an error +// representation that can propagate across function and thread +// boundaries. This representation must be copyable (so we can store +// and return it at a later point in time) and retain downstream type +// information. This implies a heap allocation because it's the +// easiest way to deal with variable size objects (barring a union of +// all downstream error classes and a lot of custom code). Instead of +// passing a shared_ptr around directly, we use this wrapper class to +// keep implementation details hidden from calling code. +// +class Error final { + public: + // Constant instance that indicates success. + static const Error kSuccess; + + // Default constructor for error that is not an error. + Error() {} + + Error(std::shared_ptr error, std::string file, int line) + : error_(std::move(error)), file_(std::move(file)), line_(line) {} + + virtual ~Error() = default; + + // Converting to boolean means checking if there is an error. This + // means we don't need to use an `std::optional` and allows for a + // snippet like the following: + // + // if (error) { + // // Deal with it. + // } + // + operator bool() const { + return static_cast(error_); + } + + template + std::shared_ptr castToType() const { + return std::dynamic_pointer_cast(error_); + } + + template + bool isOfType() const { + return castToType() != nullptr; + } + + // Like `std::exception` but returns a `std::string`. + std::string what() const; + + private: + std::shared_ptr error_; + std::string file_; + int line_; +}; + +class SystemError final : public BaseError { + public: + explicit SystemError(const char* syscall, int error) + : syscall_(syscall), error_(error) {} + + std::string what() const override; + + int errorCode() const; + + private: + const char* syscall_; + const int error_; +}; + +class ShortReadError final : public BaseError { + public: + ShortReadError(ssize_t expected, ssize_t actual) + : expected_(expected), actual_(actual) {} + + std::string what() const override; + + private: + const ssize_t expected_; + const ssize_t actual_; +}; + +class ShortWriteError final : public BaseError { + public: + ShortWriteError(ssize_t expected, ssize_t actual) + : expected_(expected), actual_(actual) {} + + std::string what() const override; + + private: + const ssize_t expected_; + const ssize_t actual_; +}; + +class EOFError final : public BaseError { + public: + EOFError() {} + + std::string what() const override; +}; + +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/optional.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/optional.h new file mode 100644 index 0000000000000000000000000000000000000000..549849ab2fa621abdd05a945cc302c3d0e460a51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/common/optional.h @@ -0,0 +1,1020 @@ +// Copyright (C) 2011 - 2012 Andrzej Krzemienski. 
+// +// Use, modification, and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The idea and interface is based on Boost.Optional library +// authored by Fernando Luis Cacciola Carballal + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#define TR2_OPTIONAL_REQUIRES(...) \ + typename std::enable_if<__VA_ARGS__::value, bool>::type = false + +#if defined __GNUC__ // NOTE: GNUC is also defined for Clang +#if (__GNUC__ == 4) && (__GNUC_MINOR__ >= 8) +#define TR2_OPTIONAL_GCC_4_8_AND_HIGHER___ +#elif (__GNUC__ > 4) +#define TR2_OPTIONAL_GCC_4_8_AND_HIGHER___ +#endif +# +#if (__GNUC__ == 4) && (__GNUC_MINOR__ >= 7) +#define TR2_OPTIONAL_GCC_4_7_AND_HIGHER___ +#elif (__GNUC__ > 4) +#define TR2_OPTIONAL_GCC_4_7_AND_HIGHER___ +#endif +# +#if (__GNUC__ == 4) && (__GNUC_MINOR__ == 8) && (__GNUC_PATCHLEVEL__ >= 1) +#define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___ +#elif (__GNUC__ == 4) && (__GNUC_MINOR__ >= 9) +#define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___ +#elif (__GNUC__ > 4) +#define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___ +#endif +#endif +# +#if defined __clang_major__ +#if (__clang_major__ == 3 && __clang_minor__ >= 5) +#define TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_ +#elif (__clang_major__ > 3) +#define TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_ +#endif +#if defined TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_ +#define TR2_OPTIONAL_CLANG_3_4_2_AND_HIGHER_ +#elif ( \ + __clang_major__ == 3 && __clang_minor__ == 4 && __clang_patchlevel__ >= 2) +#define TR2_OPTIONAL_CLANG_3_4_2_AND_HIGHER_ +#endif +#endif +# +#if defined _MSC_VER +#if (_MSC_VER >= 1900) +#define TR2_OPTIONAL_MSVC_2015_AND_HIGHER___ +#endif +#endif + +#if defined __clang__ +#if (__clang_major__ > 2) || (__clang_major__ == 2) && (__clang_minor__ >= 9) +#define OPTIONAL_HAS_THIS_RVALUE_REFS 1 +#else +#define OPTIONAL_HAS_THIS_RVALUE_REFS 0 +#endif +#elif defined TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___ +#define OPTIONAL_HAS_THIS_RVALUE_REFS 1 +#elif defined TR2_OPTIONAL_MSVC_2015_AND_HIGHER___ +#define OPTIONAL_HAS_THIS_RVALUE_REFS 1 +#else +#define OPTIONAL_HAS_THIS_RVALUE_REFS 0 +#endif + +#if defined TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___ +#define OPTIONAL_HAS_CONSTEXPR_INIT_LIST 1 +#define OPTIONAL_CONSTEXPR_INIT_LIST constexpr +#else +#define OPTIONAL_HAS_CONSTEXPR_INIT_LIST 0 +#define OPTIONAL_CONSTEXPR_INIT_LIST +#endif + +#if defined TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_ && (defined __cplusplus) && \ + (__cplusplus != 201103L) +#define OPTIONAL_HAS_MOVE_ACCESSORS 1 +#else +#define OPTIONAL_HAS_MOVE_ACCESSORS 0 +#endif + +#// In C++11 constexpr implies const, so we need to make non-const members also non-constexpr +#if (defined __cplusplus) && (__cplusplus == 201103L) +#define OPTIONAL_MUTABLE_CONSTEXPR +#else +#define OPTIONAL_MUTABLE_CONSTEXPR constexpr +#endif + +namespace tensorpipe { + +// 20.5.4, optional for object types +template +class optional; + +// 20.5.5, optional for lvalue reference types +template +class optional; + +// workaround: std utility functions aren't constexpr yet +template +inline constexpr T&& constexpr_forward( + typename std::remove_reference::type& t) noexcept { + return static_cast(t); +} + +template +inline constexpr T&& constexpr_forward( + typename std::remove_reference::type&& t) noexcept { + static_assert(!std::is_lvalue_reference::value, "!!"); + return static_cast(t); +} + +template +inline constexpr typename std::remove_reference::type&& constexpr_move( + T&& t) 
noexcept { + return static_cast::type&&>(t); +} + +#if defined NDEBUG +#define TR2_OPTIONAL_ASSERTED_EXPRESSION(CHECK, EXPR) (EXPR) +#else +#define TR2_OPTIONAL_ASSERTED_EXPRESSION(CHECK, EXPR) \ + ((CHECK) ? (EXPR) : ([] { assert(!#CHECK); }(), (EXPR))) +#endif + +namespace detail_ { + +// static_addressof: a constexpr version of addressof +template +struct has_overloaded_addressof { + template + constexpr static bool has_overload(...) { + return false; + } + + template ().operator&())> + constexpr static bool has_overload(bool) { + return true; + } + + constexpr static bool value = has_overload(true); +}; + +template )> +constexpr T* static_addressof(T& ref) { + return &ref; +} + +template )> +T* static_addressof(T& ref) { + return std::addressof(ref); +} + +// the call to convert(b) has return type A and converts b to type A iff b +// decltype(b) is implicitly convertible to A +template +constexpr U convert(U v) { + return v; +} + +} // namespace detail_ + +constexpr struct trivial_init_t { +} trivial_init{}; + +// 20.5.6, In-place construction +constexpr struct in_place_t { +} in_place{}; + +// 20.5.7, Disengaged state indicator +struct nullopt_t { + struct init {}; + constexpr explicit nullopt_t(init) {} +}; +constexpr nullopt_t nullopt{nullopt_t::init()}; + +// 20.5.8, class bad_optional_access +class bad_optional_access : public std::logic_error { + public: + explicit bad_optional_access(const std::string& what_arg) + : logic_error{what_arg} {} + explicit bad_optional_access(const char* what_arg) : logic_error{what_arg} {} +}; + +template +union storage_t { + unsigned char dummy_; + T value_; + + constexpr storage_t(trivial_init_t) noexcept : dummy_(){}; + + template + constexpr storage_t(Args&&... args) + : value_(constexpr_forward(args)...) {} + + ~storage_t() {} +}; + +template +union constexpr_storage_t { + unsigned char dummy_; + T value_; + + constexpr constexpr_storage_t(trivial_init_t) noexcept : dummy_(){}; + + template + constexpr constexpr_storage_t(Args&&... args) + : value_(constexpr_forward(args)...) {} + + ~constexpr_storage_t() = default; +}; + +template +struct optional_base { + bool init_; + storage_t storage_; + + constexpr optional_base() noexcept : init_(false), storage_(trivial_init){}; + + explicit constexpr optional_base(const T& v) : init_(true), storage_(v) {} + + explicit constexpr optional_base(T&& v) + : init_(true), storage_(constexpr_move(v)) {} + + template + explicit optional_base(in_place_t, Args&&... args) + : init_(true), storage_(constexpr_forward(args)...) {} + + template < + class U, + class... Args, + TR2_OPTIONAL_REQUIRES(std::is_constructible>)> + explicit optional_base( + in_place_t, + std::initializer_list il, + Args&&... args) + : init_(true), storage_(il, std::forward(args)...) {} + + ~optional_base() { + if (init_) + storage_.value_.T::~T(); + } +}; + +template +struct constexpr_optional_base { + bool init_; + constexpr_storage_t storage_; + + constexpr constexpr_optional_base() noexcept + : init_(false), storage_(trivial_init){}; + + explicit constexpr constexpr_optional_base(const T& v) + : init_(true), storage_(v) {} + + explicit constexpr constexpr_optional_base(T&& v) + : init_(true), storage_(constexpr_move(v)) {} + + template + explicit constexpr constexpr_optional_base(in_place_t, Args&&... args) + : init_(true), storage_(constexpr_forward(args)...) {} + + template < + class U, + class... 
Args, + TR2_OPTIONAL_REQUIRES(std::is_constructible>)> + OPTIONAL_CONSTEXPR_INIT_LIST explicit constexpr_optional_base( + in_place_t, + std::initializer_list il, + Args&&... args) + : init_(true), storage_(il, std::forward(args)...) {} + + ~constexpr_optional_base() = default; +}; + +template +using OptionalBase = typename std::conditional< + std::is_trivially_destructible::value, // if possible + constexpr_optional_base::type>, // use base with trivial destructor + optional_base::type>>::type; + +template +class optional : private OptionalBase { + static_assert( + !std::is_same::type, nullopt_t>::value, + "bad T"); + static_assert( + !std::is_same::type, in_place_t>::value, + "bad T"); + + constexpr bool initialized() const noexcept { + return OptionalBase::init_; + } + typename std::remove_const::type* dataptr() { + return std::addressof(OptionalBase::storage_.value_); + } + constexpr const T* dataptr() const { + return detail_::static_addressof(OptionalBase::storage_.value_); + } + +#if OPTIONAL_HAS_THIS_RVALUE_REFS == 1 + constexpr const T& contained_val() const& { + return OptionalBase::storage_.value_; + } +#if OPTIONAL_HAS_MOVE_ACCESSORS == 1 + OPTIONAL_MUTABLE_CONSTEXPR T&& contained_val() && { + return std::move(OptionalBase::storage_.value_); + } + OPTIONAL_MUTABLE_CONSTEXPR T& contained_val() & { + return OptionalBase::storage_.value_; + } +#else + T& contained_val() & { + return OptionalBase::storage_.value_; + } + T&& contained_val() && { + return std::move(OptionalBase::storage_.value_); + } +#endif +#else + constexpr const T& contained_val() const { + return OptionalBase::storage_.value_; + } + T& contained_val() { + return OptionalBase::storage_.value_; + } +#endif + + void clear() noexcept { + if (initialized()) + dataptr()->T::~T(); + OptionalBase::init_ = false; + } + + template + void initialize(Args&&... args) noexcept( + noexcept(T(std::forward(args)...))) { + assert(!OptionalBase::init_); + ::new (static_cast(dataptr())) T(std::forward(args)...); + OptionalBase::init_ = true; + } + + template + void initialize(std::initializer_list il, Args&&... args) noexcept( + noexcept(T(il, std::forward(args)...))) { + assert(!OptionalBase::init_); + ::new (static_cast(dataptr())) T(il, std::forward(args)...); + OptionalBase::init_ = true; + } + + public: + typedef T value_type; + + // 20.5.5.1, constructors + constexpr optional() noexcept : OptionalBase(){}; + constexpr optional(nullopt_t) noexcept : OptionalBase(){}; + + optional(const optional& rhs) : OptionalBase() { + if (rhs.initialized()) { + ::new (static_cast(dataptr())) T(*rhs); + OptionalBase::init_ = true; + } + } + + optional(optional&& rhs) noexcept( + std::is_nothrow_move_constructible::value) + : OptionalBase() { + if (rhs.initialized()) { + ::new (static_cast(dataptr())) T(std::move(*rhs)); + OptionalBase::init_ = true; + } + } + + constexpr optional(const T& v) : OptionalBase(v) {} + + constexpr optional(T&& v) : OptionalBase(constexpr_move(v)) {} + + template + explicit constexpr optional(in_place_t, Args&&... args) + : OptionalBase(in_place_t{}, constexpr_forward(args)...) {} + + template < + class U, + class... Args, + TR2_OPTIONAL_REQUIRES(std::is_constructible>)> + OPTIONAL_CONSTEXPR_INIT_LIST explicit optional( + in_place_t, + std::initializer_list il, + Args&&... args) + : OptionalBase(in_place_t{}, il, constexpr_forward(args)...) 
{} + + // 20.5.4.2, Destructor + ~optional() = default; + + // 20.5.4.3, assignment + optional& operator=(nullopt_t) noexcept { + clear(); + return *this; + } + + optional& operator=(const optional& rhs) { + if (initialized() == true && rhs.initialized() == false) + clear(); + else if (initialized() == false && rhs.initialized() == true) + initialize(*rhs); + else if (initialized() == true && rhs.initialized() == true) + contained_val() = *rhs; + return *this; + } + + optional& operator=(optional&& rhs) noexcept( + std::is_nothrow_move_assignable::value&& + std::is_nothrow_move_constructible::value) { + if (initialized() == true && rhs.initialized() == false) + clear(); + else if (initialized() == false && rhs.initialized() == true) + initialize(std::move(*rhs)); + else if (initialized() == true && rhs.initialized() == true) + contained_val() = std::move(*rhs); + return *this; + } + + template + auto operator=(U&& v) -> typename std::enable_if< + std::is_same::type, T>::value, + optional&>::type { + if (initialized()) { + contained_val() = std::forward(v); + } else { + initialize(std::forward(v)); + } + return *this; + } + + template + void emplace(Args&&... args) { + clear(); + initialize(std::forward(args)...); + } + + template + void emplace(std::initializer_list il, Args&&... args) { + clear(); + initialize(il, std::forward(args)...); + } + + // 20.5.4.4, Swap + void swap(optional& rhs) noexcept( + std::is_nothrow_move_constructible::value&& noexcept( + std::swap(std::declval(), std::declval()))) { + if (initialized() == true && rhs.initialized() == false) { + rhs.initialize(std::move(**this)); + clear(); + } else if (initialized() == false && rhs.initialized() == true) { + initialize(std::move(*rhs)); + rhs.clear(); + } else if (initialized() == true && rhs.initialized() == true) { + using std::swap; + swap(**this, *rhs); + } + } + + // 20.5.4.5, Observers + + explicit constexpr operator bool() const noexcept { + return initialized(); + } + constexpr bool has_value() const noexcept { + return initialized(); + } + + constexpr T const* operator->() const { + return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), dataptr()); + } + +#if OPTIONAL_HAS_MOVE_ACCESSORS == 1 + + OPTIONAL_MUTABLE_CONSTEXPR T* operator->() { + assert(initialized()); + return dataptr(); + } + + constexpr T const& operator*() const& { + return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), contained_val()); + } + + OPTIONAL_MUTABLE_CONSTEXPR T& operator*() & { + assert(initialized()); + return contained_val(); + } + + OPTIONAL_MUTABLE_CONSTEXPR T&& operator*() && { + assert(initialized()); + return constexpr_move(contained_val()); + } + + constexpr T const& value() const& { + return initialized() + ? contained_val() + : (throw bad_optional_access("bad optional access"), contained_val()); + } + + OPTIONAL_MUTABLE_CONSTEXPR T& value() & { + return initialized() + ? contained_val() + : (throw bad_optional_access("bad optional access"), contained_val()); + } + + OPTIONAL_MUTABLE_CONSTEXPR T&& value() && { + if (!initialized()) + throw bad_optional_access("bad optional access"); + return std::move(contained_val()); + } + +#else + + T* operator->() { + assert(initialized()); + return dataptr(); + } + + constexpr T const& operator*() const { + return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), contained_val()); + } + + T& operator*() { + assert(initialized()); + return contained_val(); + } + + constexpr T const& value() const { + return initialized() + ? 
contained_val() + : (throw bad_optional_access("bad optional access"), contained_val()); + } + + T& value() { + return initialized() + ? contained_val() + : (throw bad_optional_access("bad optional access"), contained_val()); + } + +#endif + +#if OPTIONAL_HAS_THIS_RVALUE_REFS == 1 + + template + constexpr T value_or(V&& v) const& { + return *this ? **this : detail_::convert(constexpr_forward(v)); + } + +#if OPTIONAL_HAS_MOVE_ACCESSORS == 1 + + template + OPTIONAL_MUTABLE_CONSTEXPR T value_or(V&& v) && { + return *this + ? constexpr_move(const_cast&>(*this).contained_val()) + : detail_::convert(constexpr_forward(v)); + } + +#else + + template + T value_or(V&& v) && { + return *this + ? constexpr_move(const_cast&>(*this).contained_val()) + : detail_::convert(constexpr_forward(v)); + } + +#endif + +#else + + template + constexpr T value_or(V&& v) const { + return *this ? **this : detail_::convert(constexpr_forward(v)); + } + +#endif + + // 20.6.3.6, modifiers + void reset() noexcept { + clear(); + } +}; + +template +class optional { + static_assert(!std::is_same::value, "bad T"); + static_assert(!std::is_same::value, "bad T"); + T* ref; + + public: + // 20.5.5.1, construction/destruction + constexpr optional() noexcept : ref(nullptr) {} + + constexpr optional(nullopt_t) noexcept : ref(nullptr) {} + + constexpr optional(T& v) noexcept : ref(detail_::static_addressof(v)) {} + + optional(T&&) = delete; + + constexpr optional(const optional& rhs) noexcept : ref(rhs.ref) {} + + explicit constexpr optional(in_place_t, T& v) noexcept + : ref(detail_::static_addressof(v)) {} + + explicit optional(in_place_t, T&&) = delete; + + ~optional() = default; + + // 20.5.5.2, mutation + optional& operator=(nullopt_t) noexcept { + ref = nullptr; + return *this; + } + + // optional& operator=(const optional& rhs) noexcept { + // ref = rhs.ref; + // return *this; + // } + + // optional& operator=(optional&& rhs) noexcept { + // ref = rhs.ref; + // return *this; + // } + + template + auto operator=(U&& rhs) noexcept -> typename std::enable_if< + std::is_same::type, optional>::value, + optional&>::type { + ref = rhs.ref; + return *this; + } + + template + auto operator=(U&& rhs) noexcept -> typename std::enable_if< + !std::is_same::type, optional>::value, + optional&>::type = delete; + + void emplace(T& v) noexcept { + ref = detail_::static_addressof(v); + } + + void emplace(T&&) = delete; + + void swap(optional& rhs) noexcept { + std::swap(ref, rhs.ref); + } + + // 20.5.5.3, observers + constexpr T* operator->() const { + return TR2_OPTIONAL_ASSERTED_EXPRESSION(ref, ref); + } + + constexpr T& operator*() const { + return TR2_OPTIONAL_ASSERTED_EXPRESSION(ref, *ref); + } + + constexpr T& value() const { + return ref ? *ref + : (throw bad_optional_access("bad optional access"), *ref); + } + + explicit constexpr operator bool() const noexcept { + return ref != nullptr; + } + + constexpr bool has_value() const noexcept { + return ref != nullptr; + } + + template + constexpr typename std::decay::type value_or(V&& v) const { + return *this ? **this + : detail_::convert::type>( + constexpr_forward(v)); + } + + // x.x.x.x, modifiers + void reset() noexcept { + ref = nullptr; + } +}; + +template +class optional { + static_assert(sizeof(T) == 0, "optional rvalue references disallowed"); +}; + +// 20.5.8, Relational operators +template +constexpr bool operator==(const optional& x, const optional& y) { + return bool(x) != bool(y) ? false : bool(x) == false ? 
true : *x == *y; +} + +template +constexpr bool operator!=(const optional& x, const optional& y) { + return !(x == y); +} + +template +constexpr bool operator<(const optional& x, const optional& y) { + return (!y) ? false : (!x) ? true : *x < *y; +} + +template +constexpr bool operator>(const optional& x, const optional& y) { + return (y < x); +} + +template +constexpr bool operator<=(const optional& x, const optional& y) { + return !(y < x); +} + +template +constexpr bool operator>=(const optional& x, const optional& y) { + return !(x < y); +} + +// 20.5.9, Comparison with nullopt +template +constexpr bool operator==(const optional& x, nullopt_t) noexcept { + return (!x); +} + +template +constexpr bool operator==(nullopt_t, const optional& x) noexcept { + return (!x); +} + +template +constexpr bool operator!=(const optional& x, nullopt_t) noexcept { + return bool(x); +} + +template +constexpr bool operator!=(nullopt_t, const optional& x) noexcept { + return bool(x); +} + +template +constexpr bool operator<(const optional&, nullopt_t) noexcept { + return false; +} + +template +constexpr bool operator<(nullopt_t, const optional& x) noexcept { + return bool(x); +} + +template +constexpr bool operator<=(const optional& x, nullopt_t) noexcept { + return (!x); +} + +template +constexpr bool operator<=(nullopt_t, const optional&) noexcept { + return true; +} + +template +constexpr bool operator>(const optional& x, nullopt_t) noexcept { + return bool(x); +} + +template +constexpr bool operator>(nullopt_t, const optional&) noexcept { + return false; +} + +template +constexpr bool operator>=(const optional&, nullopt_t) noexcept { + return true; +} + +template +constexpr bool operator>=(nullopt_t, const optional& x) noexcept { + return (!x); +} + +// 20.5.10, Comparison with T +template +constexpr bool operator==(const optional& x, const T& v) { + return bool(x) ? *x == v : false; +} + +template +constexpr bool operator==(const T& v, const optional& x) { + return bool(x) ? v == *x : false; +} + +template +constexpr bool operator!=(const optional& x, const T& v) { + return bool(x) ? *x != v : true; +} + +template +constexpr bool operator!=(const T& v, const optional& x) { + return bool(x) ? v != *x : true; +} + +template +constexpr bool operator<(const optional& x, const T& v) { + return bool(x) ? *x < v : true; +} + +template +constexpr bool operator>(const T& v, const optional& x) { + return bool(x) ? v > *x : true; +} + +template +constexpr bool operator>(const optional& x, const T& v) { + return bool(x) ? *x > v : false; +} + +template +constexpr bool operator<(const T& v, const optional& x) { + return bool(x) ? v < *x : false; +} + +template +constexpr bool operator>=(const optional& x, const T& v) { + return bool(x) ? *x >= v : false; +} + +template +constexpr bool operator<=(const T& v, const optional& x) { + return bool(x) ? v <= *x : false; +} + +template +constexpr bool operator<=(const optional& x, const T& v) { + return bool(x) ? *x <= v : true; +} + +template +constexpr bool operator>=(const T& v, const optional& x) { + return bool(x) ? v >= *x : true; +} + +// Comparison of optional with T +template +constexpr bool operator==(const optional& x, const T& v) { + return bool(x) ? *x == v : false; +} + +template +constexpr bool operator==(const T& v, const optional& x) { + return bool(x) ? v == *x : false; +} + +template +constexpr bool operator!=(const optional& x, const T& v) { + return bool(x) ? 
*x != v : true; +} + +template +constexpr bool operator!=(const T& v, const optional& x) { + return bool(x) ? v != *x : true; +} + +template +constexpr bool operator<(const optional& x, const T& v) { + return bool(x) ? *x < v : true; +} + +template +constexpr bool operator>(const T& v, const optional& x) { + return bool(x) ? v > *x : true; +} + +template +constexpr bool operator>(const optional& x, const T& v) { + return bool(x) ? *x > v : false; +} + +template +constexpr bool operator<(const T& v, const optional& x) { + return bool(x) ? v < *x : false; +} + +template +constexpr bool operator>=(const optional& x, const T& v) { + return bool(x) ? *x >= v : false; +} + +template +constexpr bool operator<=(const T& v, const optional& x) { + return bool(x) ? v <= *x : false; +} + +template +constexpr bool operator<=(const optional& x, const T& v) { + return bool(x) ? *x <= v : true; +} + +template +constexpr bool operator>=(const T& v, const optional& x) { + return bool(x) ? v >= *x : true; +} + +// Comparison of optional with T +template +constexpr bool operator==(const optional& x, const T& v) { + return bool(x) ? *x == v : false; +} + +template +constexpr bool operator==(const T& v, const optional& x) { + return bool(x) ? v == *x : false; +} + +template +constexpr bool operator!=(const optional& x, const T& v) { + return bool(x) ? *x != v : true; +} + +template +constexpr bool operator!=(const T& v, const optional& x) { + return bool(x) ? v != *x : true; +} + +template +constexpr bool operator<(const optional& x, const T& v) { + return bool(x) ? *x < v : true; +} + +template +constexpr bool operator>(const T& v, const optional& x) { + return bool(x) ? v > *x : true; +} + +template +constexpr bool operator>(const optional& x, const T& v) { + return bool(x) ? *x > v : false; +} + +template +constexpr bool operator<(const T& v, const optional& x) { + return bool(x) ? v < *x : false; +} + +template +constexpr bool operator>=(const optional& x, const T& v) { + return bool(x) ? *x >= v : false; +} + +template +constexpr bool operator<=(const T& v, const optional& x) { + return bool(x) ? v <= *x : false; +} + +template +constexpr bool operator<=(const optional& x, const T& v) { + return bool(x) ? *x <= v : true; +} + +template +constexpr bool operator>=(const T& v, const optional& x) { + return bool(x) ? v >= *x : true; +} + +// 20.5.12, Specialized algorithms +template +void swap(optional& x, optional& y) noexcept(noexcept(x.swap(y))) { + x.swap(y); +} + +template +constexpr optional::type> make_optional(T&& v) { + return optional::type>(constexpr_forward(v)); +} + +template +constexpr optional make_optional(std::reference_wrapper v) { + return optional(v.get()); +} + +} // namespace tensorpipe + +namespace std { +template +struct hash> { + typedef typename hash::result_type result_type; + typedef tensorpipe::optional argument_type; + + constexpr result_type operator()(argument_type const& arg) const { + return arg ? std::hash{}(*arg) : result_type{}; + } +}; + +template +struct hash> { + typedef typename hash::result_type result_type; + typedef tensorpipe::optional argument_type; + + constexpr result_type operator()(argument_type const& arg) const { + return arg ? 
std::hash{}(*arg) : result_type{}; + } +}; +} // namespace std + +#undef TR2_OPTIONAL_REQUIRES +#undef TR2_OPTIONAL_ASSERTED_EXPRESSION diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/context.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/context.h new file mode 100644 index 0000000000000000000000000000000000000000..16b795b50abc1e20254c4ce5ad2eb0c1da940078 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/context.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include +#include + +#include + +#include + +namespace tensorpipe { + +class ContextImpl; +class Listener; +class Pipe; + +class ContextOptions { + public: + // The name should be a semantically meaningful description of this context. + // It will only be used for logging and debugging purposes, to identify the + // endpoints of a pipe. + ContextOptions&& name(std::string name) && { + name_ = std::move(name); + return std::move(*this); + } + + private: + std::string name_; + + friend ContextImpl; +}; + +class PipeOptions { + public: + // The name should be a semantically meaningful description of the context + // that the pipe is connecting to. It will only be used for logging and + // debugging purposes, to identify the endpoints of a pipe. + PipeOptions&& remoteName(std::string remoteName) && { + remoteName_ = std::move(remoteName); + return std::move(*this); + } + + private: + std::string remoteName_; + + friend ContextImpl; +}; + +class Context final { + public: + explicit Context(ContextOptions opts = ContextOptions()); + + void registerTransport( + int64_t priority, + std::string transport, + std::shared_ptr context); + + void registerChannel( + int64_t priority, + std::string channel, + std::shared_ptr context); + + std::shared_ptr listen(const std::vector& urls); + + std::shared_ptr connect( + const std::string& url, + PipeOptions opts = PipeOptions()); + + // Put the context in a terminal state, in turn closing all of its pipes and + // listeners, and release its resources. This may be done asynchronously, in + // background. + void close(); + + // Wait for all resources to be released and all background activity to stop. + void join(); + + ~Context(); + + private: + // The implementation is managed by a shared_ptr because each child object + // will also hold a shared_ptr to it. However, its lifetime is tied to the one + // of this public object since when the latter is destroyed the implementation + // is closed and joined. + const std::shared_ptr impl_; +}; + +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/error.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/error.h new file mode 100644 index 0000000000000000000000000000000000000000..54842e908ab0db832048e59ae4c9886a66eef469 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/error.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + +#include + +#include + +namespace tensorpipe { + +class LogicError final : public BaseError { + public: + explicit LogicError(std::string reason) : reason_(std::move(reason)) {} + + std::string what() const override; + + private: + const std::string reason_; +}; + +class ContextClosedError final : public BaseError { + public: + explicit ContextClosedError() {} + + std::string what() const override; +}; + +class ListenerClosedError final : public BaseError { + public: + explicit ListenerClosedError() {} + + std::string what() const override; +}; + +class PipeClosedError final : public BaseError { + public: + explicit PipeClosedError() {} + + std::string what() const override; +}; + +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/listener.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/listener.h new file mode 100644 index 0000000000000000000000000000000000000000..d11e4af333e059bfa05e3a4968bd54a608833320 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/listener.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace tensorpipe { + +class ContextImpl; +class ListenerImpl; +class Pipe; + +// The listener. +// +// Listeners are used to produce pipes. Depending on the type of the +// context, listeners may use a variety of addresses to listen on. For +// example, for TCP/IP sockets they listen on an IPv4 or IPv6 address, +// for Unix domain sockets they listen on a path, etcetera. +// +// A pipe can only be accepted from this listener after it has been +// fully established. This means that both its connection and all its +// side channels have been established. +// +class Listener final { + // Use the passkey idiom to allow make_shared to call what should be a private + // constructor. See https://abseil.io/tips/134 for more information. + struct ConstructorToken {}; + + public: + Listener( + ConstructorToken token, + std::shared_ptr context, + std::string id, + const std::vector& urls); + + // + // Entry points for user code + // + + using accept_callback_fn = + std::function)>; + + void accept(accept_callback_fn fn); + + // Returns map with the materialized address of listeners by transport. + // + // If you don't bind a transport listener to a specific port or address, it + // may generate its address automatically. Then, in order to connect to the + // listener, the user must use a separate mechanism to communicate the + // materialized address to whoever wants to connect. + // + const std::map& addresses() const; + + // Returns materialized address for specific transport. + // + // See `addresses()` for more information. + // + const std::string& address(const std::string& transport) const; + + // Returns URL with materialized address for specific transport. + // + // See `addresses()` for more information. + // + std::string url(const std::string& transport) const; + + // Put the listener in a terminal state, aborting its pending operations and + // rejecting future ones, and release its resrouces. This may be carried out + // asynchronously, in background. 
Since the pipes may occasionally use the + // listener to open new connections, closing a listener may trigger errors + // in the pipes. + void close(); + + ~Listener(); + + private: + // Using a shared_ptr allows us to detach the lifetime of the implementation + // from the public object's one and perform the destruction asynchronously. + const std::shared_ptr impl_; + + // Allow context to access constructor token. + friend ContextImpl; +}; + +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/message.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/message.h new file mode 100644 index 0000000000000000000000000000000000000000..364962ebcae3ed521978f438d11d00459b49f8d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/message.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +#include +#include + +namespace tensorpipe { + +// Messages consist of a primary buffer and zero or more separate +// buffers. The primary buffer is always a host-side memory region that +// contains a serialized version of the message we're dealing with. This +// serialized message, in turn, may have references to the separate +// buffers that accompany the primary buffer. These separate buffers may +// point to any type of memory, host-side or device-side. +// +class Message final { + public: + std::string metadata; + + struct Payload { + void* data{nullptr}; + size_t length{0}; + + // Users may include arbitrary metadata in the following fields. + // This may contain allocation hints for the receiver, for example. + std::string metadata; + }; + + // Holds the payloads that are transferred over the primary connection. + std::vector payloads; + + struct Tensor { + tensorpipe::Buffer buffer; + size_t length{0}; + + // Users may optionally specify the target device, on which the receiver + // should allocate memory for this tensor. If left unset, the receiver will + // choose one at their convenience. + optional targetDevice; + + // Users may include arbitrary metadata in the following field. + // This may contain allocation hints for the receiver, for example. + std::string metadata; + }; + + // Holds the tensors that are offered to the side channels. + std::vector tensors; +}; + +// Descriptors consist of metadata required by the receiver to allocate memory +// for an incoming message. +class Descriptor final { + public: + std::string metadata; + + struct Payload { + size_t length{0}; + std::string metadata; + }; + std::vector payloads; + + struct Tensor { + size_t length{0}; + + // This is the sender-side device from which this tensor is being sent. + Device sourceDevice; + + // The sender may optionally specify a target device, in which case the + // receiver must allocate memory for this tensor on the specified device. + optional targetDevice; + + std::string metadata; + }; + std::vector tensors; +}; + +// Allocations consist of actual memory allocations provided by the receiver for +// an incoming message. They must match the length and target devices specified +// in the corresponding Descriptor. 
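+// For each payload the receiver supplies a raw host-side pointer to write
+// into, while for each tensor it supplies a Buffer allocated on the device it
+// chose (or on the sender-requested target device, when one was specified).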
+class Allocation final { + public: + struct Payload { + void* data{nullptr}; + }; + std::vector payloads; + + struct Tensor { + tensorpipe::Buffer buffer; + }; + std::vector tensors; +}; + +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/pipe.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/pipe.h new file mode 100644 index 0000000000000000000000000000000000000000..be7ca2bfdf6c41a2b4c97854066cb39b73c94de5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/core/pipe.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +#include +#include +#include + +namespace tensorpipe { + +class ContextImpl; +class ListenerImpl; +class PipeImpl; + +// The pipe. +// +// Pipes represent a set of connections between a pair of processes. +// Unlike POSIX pipes, they are message oriented instead of byte +// oriented. Messages that are sent through the pipe may use whatever +// channels are at their disposal to make it happen. If the pair of +// processes happen to be colocated on the same machine, they may +// leverage a region of shared memory to communicate the primary +// buffer of a message. Secondary buffers may use shared memory as +// well, if they're located in CPU memory, or use a CUDA device to +// device copy if they're located in NVIDIA GPU memory. If the pair is +// located across the world, they may simply use a set of TCP +// connections to communicate. +// +class Pipe final { + // Use the passkey idiom to allow make_shared to call what should be a private + // constructor. See https://abseil.io/tips/134 for more information. + struct ConstructorToken {}; + + public: + // + // Initialization + // + + Pipe( + ConstructorToken token, + std::shared_ptr context, + std::string id, + std::string remoteName, + const std::string& url); + + Pipe(ConstructorToken token, std::shared_ptr impl); + + // + // Entry points for user code + // + + using read_descriptor_callback_fn = + std::function; + + void readDescriptor(read_descriptor_callback_fn fn); + + using read_callback_fn = std::function; + + void read(Allocation allocation, read_callback_fn fn); + + using write_callback_fn = std::function; + + void write(Message message, write_callback_fn fn); + + // Retrieve the user-defined name that was given to the constructor of the + // context on the remote side, if any (if not, this will be the empty string). + // This is intended to help in logging and debugging only. + const std::string& getRemoteName(); + + // Put the pipe in a terminal state, aborting its pending operations and + // rejecting future ones, and release its resrouces. This may be carried out + // asynchronously, in background. + void close(); + + ~Pipe(); + + private: + // Using a shared_ptr allows us to detach the lifetime of the implementation + // from the public object's one and perform the destruction asynchronously. + const std::shared_ptr impl_; + + // Allow context to access constructor token. + friend ContextImpl; + // Allow listener to access constructor token. 
+ friend ListenerImpl; +}; + +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/context.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/context.h new file mode 100644 index 0000000000000000000000000000000000000000..9344a2e480b24bd77b8c108b8ef1adb1404af616 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/context.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include + +namespace tensorpipe { +namespace transport { + +class Connection; +class Listener; + +class Context { + public: + virtual std::shared_ptr connect(std::string addr) = 0; + + virtual std::shared_ptr listen(std::string addr) = 0; + + // Return whether the context is able to operate correctly. + // + // Some transport types may be unable to perform as intended under + // some circumstances (e.g., specialized hardware unavailable, lack + // of permissions). They can report it through this method in order + // for the core context to avoid registering them in the first place. + // + virtual bool isViable() const = 0; + + // Return string to describe the domain for this context. + // + // Two processes with a context of the same type can connect to each + // other if one side's domain descriptor is "accepted" by the other + // one, using the canCommunicateWithRemote method below. That method + // must be symmetric, and unless overridden defaults to string + // comparison. + // + // For example, for a transport that leverages TCP/IP, this may be + // as simple as the address family (assuming we can route between + // any two processes). For a transport that leverages shared memory, + // this descriptor must uniquely identify the machine, such that + // only co-located processes generate the same domain descriptor. + // + virtual const std::string& domainDescriptor() const = 0; + + // Compare local and remote domain descriptor for compatibility. + // + // Determine whether a connection can be opened between this context + // and a remote one that has the given domain descriptor. This + // function needs to be symmetric: if we called this method on the + // remote context with the local descriptor we should get the same + // answer. Unless overridden it defaults to string comparison. + // + virtual bool canCommunicateWithRemote( + const std::string& remoteDomainDescriptor) const { + return domainDescriptor() == remoteDomainDescriptor; + } + + // Tell the context what its identifier is. + // + // This is only supposed to be called from the high-level context or from + // channel contexts. It will only used for logging and debugging purposes. + virtual void setId(std::string id) = 0; + + virtual void close() = 0; + + virtual void join() = 0; + + virtual ~Context() = default; +}; + +} // namespace transport +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/error.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/error.h new file mode 100644 index 0000000000000000000000000000000000000000..6ba124bbeca26608fb6983cb1c7bde7cee79d202 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/error.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { + +class ContextClosedError final : public BaseError { + public: + ContextClosedError() {} + + std::string what() const override; +}; + +class ListenerClosedError final : public BaseError { + public: + ListenerClosedError() {} + + std::string what() const override; +}; + +class ConnectionClosedError final : public BaseError { + public: + ConnectionClosedError() {} + + std::string what() const override; +}; + +class ContextNotViableError final : public BaseError { + public: + ContextNotViableError() {} + + std::string what() const override; +}; + +} // namespace transport +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/error.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/error.h new file mode 100644 index 0000000000000000000000000000000000000000..8b690ff6cbd127ca22011bde8672e2392f3ccad4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/error.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace ibv { + +class IbvError final : public BaseError { + public: + explicit IbvError(std::string error) : error_(error) {} + + std::string what() const override; + + private: + std::string error_; +}; + +class GetaddrinfoError final : public BaseError { + public: + explicit GetaddrinfoError(int error) : error_(error) {} + + std::string what() const override; + + private: + int error_; +}; + +class NoAddrFoundError final : public BaseError { + public: + NoAddrFoundError() {} + + std::string what() const override; +}; + +} // namespace ibv +} // namespace transport +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/factory.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..17372df56aabb476d48262ad6ad29909eb6e51c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/factory.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace ibv { + +std::shared_ptr create(); + +} // namespace ibv +} // namespace transport +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/utility.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/utility.h new file mode 100644 index 0000000000000000000000000000000000000000..3cffa2f42b3f56ebc38677f6d781a38b81cc9704 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/utility.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace ibv { + +std::tuple lookupAddrForIface(std::string iface); + +std::tuple lookupAddrForHostname(); + +} // namespace ibv +} // namespace transport +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/shm/factory.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/shm/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..ae5e6db0a18adef56f21d747dd2fa28e0f6d47b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/shm/factory.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace shm { + +std::shared_ptr create(); + +} // namespace shm +} // namespace transport +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/error.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/error.h new file mode 100644 index 0000000000000000000000000000000000000000..19333e2e8647ad9758e92094c63f25ba8ee60177 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/error.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace uv { + +class UVError final : public BaseError { + public: + explicit UVError(int error) : error_(error) {} + + std::string what() const override; + + private: + int error_; +}; + +class NoAddrFoundError final : public BaseError { + public: + NoAddrFoundError() {} + + std::string what() const override; +}; + +} // namespace uv +} // namespace transport +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/factory.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..364a5b61015cc1a994078b63461ebcd71a874e21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/factory.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace uv { + +std::shared_ptr create(); + +} // namespace uv +} // namespace transport +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/utility.h b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/utility.h new file mode 100644 index 0000000000000000000000000000000000000000..0a4afba0c995086738446a954dde27c29ea8031d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/utility.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include + +#include + +#include +#include + +namespace tensorpipe { +namespace transport { +namespace uv { + +std::tuple lookupAddrForIface(std::string iface); + +std::tuple lookupAddrForHostname(); + +// Try to replicate the same logic used by NCCL to find a node's own address. +// Roughly, it returns the "first" usable address it can find, and prioritizes +// the interfaces with an `ib` prefix and de-prioritizes those with a `docker` +// or `lo` prefix. It can optionally only return only IPv4 or IPv4 addresses. +std::tuple lookupAddrLikeNccl( + optional familyFilter = nullopt); + +} // namespace uv +} // namespace transport +} // namespace tensorpipe diff --git a/venv/lib/python3.10/site-packages/torch/sparse/__init__.py b/venv/lib/python3.10/site-packages/torch/sparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cfd2ece75b09e9a670328808dd00b05eec5d6ff4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/sparse/__init__.py @@ -0,0 +1,604 @@ +# The Tensor classes are added to this module by python_tensor.cpp +from typing import Optional, Tuple, List, Union, Any + +import torch +from torch._C import _add_docstr, _sparse # type: ignore[attr-defined] +from torch import Tensor + +# Semi structured sparsity support +from .semi_structured import ( + SparseSemiStructuredTensor, + SparseSemiStructuredTensorCUSPARSELT, + SparseSemiStructuredTensorCUTLASS, + to_sparse_semi_structured +) + +# A workaround to support both TorchScript and MyPy: +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from torch.types import _dtype as DType + DimOrDims = Optional[Union[int, Tuple[int], List[int]]] +else: + # The JIT doesn't understand Union, nor torch.dtype here + DType = int + DimOrDims = Optional[Tuple[int]] + + +__all__ = [ + 'addmm', + 'check_sparse_tensor_invariants', + 'mm', + 'sum', + 'softmax', + 'log_softmax', + 'SparseSemiStructuredTensor', + 'SparseSemiStructuredTensorCUTLASS', + 'SparseSemiStructuredTensorCUSPARSELT', + 'to_sparse_semi_structured', + 'as_sparse_gradcheck', +] + +addmm = _add_docstr(_sparse._sparse_addmm, r""" +sparse.addmm(mat, mat1, mat2, *, beta=1., alpha=1.) -> Tensor + +This function does exact same thing as :func:`torch.addmm` in the forward, +except that it supports backward for sparse COO matrix :attr:`mat1`. +When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`. +When inputs are COO tensors, this function also supports backward for both inputs. + +Supports both CSR and COO storage formats. + +.. note:: + This function doesn't support computing derivaties with respect to CSR matrices. 
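+
+For example (a minimal sketch), gradients flow back to a COO :attr:`mat1`::
+
+    >>> mat = torch.zeros(2, 2)
+    >>> mat1 = torch.eye(2).to_sparse().requires_grad_()
+    >>> mat2 = torch.ones(2, 2)
+    >>> out = torch.sparse.addmm(mat, mat1, mat2)  # dense (2, 2) result
+    >>> out.sum().backward()                       # mat1.grad is a sparse COO tensor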
+ +Args: + mat (Tensor): a dense matrix to be added + mat1 (Tensor): a sparse matrix to be multiplied + mat2 (Tensor): a dense matrix to be multiplied + beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) +""") + + +mm = _add_docstr(_sparse._sparse_mm, r""" + Performs a matrix multiplication of the sparse matrix :attr:`mat1` + and the (sparse or strided) matrix :attr:`mat2`. Similar to :func:`torch.mm`, if :attr:`mat1` is a + :math:`(n \times m)` tensor, :attr:`mat2` is a :math:`(m \times p)` tensor, out will be a + :math:`(n \times p)` tensor. + When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`. + When inputs are COO tensors, this function also supports backward for both inputs. + + Supports both CSR and COO storage formats. + +.. note:: + This function doesn't support computing derivaties with respect to CSR matrices. + + This function also additionally accepts an optional :attr:`reduce` argument that allows + specification of an optional reduction operation, mathematically performs the following operation: + +.. math:: + + z_{ij} = \bigoplus_{k = 0}^{K - 1} x_{ik} y_{kj} + +where :math:`\bigoplus` defines the reduce operator. :attr:`reduce` is implemented only for +CSR storage format on CPU device. + +Args: + mat1 (Tensor): the first sparse matrix to be multiplied + mat2 (Tensor): the second matrix to be multiplied, which could be sparse or dense + reduce (str, optional): the reduction operation to apply for non-unique indices + (:obj:`"sum"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`). Default :obj:`"sum"`. + +Shape: + The format of the output tensor of this function follows: + - sparse x sparse -> sparse + - sparse x dense -> dense + +Example:: + + >>> a = torch.tensor([[1., 0, 2], [0, 3, 0]]).to_sparse().requires_grad_() + >>> a + tensor(indices=tensor([[0, 0, 1], + [0, 2, 1]]), + values=tensor([1., 2., 3.]), + size=(2, 3), nnz=3, layout=torch.sparse_coo, requires_grad=True) + >>> b = torch.tensor([[0, 1.], [2, 0], [0, 0]], requires_grad=True) + >>> b + tensor([[0., 1.], + [2., 0.], + [0., 0.]], requires_grad=True) + >>> y = torch.sparse.mm(a, b) + >>> y + tensor([[0., 1.], + [6., 0.]], grad_fn=) + >>> y.sum().backward() + >>> a.grad + tensor(indices=tensor([[0, 0, 1], + [0, 2, 1]]), + values=tensor([1., 0., 2.]), + size=(2, 3), nnz=3, layout=torch.sparse_coo) + >>> c = a.detach().to_sparse_csr() + >>> c + tensor(crow_indices=tensor([0, 2, 3]), + col_indices=tensor([0, 2, 1]), + values=tensor([1., 2., 3.]), size=(2, 3), nnz=3, + layout=torch.sparse_csr) + >>> y1 = torch.sparse.mm(c, b, 'sum') + >>> y1 + tensor([[0., 1.], + [6., 0.]], grad_fn=) + >>> y2 = torch.sparse.mm(c, b, 'max') + >>> y2 + tensor([[0., 1.], + [6., 0.]], grad_fn=) +""") + + +sampled_addmm = _add_docstr(_sparse.sparse_sampled_addmm, r""" +sparse.sampled_addmm(input, mat1, mat2, *, beta=1., alpha=1., out=None) -> Tensor + +Performs a matrix multiplication of the dense matrices :attr:`mat1` and :attr:`mat2` at the locations +specified by the sparsity pattern of :attr:`input`. The matrix :attr:`input` is added to the final result. + +Mathematically this performs the following operation: + +.. math:: + + \text{out} = \alpha\ (\text{mat1} \mathbin{@} \text{mat2})*\text{spy}(\text{input}) + \beta\ \text{input} + +where :math:`\text{spy}(\text{input})` is the sparsity pattern matrix of :attr:`input`, :attr:`alpha` +and :attr:`beta` are the scaling factors. 
+:math:`\text{spy}(\text{input})` has value 1 at the positions where :attr:`input` has non-zero values, and 0 elsewhere. + +.. note:: + :attr:`input` must be a sparse CSR tensor. :attr:`mat1` and :attr:`mat2` must be dense tensors. + +Args: + input (Tensor): a sparse CSR matrix of shape `(m, n)` to be added and used to compute + the sampled matrix multiplication + mat1 (Tensor): a dense matrix of shape `(m, k)` to be multiplied + mat2 (Tensor): a dense matrix of shape `(k, n)` to be multiplied + +Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Examples:: + + >>> input = torch.eye(3, device='cuda').to_sparse_csr() + >>> mat1 = torch.randn(3, 5, device='cuda') + >>> mat2 = torch.randn(5, 3, device='cuda') + >>> torch.sparse.sampled_addmm(input, mat1, mat2) + tensor(crow_indices=tensor([0, 1, 2, 3]), + col_indices=tensor([0, 1, 2]), + values=tensor([ 0.2847, -0.7805, -0.1900]), device='cuda:0', + size=(3, 3), nnz=3, layout=torch.sparse_csr) + >>> torch.sparse.sampled_addmm(input, mat1, mat2).to_dense() + tensor([[ 0.2847, 0.0000, 0.0000], + [ 0.0000, -0.7805, 0.0000], + [ 0.0000, 0.0000, -0.1900]], device='cuda:0') + >>> torch.sparse.sampled_addmm(input, mat1, mat2, beta=0.5, alpha=0.5) + tensor(crow_indices=tensor([0, 1, 2, 3]), + col_indices=tensor([0, 1, 2]), + values=tensor([ 0.1423, -0.3903, -0.0950]), device='cuda:0', + size=(3, 3), nnz=3, layout=torch.sparse_csr) +""") + +def sum(input: Tensor, dim: DimOrDims = None, + dtype: Optional[DType] = None) -> Tensor: + r"""Return the sum of each row of the given sparse tensor. + + Returns the sum of each row of the sparse tensor :attr:`input` in the given + dimensions :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. When sum over all ``sparse_dim``, this method + returns a dense tensor instead of a sparse tensor. + + All summed :attr:`dim` are squeezed (see :func:`torch.squeeze`), resulting an output + tensor having :attr:`dim` fewer dimensions than :attr:`input`. + + During backward, only gradients at ``nnz`` locations of :attr:`input` + will propagate back. Note that the gradients of :attr:`input` is coalesced. + + Args: + input (Tensor): the input sparse tensor + dim (int or tuple of ints): a dimension or a list of dimensions to reduce. Default: reduce + over all dims. + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: dtype of :attr:`input`. 
+ + Example:: + + >>> nnz = 3 + >>> dims = [5, 5, 2, 3] + >>> I = torch.cat([torch.randint(0, dims[0], size=(nnz,)), + torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz) + >>> V = torch.randn(nnz, dims[2], dims[3]) + >>> size = torch.Size(dims) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> S = torch.sparse_coo_tensor(I, V, size) + >>> S + tensor(indices=tensor([[2, 0, 3], + [2, 4, 1]]), + values=tensor([[[-0.6438, -1.6467, 1.4004], + [ 0.3411, 0.0918, -0.2312]], + + [[ 0.5348, 0.0634, -2.0494], + [-0.7125, -1.0646, 2.1844]], + + [[ 0.1276, 0.1874, -0.6334], + [-1.9682, -0.5340, 0.7483]]]), + size=(5, 5, 2, 3), nnz=3, layout=torch.sparse_coo) + + # when sum over only part of sparse_dims, return a sparse tensor + >>> torch.sparse.sum(S, [1, 3]) + tensor(indices=tensor([[0, 2, 3]]), + values=tensor([[-1.4512, 0.4073], + [-0.8901, 0.2017], + [-0.3183, -1.7539]]), + size=(5, 2), nnz=3, layout=torch.sparse_coo) + + # when sum over all sparse dim, return a dense tensor + # with summed dims squeezed + >>> torch.sparse.sum(S, [0, 1, 3]) + tensor([-2.6596, -1.1450]) + """ + if dtype is None: + if dim is not None: + return torch._sparse_sum(input, dim) + else: + return torch._sparse_sum(input) + else: + if dim is not None: + return torch._sparse_sum(input, dim, dtype=dtype) + else: + return torch._sparse_sum(input, dtype=dtype) + + +softmax = _add_docstr(_sparse._sparse_softmax, r""" +sparse.softmax(input, dim, *, dtype=None) -> Tensor + +Applies a softmax function. + +Softmax is defined as: + +:math:`\text{Softmax}(x_{i}) = \frac{exp(x_i)}{\sum_j exp(x_j)}` + +where :math:`i, j` run over sparse tensor indices and unspecified +entries are ignores. This is equivalent to defining unspecified +entries as negative infinity so that :math:`exp(x_k) = 0` when the +entry with index :math:`k` has not specified. + +It is applied to all slices along `dim`, and will re-scale them so +that the elements lie in the range `[0, 1]` and sum to 1. + +Args: + input (Tensor): input + dim (int): A dimension along which softmax will be computed. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. This is useful for preventing data type + overflows. Default: None +""") + + +log_softmax = _add_docstr(_sparse._sparse_log_softmax, r""" +sparse.log_softmax(input, dim, *, dtype=None) -> Tensor + +Applies a softmax function followed by logarithm. + +See :class:`~torch.sparse.softmax` for more details. + +Args: + input (Tensor): input + dim (int): A dimension along which softmax will be computed. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. This is useful for preventing data type + overflows. Default: None +""") + + +spdiags = _add_docstr( + _sparse._spdiags, + r""" +sparse.spdiags(diagonals, offsets, shape, layout=None) -> Tensor + +Creates a sparse 2D tensor by placing the values from rows of +:attr:`diagonals` along specified diagonals of the output + +The :attr:`offsets` tensor controls which diagonals are set. + +- If :attr:`offsets[i]` = 0, it is the main diagonal +- If :attr:`offsets[i]` < 0, it is below the main diagonal +- If :attr:`offsets[i]` > 0, it is above the main diagonal + +The number of rows in :attr:`diagonals` must match the length of :attr:`offsets`, +and an offset may not be repeated. 
+ +Args: + diagonals (Tensor): Matrix storing diagonals row-wise + offsets (Tensor): The diagonals to be set, stored as a vector + shape (2-tuple of ints): The desired shape of the result +Keyword args: + layout (:class:`torch.layout`, optional): The desired layout of the + returned tensor. ``torch.sparse_coo``, ``torch.sparse_csc`` and ``torch.sparse_csr`` + are supported. Default: ``torch.sparse_coo`` + +Examples: + +Set the main and first two lower diagonals of a matrix:: + + >>> diags = torch.arange(9).reshape(3, 3) + >>> diags + tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3)) + >>> s + tensor(indices=tensor([[0, 1, 2, 1, 2, 2], + [0, 1, 2, 0, 1, 0]]), + values=tensor([0, 1, 2, 3, 4, 6]), + size=(3, 3), nnz=6, layout=torch.sparse_coo) + >>> s.to_dense() + tensor([[0, 0, 0], + [3, 1, 0], + [6, 4, 2]]) + + +Change the output layout:: + + >>> diags = torch.arange(9).reshape(3, 3) + >>> diags + tensor([[0, 1, 2],[3, 4, 5], [6, 7, 8]) + >>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3), layout=torch.sparse_csr) + >>> s + tensor(crow_indices=tensor([0, 1, 3, 6]), + col_indices=tensor([0, 0, 1, 0, 1, 2]), + values=tensor([0, 3, 1, 6, 4, 2]), size=(3, 3), nnz=6, + layout=torch.sparse_csr) + >>> s.to_dense() + tensor([[0, 0, 0], + [3, 1, 0], + [6, 4, 2]]) + +Set partial diagonals of a large output:: + + >>> diags = torch.tensor([[1, 2], [3, 4]]) + >>> offsets = torch.tensor([0, -1]) + >>> torch.sparse.spdiags(diags, offsets, (5, 5)).to_dense() + tensor([[1, 0, 0, 0, 0], + [3, 2, 0, 0, 0], + [0, 4, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]]) + +.. note:: + + When setting the values along a given diagonal the index into the diagonal + and the index into the row of :attr:`diagonals` is taken as the + column index in the output. This has the effect that when setting a diagonal + with a positive offset `k` the first value along that diagonal will be + the value in position `k` of the row of :attr:`diagonals` + +Specifying a positive offset:: + + >>> diags = torch.tensor([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + >>> torch.sparse.spdiags(diags, torch.tensor([0, 1, 2]), (5, 5)).to_dense() + tensor([[1, 2, 3, 0, 0], + [0, 2, 3, 0, 0], + [0, 0, 3, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]]) +""") + + +class check_sparse_tensor_invariants: + """A tool to control checking sparse tensor invariants. + + The following options exists to manage sparsr tensor invariants + checking in sparse tensor construction: + + 1. Using a context manager: + + .. code:: python + + with torch.sparse.check_sparse_tensor_invariants(): + run_my_model() + + 2. Using a procedural approach: + + .. code:: python + + prev_checks_enabled = torch.sparse.check_sparse_tensor_invariants.is_enabled() + torch.sparse.check_sparse_tensor_invariants.enable() + + run_my_model() + + if not prev_checks_enabled: + torch.sparse.check_sparse_tensor_invariants.disable() + + 3. Using function decoration: + + .. code:: python + + @torch.sparse.check_sparse_tensor_invariants() + def run_my_model(): + ... + + run_my_model() + + 4. Using ``check_invariants`` keyword argument in sparse tensor constructor call. + For example: + + >>> torch.sparse_csr_tensor([0, 1, 3], [0, 1], [1, 2], check_invariants=True) + Traceback (most recent call last): + File "", line 1, in + RuntimeError: `crow_indices[..., -1] == nnz` is not satisfied. + """ + + @staticmethod + def is_enabled(): + r"""Return True if the sparse tensor invariants checking is enabled. + + .. 
note:: + + Use :func:`torch.sparse.check_sparse_tensor_invariants.enable` or + :func:`torch.sparse.check_sparse_tensor_invariants.disable` to + manage the state of the sparse tensor invariants checks. + """ + return torch._C._check_sparse_tensor_invariants() + + @staticmethod + def enable(): + r"""Enable sparse tensor invariants checking in sparse tensor constructors. + + .. note:: + + By default, the sparse tensor invariants checks are disabled. Use + :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled` to + retrieve the current state of sparse tensor invariants checking. + + .. note:: + + The sparse tensor invariants check flag is effective to all sparse + tensor constructors, both in Python and ATen. + + The flag can be locally overridden by the ``check_invariants`` + optional argument of the sparse tensor constructor functions. + """ + torch._C._set_check_sparse_tensor_invariants(True) + + @staticmethod + def disable(): + r"""Disable sparse tensor invariants checking in sparse tensor constructors. + + See :func:`torch.sparse.check_sparse_tensor_invariants.enable` for more information. + """ + torch._C._set_check_sparse_tensor_invariants(False) + + # context manager support + def __init__(self, enable=True): + self.state = enable + self.saved_state : Optional[bool] = None + + def __enter__(self): + if self.saved_state is not None: + raise RuntimeError('This context manager instance is already activated.' + ' Use a different context manager instance for context nesting.') + self.saved_state = self.is_enabled() + torch._C._set_check_sparse_tensor_invariants(self.state) + + def __exit__(self, type, value, traceback): + assert self.saved_state is not None + torch._C._set_check_sparse_tensor_invariants(self.saved_state) + self.saved_state = None + + # decorator support + def __call__(self, mth): + + def test_mth(*args, **kwargs): + with type(self)(self.state): + return mth(*args, **kwargs) + + return test_mth + + +def as_sparse_gradcheck(gradcheck): + """Decorate function, to extend gradcheck for sparse tensors. + + Decorator for torch.autograd.gradcheck or its functools.partial + variants that extends the gradcheck function with support to input + functions that operate on or/and return sparse tensors. + + The specified gradcheck function itself is guaranteed to operate + on strided tensors only. + + For example: + + >>> gradcheck = torch.sparse.as_sparse_gradcheck(torch.autograd.gradcheck) + >>> x = torch.tensor([[0, 1], [2, 3]], dtype=torch.float64).to_sparse_coo().requires_grad_(True) + >>> gradcheck(lambda x: x.to_sparse_csr(), x) + True + """ + + def gradcheck_with_sparse_support(func, inputs, **kwargs): + """ + Create gradcheck with support for sparse tensors. + + Same as :func:`torch.autograd.gradcheck` but with sparse tensors inputs and outputs support. 
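+
+        The ``masked`` keyword argument (default ``False``) is consumed by this
+        wrapper; when it is ``False``, unspecified elements of sparse inputs are
+        materialized as explicit zeros before gradients are checked.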
+ """ + masked = kwargs.pop('masked', False) + sparse_layouts = {torch.sparse_coo, torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} + sparse_compressed_layouts = {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} + sparse_block_layouts = {torch.sparse_bsr, torch.sparse_bsc} + STRIDED_REPRESENTATION = '__STRIDED_REPRESENTATION__' + + def convert_to_strided_representation(args): + """Convert differentiable non-strided tensors to a representation containing differentiable strided tensors.""" + if not isinstance(args, (list, tuple)): + args = args, + new_args: List[Any] = [] + for obj in args: + if isinstance(obj, torch.Tensor) and obj.requires_grad and obj.layout in sparse_layouts: + d = dict(layout=obj.layout, shape=obj.shape) + if not masked: + # Materialize unspecified elements with zero values + batch_dim = obj.ndim - obj.dense_dim() - obj.sparse_dim() + blocksize = obj.values().shape[batch_dim + 1:batch_dim + 3] if obj.layout in sparse_block_layouts else None + full_mask = torch.ones(obj.shape, device=obj.device, dtype=torch.bool).to_sparse( + layout=obj.layout, blocksize=blocksize, dense_dim=obj.dense_dim()) + obj = obj.to_dense().sparse_mask(full_mask) + if obj.layout is torch.sparse_coo: + d.update(indices=obj._indices(), is_coalesced=obj.is_coalesced()) + values = obj._values() + elif obj.layout in {torch.sparse_csr, torch.sparse_bsr}: + d.update(compressed_indices=obj.crow_indices(), plain_indices=obj.col_indices()) + values = obj.values() + else: + d.update(compressed_indices=obj.ccol_indices(), plain_indices=obj.row_indices()) + values = obj.values() + new_args.extend((STRIDED_REPRESENTATION, d, values.requires_grad_(True))) + else: + new_args.append(obj) + return tuple(new_args) + + def restore_from_strided_representation(args): + """Restore non-strided differentiable tensosr from their strided representations.""" + new_args = [] + args = list(args) + while args: + a = args.pop(0) + if a == STRIDED_REPRESENTATION: + d, values = args.pop(0), args.pop(0) + if d['layout'] is torch.sparse_coo: + a = torch.sparse_coo_tensor(d['indices'], values, size=d['shape'], is_coalesced=d['is_coalesced']) + elif d['layout'] in sparse_compressed_layouts: + a = torch.sparse_compressed_tensor(d['compressed_indices'], d['plain_indices'], values, + size=d['shape'], layout=d['layout']) + else: + raise NotImplementedError(f'conversion of {d["layout"]} strided representation to tensor') + new_args.append(a) + return tuple(new_args) + + def func_wrapper(*args, **kwargs): + restored_args = restore_from_strided_representation(args) + + # convert differentiable output sparse tensors to strided + # tensors: + outputs = func(*restored_args, **kwargs) + + strided_outputs = tuple(outputs) if isinstance(outputs, (list, tuple)) else (outputs,) + strided_outputs = tuple((o.to_dense(masked_grad=masked) + if isinstance(o, torch.Tensor) and o.requires_grad and o.layout in sparse_layouts else o) + for o in strided_outputs) + + return strided_outputs if isinstance(outputs, (list, tuple)) else strided_outputs[0] + + args = (func_wrapper, convert_to_strided_representation(inputs)) + + return gradcheck(*args, **kwargs) + + return gradcheck_with_sparse_support diff --git a/venv/lib/python3.10/site-packages/torch/sparse/_semi_structured_ops.py b/venv/lib/python3.10/site-packages/torch/sparse/_semi_structured_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..eaa609b342cab390090557c9650c569e4a1404a1 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/sparse/_semi_structured_ops.py @@ -0,0 +1,166 @@ +import contextlib + +import torch + +__all__ = [ + "fallback_dispatcher", + "semi_sparse_values", + "semi_sparse_indices", + "semi_sparse_t", + "semi_sparse_view", + "semi_sparse_detach", + "semi_sparse_mm", + "semi_sparse_addmm", + "semi_sparse_linear", +] + + +@contextlib.contextmanager +def no_dispatch(): + guard = torch._C._DisableTorchDispatch() + try: + yield + finally: + del guard + + +def fallback_dispatcher(func, types, args, kwargs): + with no_dispatch(): + return func(*args) + + +def semi_sparse_values(func, types, args=(), kwargs=None) -> torch.Tensor: + assert len(args) == 1 + A = args[0] + assert isinstance(A, torch.sparse.SparseSemiStructuredTensor) + assert A.packed is not None + if A.meta is None: + m, k = A.shape + num_kept_elements = m * k // 2 + return A.packed[:num_kept_elements:].view(m, -1) + else: + return A.packed.detach() + + +def semi_sparse_indices(func, types, args=(), kwargs=None) -> torch.Tensor: + assert len(args) == 1 + A = args[0] + assert isinstance(A, torch.sparse.SparseSemiStructuredTensor) + assert A.packed is not None + if A.meta is None: + m, k = A.shape + num_kept_elements = m * k // 2 + metadata = A.packed[num_kept_elements:].view(m, -1) + return metadata.view(torch.int32 if A.dtype == torch.int32 else torch.int16) + else: + return A.meta + + +def semi_sparse_t(func, types, args=(), kwargs=None) -> torch.Tensor: + assert len(args) == 1 + self = args[0] + assert isinstance(self, torch.sparse.SparseSemiStructuredTensor) + assert len(self.shape) == 2 + # Because we cannot go from the compressed representation back to the dense representation currently, + # we just keep track of how many times we have been transposed. Depending on whether the sparse matrix + # is the first or second argument, we expect an even / odd number of calls to transpose respectively. 
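+    # The transpose is therefore represented without moving any data: the
+    # packed/meta buffers are swapped with their precomputed transposed
+    # counterparts (packed_t/meta_t) in the tensor returned below.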
+ return self.__class__( + torch.Size([self.shape[-1], self.shape[0]]), + packed=self.packed_t, + meta=self.meta_t, + packed_t=self.packed, + meta_t=self.meta, + threads_masks=self.threads_masks.transpose(0, 1) + if self.threads_masks is not None + else None, + fuse_transpose_cusparselt=args[0].fuse_transpose_cusparselt, + alg_id_cusparselt=args[0].alg_id_cusparselt, + ) + + +def semi_sparse_view(func, types, args=(), kwargs=None) -> torch.Tensor: + assert len(args) == 2 + self, shape = args + if tuple(shape) != self.shape: + raise NotImplementedError( + f"`view` is not implemented for SparseSemiStructuredTensor, except for the dummy case (shape={shape})" + ) + return self + + +def semi_sparse_detach(func, types, args, kwargs) -> torch.Tensor: + assert len(args) == 1 + self = args[0] + return self.__class__( + shape=self.shape, + packed=self.packed, + meta=self.meta, + packed_t=self.packed_t, + meta_t=self.meta_t, + threads_masks=self.threads_masks, + requires_grad=False, + ) + + +def semi_sparse_mm(func, types, args=(), kwargs=None) -> torch.Tensor: + assert len(args) == 2 + A, B = args + if A.ndim != 2 or B.ndim != 2: + raise NotImplementedError( + "`SparseSemiStructuredTensor` matmul: Broadcasting is not implemented" + ) + if isinstance(A, torch.sparse.SparseSemiStructuredTensor): + row, col = B.shape + B_padded = A._pad_dense_input(B) + res = A._mm(B_padded) + return res[:, :col] + else: + B_t = B.t() + assert isinstance(B_t, torch.sparse.SparseSemiStructuredTensor) + row, col = A.shape + A_padded = B._pad_dense_input(A) + res = B_t._mm(A_padded.t()).t() + return res[:row, :] + + +def semi_sparse_addmm(func, types, args=(), kwargs=None) -> torch.Tensor: + assert len(args) == 3 + bias, A, B = args + if A.ndim != 2 or B.ndim != 2: + raise NotImplementedError( + "`SparseSemiStructuredTensor` matmul: Broadcasting is not implemented" + ) + if bias.ndim != 1: + raise NotImplementedError( + f"`SparseSemiStructuredTensor` matmul: only bias dim=1 supported. Shape={bias.shape}" + ) + if isinstance(A, torch.sparse.SparseSemiStructuredTensor): + raise NotImplementedError( + "`SparseSemiStructuredTensor` matmul: only operand B of `addmm` can be sparse" + ) + B_t = B.t() + assert isinstance(B_t, torch.sparse.SparseSemiStructuredTensor) + row, col = A.shape + A_padded = B_t._pad_dense_input(A) + result = B_t._mm(A_padded.t(), bias=bias).t() + return result[:row, :] + + +def semi_sparse_linear(func, types, args=(), kwargs=None) -> torch.Tensor: + assert len(args) in [2, 3] + A, B = args[:2] + bias = args[2] if len(args) == 3 else None + + shape = A.shape + A_2d = A.view(-1, shape[-1]) + + if bias is None: + res = A_2d @ B.t() + else: + res = semi_sparse_addmm( + func=None, + types=None, + args=[bias, A_2d, B.t()], + ) + + return res.view(*shape[:-1], -1)