diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..15dd6bc60ce5f9ebf707e59854bf021bfd3de221 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/RECORD @@ -0,0 +1,64 @@ +../../../bin/evaluate-cli,sha256=ebVljaZgWxr1sBQVsnDdhaYD2rXNcaw-u7JiBl8yibA,255 +evaluate-0.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +evaluate-0.4.1.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +evaluate-0.4.1.dist-info/METADATA,sha256=gyoxlsBnA-d8Kb9Bj8RKeRRdIp8gH4ILqfUeBdj4va8,9412 +evaluate-0.4.1.dist-info/RECORD,, +evaluate-0.4.1.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 +evaluate-0.4.1.dist-info/entry_points.txt,sha256=m2P3heof0lsg47nq6tYW_yUtxTfimd3RuD26Yk8KMkM,70 +evaluate-0.4.1.dist-info/top_level.txt,sha256=wBEoxird-u8p4OKDwq5z9rlfH-ybeez8rjaKNLNJ3B0,9 +evaluate/__init__.py,sha256=UNd1S0HL23X2WHwt00PRuBJG3ESebsSvJQTYqunzZYk,1754 +evaluate/__pycache__/__init__.cpython-310.pyc,, +evaluate/__pycache__/config.cpython-310.pyc,, +evaluate/__pycache__/hub.cpython-310.pyc,, +evaluate/__pycache__/info.cpython-310.pyc,, +evaluate/__pycache__/inspect.cpython-310.pyc,, +evaluate/__pycache__/loading.cpython-310.pyc,, +evaluate/__pycache__/module.cpython-310.pyc,, +evaluate/__pycache__/naming.cpython-310.pyc,, +evaluate/__pycache__/saving.cpython-310.pyc,, +evaluate/__pycache__/visualization.cpython-310.pyc,, +evaluate/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +evaluate/commands/__pycache__/__init__.cpython-310.pyc,, +evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc,, +evaluate/commands/evaluate_cli.py,sha256=w7GWb48JPjoC0BX7Jn12qtxQUBYOlZNhdg4YegA93Fw,4491 +evaluate/config.py,sha256=g4g-S6hVAw0Ys9As7gKaFP66pZeh8hoJJ5GEXaLSWV8,6648 +evaluate/evaluation_suite/__init__.py,sha256=TjcFihBDf_ZQAoIjSXPEC0iFBeEC_LFqCfXKbrkyhWs,4941 +evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc,, +evaluate/evaluator/__init__.py,sha256=JoWqRP-qCgNzDre6nO8zpJ2Iyp0eUkN7eDKPOPUXz2g,5788 +evaluate/evaluator/__pycache__/__init__.cpython-310.pyc,, +evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc,, +evaluate/evaluator/__pycache__/base.cpython-310.pyc,, +evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc,, +evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc,, +evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc,, +evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/utils.cpython-310.pyc,, +evaluate/evaluator/audio_classification.py,sha256=v5myOnm0PN8BWVnm4nWCzcyklaLtdnbOS3EJ09TPFhg,5804 +evaluate/evaluator/automatic_speech_recognition.py,sha256=jOveYJXsH-t5SzGe7FzXhnHeDKFhqWZUtK3S1l9XYus,4392 +evaluate/evaluator/base.py,sha256=TkkPa6jJWQfNgIK_FVEF24VUfuBn5aZ7Wo1hAfvJEhA,22881 +evaluate/evaluator/image_classification.py,sha256=RJ7NUS91hjZkr5JqhqtYsr5dxBkChA3Qim6An8fHT50,4751 +evaluate/evaluator/question_answering.py,sha256=ArF5BKfE9J9uC-q1GQwbvkAHw1ThgA997ERKmPS-Z4g,9566 +evaluate/evaluator/text2text_generation.py,sha256=M2itKYfIz9z_9J-Y7sXyx4HKMhQbdYwbv8oThSw8Yzw,9676 
+evaluate/evaluator/text_classification.py,sha256=g1MUwa3TCUCUBGvZDmdeJ_l8BAOgbn0Q0y4TDvep8Uk,6676 +evaluate/evaluator/text_generation.py,sha256=4ZnHweTUpvNZhaprewTPms__00I8Tnje586ZDCG_ZlU,2679 +evaluate/evaluator/token_classification.py,sha256=XMzteW1coN2e3KWmpWj-OGafj22pzMa7UiHylooirHk,11546 +evaluate/evaluator/utils.py,sha256=HDKdLWLHtfpP-Hhe9cf1TFVIRsmfNgLHifDcGYujKZs,2451 +evaluate/hub.py,sha256=ZX6VYZU0EkjTWmABuJ6Zg6oHXIT2dHkHy0u8RgyL9UQ,4550 +evaluate/info.py,sha256=l5gXfqHhj77-XvFhz57Mns-Ev-lNJsLxsyYPHPvSzj0,5490 +evaluate/inspect.py,sha256=vVSCLr7HWLxIpXzwpDPuiE5XwiP5QQ82oGkdok7aO7o,4969 +evaluate/loading.py,sha256=IdxAMbbjyAID8NFLDuOjU0WK5Vw_Ep4HoziYeu1ySMI,35228 +evaluate/module.py,sha256=Va2FrSJnTXr6P5bspjp3SXgnvdvPm6yEcAasaTX9LJU,46290 +evaluate/naming.py,sha256=Lpw8JmoJfiWs4xDUMEDzcIKO9Nw9RS2lzjeuUP-9acA,2827 +evaluate/saving.py,sha256=UoixNIHmWEceJREvGZlJNViVjRkgNf3MRflwnnhnNUA,2159 +evaluate/utils/__init__.py,sha256=kdFi2pVFSXm_y4EvvuQNnlPUkOPmGLNtc9YTfxAmdsI,1201 +evaluate/utils/__pycache__/__init__.cpython-310.pyc,, +evaluate/utils/__pycache__/file_utils.cpython-310.pyc,, +evaluate/utils/__pycache__/gradio.cpython-310.pyc,, +evaluate/utils/__pycache__/logging.cpython-310.pyc,, +evaluate/utils/file_utils.py,sha256=uGkXJYWQBKNALhdxktpQ_844jCjKLFQg6l_3KKK2zGI,22602 +evaluate/utils/gradio.py,sha256=UXGRxiPsJ41Xm5gGF7Jf_1vTOPopE_wDoBIyBS0S8d4,4434 +evaluate/utils/logging.py,sha256=nRy963i3_-H0Qcer6ETgnTFiJoQhojSiapeXQ9-eUyk,6698 +evaluate/visualization.py,sha256=m-mD6vxOIQ-_KXTues2tB4r7c4jdygBybHJeidP-jgw,9293 diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..e6adde7833123ecc7f347ea1f5492568989dfd45 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/evaluate-0.4.1.dist-info/top_level.txt @@ -0,0 +1 @@ +evaluate diff --git a/env-llmeval/lib/python3.10/site-packages/frozenlist/__init__.py b/env-llmeval/lib/python3.10/site-packages/frozenlist/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f71368bab1a1bf44abe10d73a3e005c747a3bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/frozenlist/__init__.py @@ -0,0 +1,95 @@ +import os +import sys +import types +from collections.abc import MutableSequence +from functools import total_ordering +from typing import Type + +__version__ = "1.4.1" + +__all__ = ("FrozenList", "PyFrozenList") # type: Tuple[str, ...] 
+ + +NO_EXTENSIONS = bool(os.environ.get("FROZENLIST_NO_EXTENSIONS")) # type: bool + + +@total_ordering +class FrozenList(MutableSequence): + __slots__ = ("_frozen", "_items") + + if sys.version_info >= (3, 9): + __class_getitem__ = classmethod(types.GenericAlias) + else: + + @classmethod + def __class_getitem__(cls: Type["FrozenList"]) -> Type["FrozenList"]: + return cls + + def __init__(self, items=None): + self._frozen = False + if items is not None: + items = list(items) + else: + items = [] + self._items = items + + @property + def frozen(self): + return self._frozen + + def freeze(self): + self._frozen = True + + def __getitem__(self, index): + return self._items[index] + + def __setitem__(self, index, value): + if self._frozen: + raise RuntimeError("Cannot modify frozen list.") + self._items[index] = value + + def __delitem__(self, index): + if self._frozen: + raise RuntimeError("Cannot modify frozen list.") + del self._items[index] + + def __len__(self): + return self._items.__len__() + + def __iter__(self): + return self._items.__iter__() + + def __reversed__(self): + return self._items.__reversed__() + + def __eq__(self, other): + return list(self) == other + + def __le__(self, other): + return list(self) <= other + + def insert(self, pos, item): + if self._frozen: + raise RuntimeError("Cannot modify frozen list.") + self._items.insert(pos, item) + + def __repr__(self): + return f"<FrozenList(frozen={self._frozen}, {self._items!r})>" + + def __hash__(self): + if self._frozen: + return hash(tuple(self)) + else: + raise RuntimeError("Cannot hash unfrozen list.") + + +PyFrozenList = FrozenList + + +if not NO_EXTENSIONS: + try: + from ._frozenlist import FrozenList as CFrozenList # type: ignore + except ImportError: # pragma: no cover + pass + else: + FrozenList = CFrozenList # type: ignore diff --git a/env-llmeval/lib/python3.10/site-packages/frozenlist/__init__.pyi b/env-llmeval/lib/python3.10/site-packages/frozenlist/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ae803ef6aad72f57e7379db5a2044a95f214df7b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/frozenlist/__init__.pyi @@ -0,0 +1,47 @@ +from typing import ( + Generic, + Iterable, + Iterator, + List, + MutableSequence, + Optional, + TypeVar, + Union, + overload, +) + +_T = TypeVar("_T") +_Arg = Union[List[_T], Iterable[_T]] + +class FrozenList(MutableSequence[_T], Generic[_T]): + def __init__(self, items: Optional[_Arg[_T]] = None) -> None: ... + @property + def frozen(self) -> bool: ... + def freeze(self) -> None: ... + @overload + def __getitem__(self, i: int) -> _T: ... + @overload + def __getitem__(self, s: slice) -> FrozenList[_T]: ... + @overload + def __setitem__(self, i: int, o: _T) -> None: ... + @overload + def __setitem__(self, s: slice, o: Iterable[_T]) -> None: ... + @overload + def __delitem__(self, i: int) -> None: ... + @overload + def __delitem__(self, i: slice) -> None: ... + def __len__(self) -> int: ... + def __iter__(self) -> Iterator[_T]: ... + def __reversed__(self) -> Iterator[_T]: ... + def __eq__(self, other: object) -> bool: ... + def __le__(self, other: FrozenList[_T]) -> bool: ... + def __ne__(self, other: object) -> bool: ... + def __lt__(self, other: FrozenList[_T]) -> bool: ... + def __ge__(self, other: FrozenList[_T]) -> bool: ... + def __gt__(self, other: FrozenList[_T]) -> bool: ... + def insert(self, pos: int, item: _T) -> None: ... + def __repr__(self) -> str: ... + def __hash__(self) -> int: ...
+ +# types for C accelerators are the same +CFrozenList = PyFrozenList = FrozenList diff --git a/env-llmeval/lib/python3.10/site-packages/frozenlist/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/frozenlist/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d8bbf82d98c7b48a6e9c3d157b7c3b560e2e7b3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/frozenlist/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..cf57bc3beeb2ac6d919ddd374b7adb629ebd9a23 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/frozenlist/_frozenlist.pyx b/env-llmeval/lib/python3.10/site-packages/frozenlist/_frozenlist.pyx new file mode 100644 index 0000000000000000000000000000000000000000..9ee846c1aeb17ac8521f44bcb8617f189b89e5fe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/frozenlist/_frozenlist.pyx @@ -0,0 +1,123 @@ +import sys +import types +from collections.abc import MutableSequence + + +cdef class FrozenList: + + if sys.version_info >= (3, 9): + __class_getitem__ = classmethod(types.GenericAlias) + else: + @classmethod + def __class_getitem__(cls): + return cls + + cdef readonly bint frozen + cdef list _items + + def __init__(self, items=None): + self.frozen = False + if items is not None: + items = list(items) + else: + items = [] + self._items = items + + cdef object _check_frozen(self): + if self.frozen: + raise RuntimeError("Cannot modify frozen list.") + + cdef inline object _fast_len(self): + return len(self._items) + + def freeze(self): + self.frozen = True + + def __getitem__(self, index): + return self._items[index] + + def __setitem__(self, index, value): + self._check_frozen() + self._items[index] = value + + def __delitem__(self, index): + self._check_frozen() + del self._items[index] + + def __len__(self): + return self._fast_len() + + def __iter__(self): + return self._items.__iter__() + + def __reversed__(self): + return self._items.__reversed__() + + def __richcmp__(self, other, op): + if op == 0: # < + return list(self) < other + if op == 1: # <= + return list(self) <= other + if op == 2: # == + return list(self) == other + if op == 3: # != + return list(self) != other + if op == 4: # > + return list(self) > other + if op == 5: # => + return list(self) >= other + + def insert(self, pos, item): + self._check_frozen() + self._items.insert(pos, item) + + def __contains__(self, item): + return item in self._items + + def __iadd__(self, items): + self._check_frozen() + self._items += list(items) + return self + + def index(self, item): + return self._items.index(item) + + def remove(self, item): + self._check_frozen() + self._items.remove(item) + + def clear(self): + self._check_frozen() + self._items.clear() + + def extend(self, items): + self._check_frozen() + self._items += list(items) + + def reverse(self): + self._check_frozen() + self._items.reverse() + + def pop(self, index=-1): + self._check_frozen() + return self._items.pop(index) + + def append(self, item): + self._check_frozen() + return self._items.append(item) + + def count(self, item): + return self._items.count(item) + 
+ + def __repr__(self): + return '<FrozenList(frozen={}, {!r})>'.format(self.frozen, + self._items) + + def __hash__(self): + if self.frozen: + return hash(tuple(self._items)) + else: + raise RuntimeError("Cannot hash unfrozen list.") + + +MutableSequence.register(FrozenList) diff --git a/env-llmeval/lib/python3.10/site-packages/frozenlist/py.typed b/env-llmeval/lib/python3.10/site-packages/frozenlist/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..f5642f79f21d872f010979dcf6f0c4a415acc19d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/frozenlist/py.typed @@ -0,0 +1 @@ +Marker diff --git a/env-llmeval/lib/python3.10/site-packages/python_dateutil-2.9.0.post0.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/python_dateutil-2.9.0.post0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/python_dateutil-2.9.0.post0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..79c00ba48bf5507aa11384e0897456c9ee5e99ff --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi @@ -0,0 +1,2027 @@ +# @generated from torch/_C/_VariableFunctions.pyi.in +# mypy: disable-error-code="type-arg" + +import builtins +from typing import ( + Any, + Callable, + ContextManager, + Iterator, + List, + Literal, + NamedTuple, + Optional, + overload, + Sequence, + Tuple, + TypeVar, + Union, +) + +import torch +from torch import contiguous_format, Generator, inf, memory_format, strided, SymInt, Tensor +from torch.types import ( + _bool, + _complex, + _device, + _dtype, + _float, + _int, + _layout, + _qscheme, + _size, + Device, + Number, +) + +from torch._prims_common import DeviceLikeType + +@overload +def __and__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __and__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __lshift__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __lshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __or__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __or__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __rshift__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __rshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __xor__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __xor__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +def _adaptive_avg_pool2d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ... +def _adaptive_avg_pool3d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ... +def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ... +@overload +def _add_relu(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def _add_relu(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def _add_relu_(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ...
+@overload +def _add_relu_(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... +def _addmm_activation(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, use_gelu: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ... +@overload +def _aminmax(input: Tensor, dim: _int, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ... +def _amp_foreach_non_finite_check_and_unscale_(self: Union[Tuple[Tensor, ...], List[Tensor]], found_inf: Tensor, inv_scale: Tensor) -> None: ... +def _amp_update_scale_(input: Tensor, growth_tracker: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ... +@overload +def _assert_async(input: Tensor) -> None: ... +@overload +def _assert_async(input: Tensor, assert_msg: str) -> None: ... +def _assert_tensor_metadata(a: Tensor, size: Optional[Sequence[Union[_int, SymInt]]] = None, stride: Optional[Sequence[Union[_int, SymInt]]] = None, dtype: Optional[_dtype] = None) -> None: ... +def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ... +def _cast_Byte(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Char(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Double(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Float(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Half(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Int(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Long(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Short(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool = False) -> Tuple[_float, _int]: ... +def _coalesce(input: Tensor) -> Tensor: ... +def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _conj(input: Tensor) -> Tensor: ... +def _conj_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _conj_physical(input: Tensor) -> Tensor: ... +def _convert_indices_from_coo_to_csr(input: Tensor, size: _int, *, out_int32: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +def _convert_indices_from_csr_to_coo(crow_indices: Tensor, col_indices: Tensor, *, out_int32: _bool = False, transpose: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +def _convert_weight_to_int4pack(input: Tensor, innerKTiles: _int) -> Tensor: ... +@overload +def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: _size, groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ... 
+@overload +def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ... +def _convolution_mode(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: str, dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _copy_from_and_resize(input: Tensor, dst: Tensor) -> Tensor: ... +def _cslt_compress(input: Tensor) -> Tensor: ... +def _cslt_sparse_mm(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False) -> Tensor: ... +@overload +def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ... +def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: Sequence[Union[_int, SymInt]], dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ... +def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: Union[_int, SymInt], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ... +def _cufft_clear_plan_cache(device_index: _int) -> None: ... +def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ... +def _cufft_get_plan_cache_size(device_index: _int) -> _int: ... +def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ... +def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ... +def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ... +def _debug_has_internal_overlap(input: Tensor) -> _int: ... +def _dim_arange(like: Tensor, dim: _int) -> Tensor: ... +def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ... +def _disable_functionalization(): ... 
+@overload +def _efficientzerotensor(size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def _efficientzerotensor(*size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +@overload +def _empty_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def _empty_affine_quantized(*size: _int, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def _empty_per_channel_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _enable_functionalization(*, reapply_views: _bool = False): ... +def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ... +def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ... +def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ... +def _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(input: Tensor, scale: Tensor, zero_point: Tensor, fake_quant_enabled: Tensor, quant_min: _int, quant_max: _int) -> torch.return_types._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: ... 
+def _fft_c2c(input: Tensor, dim: Sequence[Union[_int, SymInt]], normalization: _int, forward: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _fft_c2r(input: Tensor, dim: _size, normalization: _int, last_dim_size: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: ... +def _fft_r2c(input: Tensor, dim: _size, normalization: _int, onesided: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _fill_mem_eff_dropout_mask_(input: Tensor, dropout_p: _float, seed: _int, offset: _int) -> Tensor: ... +def _foobar(input: Tensor, arg1: _bool = True, arg2: _bool = True, *, arg3: _bool = True) -> Tensor: ... +def _foreach_abs(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_abs_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_acos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_acos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> List[Tensor]: ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> List[Tensor]: ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ... +@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ... +@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> None: ... +@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ... +@overload +def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> List[Tensor]: ... +@overload +def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> List[Tensor]: ... +@overload +def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ... +@overload +def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ... 
+@overload +def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ... +@overload +def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> List[Tensor]: ... +@overload +def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> List[Tensor]: ... +@overload +def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ... +@overload +def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ... +def _foreach_asin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_asin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_atan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_atan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_ceil(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_ceil_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ... +@overload +def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ... +@overload +def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +@overload +def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ... +@overload +def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ... +@overload +def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +@overload +def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... 
+@overload +def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_copy_(self: Union[Tuple[Tensor, ...], List[Tensor]], src: Union[Tuple[Tensor, ...], List[Tensor]], non_blocking: _bool = False) -> None: ... +def _foreach_cos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_cos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_cosh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_cosh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> List[Tensor]: ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ... +@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_erf(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_erf_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_erfc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_erfc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_exp(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_expm1(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_expm1_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_floor(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_floor_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_frac(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_frac_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> List[Tensor]: ... +@overload +def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +@overload +def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> None: ... +@overload +def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_lgamma(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... 
+def _foreach_lgamma_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_log(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_log10(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_log10_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_log1p(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_log1p_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_log2(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_log2_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_log_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ... +@overload +def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ... +@overload +def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +@overload +def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ... +@overload +def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ... +@overload +def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +@overload +def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> List[Tensor]: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_neg(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... 
+def _foreach_neg_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_norm(self: Union[Tuple[Tensor, ...], List[Tensor]], ord: Union[Number, _complex] = 2) -> List[Tensor]: ... +@overload +def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> List[Tensor]: ... +@overload +def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> List[Tensor]: ... +@overload +def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +@overload +def _foreach_pow(self: Union[Number, _complex], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +@overload +def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> None: ... +@overload +def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_reciprocal(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_reciprocal_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_round(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_round_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_sigmoid(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_sigmoid_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_sign(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_sign_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_sin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_sin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_sinh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_sinh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_sqrt(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ... +@overload +def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> List[Tensor]: ... +@overload +def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ... +@overload +def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ... +@overload +def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +def _foreach_tan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_tan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_tanh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... 
+def _foreach_tanh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_trunc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _foreach_trunc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_zero_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _from_functional_tensor(t: Tensor) -> Tensor: ... +def _functional_assert_async(input: Tensor, assert_msg: str, dep_token: Tensor) -> Tensor: ... +def _functional_sym_constrain_range(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ... +def _functional_sym_constrain_range_for_size(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ... +def _functionalize_are_all_mutations_hidden_from_autograd(t: Tensor) -> _bool: ... +def _functionalize_are_all_mutations_under_no_grad_or_inference_mode(t: Tensor) -> _bool: ... +def _functionalize_commit_update(t: Tensor) -> None: ... +def _functionalize_mark_mutation_hidden_from_autograd(t: Tensor) -> None: ... +def _functionalize_replace(self_: Tensor, other: Tensor) -> None: ... +def _functionalize_sync(t: Tensor) -> None: ... +@overload +def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +@overload +def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +@overload +def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +@overload +def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator] = None) -> Tuple[Tensor, Tensor]: ... 
+def _fused_moving_avg_obs_fq_helper(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> torch.return_types._fused_moving_avg_obs_fq_helper: ... +def _fused_sdp_choice(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> _int: ... +def _fw_primal_copy(input: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ... +def _histogramdd_bin_edges(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> List[Tensor]: ... +def _histogramdd_from_bin_cts(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ... +def _histogramdd_from_bin_tensors(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], *, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ... +def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False, unsafe: _bool = False) -> Tensor: ... +def _indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _int_mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _is_all_true(input: Tensor) -> Tensor: ... +def _is_any_true(input: Tensor) -> Tensor: ... +def _is_functional_tensor(t: Tensor) -> _bool: ... +def _is_zerotensor(input: Tensor) -> _bool: ... +def _linalg_check_errors(info: Tensor, api_name: str, *, is_matrix: _bool) -> None: ... +def _linalg_det(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_det: ... +def _linalg_eigh(A: Tensor, UPLO: str = "L", compute_v: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_eigh: ... +def _linalg_slogdet(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_slogdet: ... +def _linalg_solve_ex(A: Tensor, B: Tensor, *, left: _bool = True, check_errors: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_solve_ex: ... +def _linalg_svd(A: Tensor, full_matrices: _bool = False, compute_uv: _bool = True, *, driver: Optional[str] = None, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_svd: ... +def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor: ... +def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ... 
+def _lstm_mps(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: ... +def _lu_with_info(input: Tensor, pivot: _bool = True, check_errors: _bool = True) -> torch.return_types._lu_with_info: ... +def _make_dep_token(*, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _make_dual(primal: Tensor, tangent: Tensor, level: _int) -> Tensor: ... +def _make_dual_copy(primal: Tensor, tangent: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ... +def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ... +def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ... +def _masked_softmax(input: Tensor, mask: Tensor, dim: Optional[_int] = None, mask_type: Optional[_int] = None) -> Tensor: ... +def _mixed_dtypes_linear(input: Tensor, weight: Tensor, scale: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None) -> Tensor: ... +def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ... +def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ... +def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ... +def _mps_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def _mps_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +@overload +def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ... +@overload +def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ... +def _native_batch_norm_legit_no_training(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, momentum: _float, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +def _native_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None, need_weights: _bool = True, average_attn_weights: _bool = True, mask_type: Optional[_int] = None) -> Tuple[Tensor, Tensor]: ... +def _neg_view(input: Tensor) -> Tensor: ... +def _neg_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... 
+def _nested_from_padded(padded: Tensor, cpu_nested_shape_example: Tensor, fuse_transform_0213: _bool = False) -> Tensor: ... +def _nested_from_padded_and_nested_example(padded: Tensor, nt_example: Tensor) -> Tensor: ... +def _nested_tensor_from_mask(t: Tensor, mask: Tensor, mask_check: _bool = True) -> Tensor: ... +def _nested_tensor_from_mask_left_aligned(t: Tensor, mask: Tensor) -> _bool: ... +def _nested_tensor_from_tensor_list(list: Union[Tuple[Tensor, ...], List[Tensor]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = None) -> Tensor: ... +def _nested_tensor_softmax_with_shape(input: Tensor, query: Tensor) -> Tensor: ... +def _nested_view_from_buffer(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor) -> Tensor: ... +def _nested_view_from_buffer_copy(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _nnpack_available() -> _bool: ... +def _nnpack_spatial_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... +def _pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... +def _pad_packed_sequence(data: Tensor, batch_sizes: Tensor, batch_first: _bool, padding_value: Union[Number, _complex], total_length: _int) -> Tuple[Tensor, Tensor]: ... +def _pin_memory(input: Tensor, device: Optional[Optional[DeviceLikeType]] = None) -> Tensor: ... +def _prelu_kernel(input: Tensor, weight: Tensor) -> Tensor: ... +def _propagate_xla_data(input: Tensor, output: Tensor) -> None: ... +def _remove_batch_dim(input: Tensor, level: _int, batch_size: _int, out_dim: _int) -> Tensor: ... +def _reshape_alias_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor: ... +def _reshape_from_tensor(input: Tensor, shape: Tensor) -> Tensor: ... +def _resize_output_(input: Tensor, size: Sequence[Union[_int, SymInt]], device: Optional[DeviceLikeType]) -> Tensor: ... +def _rowwise_prune(weight: Tensor, mask: Tensor, compressed_indices_dtype: _dtype) -> Tuple[Tensor, Tensor]: ... +def _sample_dirichlet(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ... +def _saturate_weight_to_fp16(weight: Tensor) -> Tensor: ... +def _scaled_dot_product_attention_math(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, dropout_mask: Optional[Tensor] = None, *, scale: Optional[_float] = None) -> Tuple[Tensor, Tensor]: ... +def _scaled_dot_product_efficient_attention(query: Tensor, key: Tensor, value: Tensor, attn_bias: Optional[Tensor], compute_log_sumexp: _bool, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_efficient_attention: ... +def _scaled_dot_product_flash_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention: ... 
+def _scaled_mm(input: Tensor, mat2: Tensor, *, bias: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, scale_a: Optional[Tensor] = None, scale_b: Optional[Tensor] = None, scale_result: Optional[Tensor] = None, use_fast_accum: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor]: ... +def _shape_as_tensor(input: Tensor) -> Tensor: ... +def _sobol_engine_draw(quasi: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int, dtype: Optional[_dtype]) -> Tuple[Tensor, Tensor]: ... +def _sobol_engine_ff_(input: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int) -> Tensor: ... +def _sobol_engine_initialize_state_(input: Tensor, dimension: _int) -> Tensor: ... +def _sobol_engine_scramble_(input: Tensor, ltm: Tensor, dimension: _int) -> Tensor: ... +def _softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, grad_input: Optional[Tensor] = None) -> Tensor: ... +def _sparse_broadcast_to(input: Tensor, size: _size) -> Tensor: ... +def _sparse_broadcast_to_copy(input: Tensor, size: _size, *, out: Optional[Tensor] = None) -> Tensor: ... +def _sparse_csr_prod(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... +def _sparse_csr_sum(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... +def _sparse_log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ... +def _sparse_semi_structured_linear(input: Tensor, weight: Tensor, meta: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None) -> Tensor: ... +def _sparse_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ... +def _sparse_sparse_matmul(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor, *, dtype: _dtype) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor, dim: Union[_int, _size]) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor, dim: Union[_int, _size], *, dtype: _dtype) -> Tensor: ... +def _stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ... +def _standard_gamma(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ... +def _standard_gamma_grad(input: Tensor, output: Tensor) -> Tensor: ... +def _sync(t: Tensor) -> None: ... +@overload +def _test_autograd_multiple_dispatch(input: Tensor) -> Tensor: ... +@overload +def _test_autograd_multiple_dispatch(input: Tensor, b: _bool) -> Tensor: ... +def _test_autograd_multiple_dispatch_view(input: Tensor) -> Tensor: ... +def _test_autograd_multiple_dispatch_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _test_check_tensor(input: Tensor) -> Tensor: ... +def _test_functorch_fallback(input: Tensor, other: Tensor) -> Tensor: ... +def _test_serialization_subcmul(input: Tensor, other: Tensor, alpha: Union[Number, _complex] = 1) -> Tensor: ... +def _to_cpu(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... +def _to_functional_tensor(t: Tensor) -> Tensor: ... +def _to_sparse_semi_structured(dense: Tensor) -> Tuple[Tensor, Tensor]: ... 
+def _transform_bias_rescale_qkv(qkv: Tensor, qkv_bias: Tensor, num_heads: _int) -> Tuple[Tensor, Tensor, Tensor]: ... +def _transformer_encoder_layer_fwd(src: Tensor, embed_dim: _int, num_heads: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, use_gelu: _bool, norm_first: _bool, eps: _float, norm_weight_1: Tensor, norm_bias_1: Tensor, norm_weight_2: Tensor, norm_bias_2: Tensor, ffn_weight_1: Tensor, ffn_bias_1: Tensor, ffn_weight_2: Tensor, ffn_bias_2: Tensor, mask: Optional[Tensor] = None, mask_type: Optional[_int] = None) -> Tensor: ... +def _trilinear(i1: Tensor, i2: Tensor, i3: Tensor, expand1: _size, expand2: _size, expand3: _size, sumdim: _size, unroll_dim: _int = 1) -> Tensor: ... +def _triton_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None) -> Tensor: ... +def _triton_scaled_dot_attention(q: Tensor, k: Tensor, v: Tensor, dropout_p: _float = 0.0) -> Tensor: ... +def _unique(input: Tensor, sorted: _bool = True, return_inverse: _bool = False) -> Tuple[Tensor, Tensor]: ... +def _unique2(input: Tensor, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ... +def _unpack_dual(dual: Tensor, level: _int) -> torch.return_types._unpack_dual: ... +def _unsafe_index(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]]) -> Tensor: ... +def _unsafe_index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ... +@overload +def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int) -> _bool: ... +@overload +def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int) -> _bool: ... +def _use_cudnn_rnn_flatten_weight() -> _bool: ... +def _validate_compressed_sparse_indices(is_crow: _bool, compressed_idx: Tensor, plain_idx: Tensor, cdim: _int, dim: _int, nnz: _int) -> None: ... +def _validate_sparse_bsc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _validate_sparse_bsr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _validate_sparse_compressed_tensor_args(compressed_indices: Tensor, plain_indices: Tensor, values: Tensor, size: _size, layout: _layout) -> None: ... +def _validate_sparse_coo_tensor_args(indices: Tensor, values: Tensor, size: _size, is_coalesced: Optional[_bool] = None) -> None: ... +def _validate_sparse_csc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _validate_sparse_csr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _weight_int4pack_mm(input: Tensor, mat2: Tensor, qGroupSize: _int, qScaleAndZeros: Tensor) -> Tensor: ... +def _weight_norm(v: Tensor, g: Tensor, dim: _int = 0) -> Tensor: ... +def _weight_norm_interface(v: Tensor, g: Tensor, dim: _int = 0) -> Tuple[Tensor, Tensor]: ... +def abs(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def abs_(input: Tensor) -> Tensor: ... +def absolute(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... 
+def acos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def acos_(input: Tensor) -> Tensor: ... +def acosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def acosh_(input: Tensor) -> Tensor: ... +def adaptive_avg_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ... +def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ... +@overload +def add(input: Union[Tensor, Number], other: Union[Tensor, Number], *, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor: ... +@overload +def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor: ... +@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def addbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ... +@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor: ... +@overload +def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def addcdiv(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor: ... +@overload +def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def addcmul(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor: ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def addmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor: ... +@overload +def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def addmv(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... 
+@overload +def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ... +@overload +def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def addmv_(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor: ... +@overload +def addmv_(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def addmv_(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor) -> Tensor: ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def addr(input: Tensor, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor) -> Tensor: ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: ... +def adjoint(input: Tensor) -> Tensor: ... +def affine_grid_generator(theta: Tensor, size: Sequence[Union[_int, SymInt]], align_corners: _bool) -> Tensor: ... +def alias_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def all(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def all(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def all(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def all(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def allclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool: ... +def alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def amax(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def amin(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def aminmax(input: Tensor, *, dim: Optional[_int] = None, keepdim: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.aminmax: ... +def angle(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def any(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def any(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def any(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def any(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... 
+@overload +def arange(start: Number, end: Number, step: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... +@overload +def arange(start: Number, end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... +@overload +def arange(end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... +@overload +def arange(end: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def arange(start: Union[Number, _complex], end: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def arange(start: Union[Number, _complex], end: Union[Number, _complex], step: Union[Number, _complex] = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def arccos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def arccos_(input: Tensor) -> Tensor: ... +def arccosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def arccosh_(input: Tensor) -> Tensor: ... +def arcsin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def arcsin_(input: Tensor) -> Tensor: ... +def arcsinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def arcsinh_(input: Tensor) -> Tensor: ... +def arctan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def arctan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def arctan_(input: Tensor) -> Tensor: ... +def arctanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def arctanh_(input: Tensor) -> Tensor: ... +def argmax(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def argmin(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def argsort(input: Tensor, *, stable: _bool, dim: _int = -1, descending: _bool = False) -> Tensor: ... +@overload +def argsort(input: Tensor, dim: _int = -1, descending: _bool = False) -> Tensor: ... +@overload +def argsort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False) -> Tensor: ... +def argwhere(input: Tensor) -> Tensor: ... +def as_strided(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ... +def as_strided_(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ... 
+def as_strided_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +def as_strided_scatter(input: Tensor, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ... +def as_tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None) -> Tensor: ... +def asarray(obj: Any, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, copy: Optional[_bool] = None, requires_grad: _bool = False) -> Tensor: ... +def asin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def asin_(input: Tensor) -> Tensor: ... +def asinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def asinh_(input: Tensor) -> Tensor: ... +def atan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def atan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def atan_(input: Tensor) -> Tensor: ... +def atanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def atanh_(input: Tensor) -> Tensor: ... +def avg_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, ceil_mode: _bool = False, count_include_pad: _bool = True) -> Tensor: ... +@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor: ... +@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def baddbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ... +@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def bartlett_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def bartlett_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ... +def batch_norm_backward_elemt(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], sum_dy: Tensor, sum_dy_xmu: Tensor, count: Tensor) -> Tensor: ... +def batch_norm_backward_reduce(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], input_g: _bool, weight_g: _bool, bias_g: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... 
+def batch_norm_elemt(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, invstd: Tensor, eps: _float, *, out: Optional[Tensor] = None) -> Tensor: ... +def batch_norm_gather_stats(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, count: _int) -> Tuple[Tensor, Tensor]: ... +def batch_norm_gather_stats_with_counts(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, counts: Tensor) -> Tuple[Tensor, Tensor]: ... +def batch_norm_stats(input: Tensor, eps: _float) -> Tuple[Tensor, Tensor]: ... +def batch_norm_update_stats(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float) -> Tuple[Tensor, Tensor]: ... +@overload +def bernoulli(input: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def bernoulli(input: Tensor, p: _float, *, generator: Optional[Generator] = None) -> Tensor: ... +def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ... +def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor] = None, pos_weight: Optional[Tensor] = None, reduction: _int = 1) -> Tensor: ... +def bincount(input: Tensor, weights: Optional[Tensor] = None, minlength: _int = 0) -> Tensor: ... +def binomial(count: Tensor, prob: Tensor, generator: Optional[Generator] = None) -> Tensor: ... +@overload +def bitwise_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def bitwise_and(self: Union[Number, _complex], other: Tensor) -> Tensor: ... +@overload +def bitwise_and(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def bitwise_left_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def bitwise_left_shift(self: Union[Number, _complex], other: Tensor) -> Tensor: ... +@overload +def bitwise_left_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def bitwise_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def bitwise_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def bitwise_or(self: Union[Number, _complex], other: Tensor) -> Tensor: ... +@overload +def bitwise_or(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def bitwise_right_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def bitwise_right_shift(self: Union[Number, _complex], other: Tensor) -> Tensor: ... +@overload +def bitwise_right_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def bitwise_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def bitwise_xor(self: Union[Number, _complex], other: Tensor) -> Tensor: ... +@overload +def bitwise_xor(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def blackman_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... 
+@overload +def blackman_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def broadcast_to(input: Tensor, size: Sequence[Union[_int, SymInt]]) -> Tensor: ... +@overload +def bucketize(input: Tensor, boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def bucketize(self: Union[Number, _complex], boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False) -> Tensor: ... +def can_cast(from_: _dtype, to: _dtype) -> _bool: ... +@overload +def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: ... +def ccol_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def ceil(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def ceil_(input: Tensor) -> Tensor: ... +def celu(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ... +def celu_(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ... +def channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ... +def cholesky(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def cholesky_inverse(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def cholesky_solve(input: Tensor, input2: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def choose_qparams_optimized(input: Tensor, numel: _int, n_bins: _int, ratio: _float, bit_width: _int) -> Tuple[Tensor, Tensor]: ... +def chunk(input: Tensor, chunks: _int, dim: _int = 0) -> List[Tensor]: ... +@overload +def clamp(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ... +@overload +def clamp_max(input: Tensor, max: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_max(input: Tensor, max: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_max_(input: Tensor, max: Tensor) -> Tensor: ... +@overload +def clamp_max_(input: Tensor, max: Union[Number, _complex]) -> Tensor: ... +@overload +def clamp_min(input: Tensor, min: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_min(input: Tensor, min: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_min_(input: Tensor, min: Tensor) -> Tensor: ... +@overload +def clamp_min_(input: Tensor, min: Union[Number, _complex]) -> Tensor: ... +@overload +def clip(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: ... 
+@overload +def clip(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clip_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ... +@overload +def clip_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ... +def clone(input: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: ... +def col_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def column_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: ... +def combinations(input: Tensor, r: _int = 2, with_replacement: _bool = False) -> Tensor: ... +def complex(real: Tensor, imag: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: ... +def conj(input: Tensor) -> Tensor: ... +def conj_physical(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def conj_physical_(input: Tensor) -> Tensor: ... +def constant_pad_nd(input: Tensor, pad: Sequence[Union[_int, SymInt]], value: Union[Number, _complex] = 0) -> Tensor: ... +@overload +def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... 
+@overload +def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +def conv_tbc(input: Tensor, weight: Tensor, bias: Tensor, pad: _int = 0) -> Tensor: ... +def conv_transpose1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... +def conv_transpose2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... +def conv_transpose3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... +def convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +@overload +def copysign(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def copysign(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def corrcoef(input: Tensor) -> Tensor: ... +def cos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def cos_(input: Tensor) -> Tensor: ... +def cosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def cosh_(input: Tensor) -> Tensor: ... +def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ... +def cosine_similarity(x1: Tensor, x2: Tensor, dim: _int = 1, eps: _float = 1e-08) -> Tensor: ... +@overload +def count_nonzero(input: Tensor, dim: Optional[_int] = None) -> Tensor: ... +@overload +def count_nonzero(input: Tensor, dim: _size) -> Tensor: ... +def cov(input: Tensor, *, correction: _int = 1, fweights: Optional[Tensor] = None, aweights: Optional[Tensor] = None) -> Tensor: ... +def cross(input: Tensor, other: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +def crow_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ... 
+@overload +def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ... +def cudnn_affine_grid_generator(theta: Tensor, N: _int, C: _int, H: _int, W: _int) -> Tensor: ... +def cudnn_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def cudnn_convolution(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ... +def cudnn_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def cudnn_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def cudnn_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ... +def cudnn_grid_sampler(input: Tensor, grid: Tensor) -> Tensor: ... +def cudnn_is_acceptable(input: Tensor) -> _bool: ... +@overload +def cummax(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax: ... +@overload +def cummax(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax: ... +@overload +def cummin(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin: ... +@overload +def cummin(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin: ... +@overload +def cumprod(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def cumprod(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def cumsum(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def cumsum(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def cumulative_trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: ... +@overload +def cumulative_trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor: ... +def deg2rad(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def deg2rad_(input: Tensor) -> Tensor: ... +@overload +def dequantize(input: Tensor) -> Tensor: ... +@overload +def dequantize(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ... 
+def det(input: Tensor) -> Tensor: ... +def detach(input: Tensor) -> Tensor: ... +def detach_(input: Tensor) -> Tensor: ... +def detach_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def diag(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ... +def diag_embed(input: Tensor, offset: _int = 0, dim1: _int = -2, dim2: _int = -1) -> Tensor: ... +def diagflat(input: Tensor, offset: _int = 0) -> Tensor: ... +@overload +def diagonal(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: ... +@overload +def diagonal(input: Tensor, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int = 0) -> Tensor: ... +def diagonal_copy(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1, *, out: Optional[Tensor] = None) -> Tensor: ... +def diagonal_scatter(input: Tensor, src: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: ... +def diff(input: Tensor, n: _int = 1, dim: _int = -1, prepend: Optional[Tensor] = None, append: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +def digamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def dist(input: Tensor, other: Tensor, p: Union[Number, _complex] = 2) -> Tensor: ... +def div(input: Union[Tensor, Number], other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def divide(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def divide(input: Tensor, other: Tensor, *, rounding_mode: Optional[str], out: Optional[Tensor] = None) -> Tensor: ... +@overload +def divide(input: Tensor, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor: ... +@overload +def divide(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +def dot(input: Tensor, tensor: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def dsmm(input: Tensor, mat2: Tensor) -> Tensor: ... +@overload +def dsplit(input: Tensor, sections: _int) -> List[Tensor]: ... +@overload +def dsplit(input: Tensor, indices: _size) -> List[Tensor]: ... +def dstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: ... +def embedding(weight: Tensor, indices: Tensor, padding_idx: Union[_int, SymInt] = -1, scale_grad_by_freq: _bool = False, sparse: _bool = False) -> Tensor: ... +@overload +def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool, mode: _int, sparse: _bool, per_sample_weights: Optional[Tensor], include_last_offset: _bool, padding_idx: Optional[_int]) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +@overload +def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def embedding_renorm_(input: Tensor, indices: Tensor, max_norm: _float, norm_type: _float) -> Tensor: ... 
+@overload +def empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def empty(*size: _int, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def empty_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def empty_permuted(size: Sequence[Union[_int, SymInt]], physical_layout: _size, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def empty_quantized(size: _size, qtensor: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def empty_strided(size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def eq(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def eq(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def equal(input: Tensor, other: Tensor) -> _bool: ... +def erf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def erf_(input: Tensor) -> Tensor: ... +def erfc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def erfc_(input: Tensor) -> Tensor: ... +def erfinv(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def exp(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def exp2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def exp2_(input: Tensor) -> Tensor: ... +def exp_(input: Tensor) -> Tensor: ... +def expand_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, implicit: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... 
+def expm1(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def expm1_(input: Tensor) -> Tensor: ... +@overload +def eye(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def eye(n: Union[_int, SymInt], m: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def fake_quantize_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int) -> Tensor: ... +@overload +def fake_quantize_per_tensor_affine(input: Tensor, scale: _float, zero_point: _int, quant_min: _int, quant_max: _int) -> Tensor: ... +@overload +def fake_quantize_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int) -> Tensor: ... +def fbgemm_linear_fp16_weight(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ... +def fbgemm_linear_fp16_weight_fp32_activation(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ... +def fbgemm_linear_int8_weight(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ... +def fbgemm_linear_int8_weight_fp32_activation(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ... +def fbgemm_linear_quantize_weight(input: Tensor) -> Tuple[Tensor, Tensor, _float, _int]: ... +def fbgemm_pack_gemm_matrix_fp16(input: Tensor) -> Tensor: ... +@overload +def fbgemm_pack_quantized_matrix(input: Tensor) -> Tensor: ... +@overload +def fbgemm_pack_quantized_matrix(input: Tensor, K: _int, N: _int) -> Tensor: ... +def feature_alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def feature_alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def feature_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def feature_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +@overload +def fill(input: Tensor, value: Tensor) -> Tensor: ... +@overload +def fill(input: Tensor, value: Union[Number, _complex]) -> Tensor: ... +@overload +def fill_(input: Tensor, value: Tensor) -> Tensor: ... +@overload +def fill_(input: Tensor, value: Union[Number, _complex]) -> Tensor: ... +def fix(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def fix_(input: Tensor) -> Tensor: ... +@overload +def flatten(input: Tensor, start_dim: _int = 0, end_dim: _int = -1) -> Tensor: ... +@overload +def flatten(input: Tensor, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor: ... +@overload +def flatten(input: Tensor, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor: ... +@overload +def flatten(input: Tensor, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor: ... +def flip(input: Tensor, dims: _size) -> Tensor: ... +def fliplr(input: Tensor) -> Tensor: ... +def flipud(input: Tensor) -> Tensor: ... 
+@overload +def float_power(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def float_power(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def float_power(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def floor(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def floor_(input: Tensor) -> Tensor: ... +def floor_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor: ... +def fmax(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def fmin(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def fmod(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def fmod(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def frac(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def frac_(input: Tensor) -> Tensor: ... +def frexp(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.frexp: ... +def frobenius_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def from_file(filename: str, shared: Optional[_bool] = None, size: Optional[_int] = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def from_numpy(ndarray) -> Tensor: ... +def frombuffer(buffer: Any, *, dtype: _dtype, count: int = -1, offset: int = 0, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False) -> Tensor: ... +@overload +def full(size: _size, fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... +@overload +def full(size: _size, fill_value: Union[Number, _complex], *, names: List[Union[str, None]], layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... +@overload +def full(size: Sequence[Union[_int, SymInt]], fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def full(size: _size, fill_value: Union[Number, _complex], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def full_like(input: Tensor, fill_value: Union[Number, _complex], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... 
+def fused_moving_avg_obs_fake_quant(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> Tensor: ... +@overload +def gather(input: Tensor, dim: _int, index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def gather(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +def gcd(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def gcd_(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def ge(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def ge(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def geqrf(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.geqrf: ... +def ger(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def get_default_dtype() -> _dtype: ... +def get_num_interop_threads() -> _int: ... +def get_num_threads() -> _int: ... +@overload +def gradient(input: Tensor, *, spacing: Optional[Union[Number, _complex]] = None, dim: Optional[_int] = None, edge_order: _int = 1) -> List[Tensor]: ... +@overload +def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: Optional[_int] = None, edge_order: _int = 1) -> List[Tensor]: ... +@overload +def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: _size, edge_order: _int = 1) -> List[Tensor]: ... +@overload +def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: Optional[_int] = None, edge_order: _int = 1) -> List[Tensor]: ... +@overload +def gradient(input: Tensor, *, spacing: Union[Number, _complex], dim: _size, edge_order: _int = 1) -> List[Tensor]: ... +@overload +def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: _size, edge_order: _int = 1) -> List[Tensor]: ... +@overload +def gradient(input: Tensor, *, dim: _size, edge_order: _int = 1) -> List[Tensor]: ... +@overload +def greater(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def greater(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def greater_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def greater_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def grid_sampler(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def grid_sampler_2d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def group_norm(input: Tensor, num_groups: _int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enabled: _bool = True) -> Tensor: ... 
+@overload +def gru(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def gru(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... +def gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ... +@overload +def gt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def gt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def hamming_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def hamming_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def hamming_window(window_length: _int, periodic: _bool, alpha: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def hamming_window(window_length: _int, periodic: _bool, alpha: _float, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def hann_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def hann_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def hardshrink(input: Tensor, lambd: Union[Number, _complex] = 0.5, *, out: Optional[Tensor] = None) -> Tensor: ... +def heaviside(input: Tensor, values: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def hinge_embedding_loss(input: Tensor, target: Tensor, margin: _float = 1.0, reduction: _int = 1) -> Tensor: ... +def histc(input: Tensor, bins: _int = 100, min: Union[Number, _complex] = 0, max: Union[Number, _complex] = 0, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def histogram(input: Tensor, bins: Tensor, *, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram: ... +@overload +def histogram(input: Tensor, bins: _int = 100, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram: ... 
+@overload +def histogramdd(input: Tensor, bins: _int, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: ... +@overload +def histogramdd(input: Tensor, bins: _size, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: ... +@overload +def histogramdd(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: ... +def hsmm(input: Tensor, mat2: Tensor) -> Tensor: ... +@overload +def hsplit(input: Tensor, sections: _int) -> List[Tensor]: ... +@overload +def hsplit(input: Tensor, indices: _size) -> List[Tensor]: ... +def hspmm(mat1: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def hstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: ... +def hypot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def i0(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def i0_(input: Tensor) -> Tensor: ... +def igamma(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def igammac(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def imag(input: Tensor) -> Tensor: ... +@overload +def index_add(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def index_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def index_copy(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def index_copy(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ... +@overload +def index_fill(input: Tensor, dim: _int, index: Tensor, value: Tensor) -> Tensor: ... +@overload +def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ... +@overload +def index_fill(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: ... +@overload +def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ... +def index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ... +def index_put_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ... +def index_reduce(input: Tensor, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def index_select(input: Tensor, dim: _int, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def index_select(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def init_num_threads() -> None: ... +def inner(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... 
+def instance_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], use_input_stats: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ... +def int_repr(input: Tensor) -> Tensor: ... +def inverse(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def is_complex(input: Tensor) -> _bool: ... +def is_conj(input: Tensor) -> _bool: ... +def is_distributed(input: Tensor) -> _bool: ... +def is_floating_point(input: Tensor) -> _bool: ... +def is_grad_enabled() -> _bool: ... +def is_inference(input: Tensor) -> _bool: ... +def is_inference_mode_enabled() -> _bool: ... +def is_neg(input: Tensor) -> _bool: ... +def is_nonzero(input: Tensor) -> _bool: ... +def is_same_size(input: Tensor, other: Tensor) -> _bool: ... +def is_signed(input: Tensor) -> _bool: ... +def is_vulkan_available() -> _bool: ... +def isclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> Tensor: ... +def isfinite(input: Tensor) -> Tensor: ... +@overload +def isin(elements: Tensor, test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def isin(element: Union[Number, _complex], test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def isin(elements: Tensor, test_element: Union[Number, _complex], *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +def isinf(input: Tensor) -> Tensor: ... +def isnan(input: Tensor) -> Tensor: ... +def isneginf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def isposinf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def isreal(input: Tensor) -> Tensor: ... +def istft(input: Tensor, n_fft: _int, hop_length: Optional[_int] = None, win_length: Optional[_int] = None, window: Optional[Tensor] = None, center: _bool = True, normalized: _bool = False, onesided: Optional[_bool] = None, length: Optional[_int] = None, return_complex: _bool = False) -> Tensor: ... +@overload +def kaiser_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def kaiser_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def kaiser_window(window_length: _int, periodic: _bool, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def kl_div(input: Tensor, target: Tensor, reduction: _int = 1, *, log_target: _bool = False) -> Tensor: ... +def kron(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def kthvalue(input: Tensor, k: _int, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue: ... 
+@overload +def kthvalue(input: Tensor, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue: ... +def layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enable: _bool = True) -> Tensor: ... +def lcm(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def lcm_(input: Tensor, other: Tensor) -> Tensor: ... +def ldexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def ldexp_(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def le(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def le(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def lerp(input: Tensor, end: Tensor, weight: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def lerp(input: Tensor, end: Tensor, weight: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def less(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def less(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def less_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def less_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def lgamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def linspace(start: Number, end: Number, steps: Optional[_int] = None, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... +@overload +def linspace(start: Tensor, end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def linspace(start: Union[Number, _complex], end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def linspace(start: Tensor, end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def linspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def log(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def log10(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def log10_(input: Tensor) -> Tensor: ... +def log1p(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def log1p_(input: Tensor) -> Tensor: ... 
+def log2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def log2_(input: Tensor) -> Tensor: ... +def log_(input: Tensor) -> Tensor: ... +@overload +def log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ... +def logaddexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def logaddexp2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def logcumsumexp(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: ... +def logdet(input: Tensor) -> Tensor: ... +def logical_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def logical_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def logical_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def logical_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def logit(input: Tensor, eps: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +def logit_(input: Tensor, eps: Optional[_float] = None) -> Tensor: ... +@overload +def logspace(start: Number, end: Number, steps: Optional[_int] = None, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... +@overload +def logspace(start: Tensor, end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def logspace(start: Union[Number, _complex], end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def logspace(start: Tensor, end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def logspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def logsumexp(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def logsumexp(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... 
+@overload +def lstm(data: Tensor, batch_sizes: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor, Tensor]: ... +@overload +def lstm(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor]: ... +def lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ... +@overload +def lt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def lt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def lu_solve(input: Tensor, LU_data: Tensor, LU_pivots: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def lu_unpack(LU_data: Tensor, LU_pivots: Tensor, unpack_data: _bool = True, unpack_pivots: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.lu_unpack: ... +def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ... +@overload +def masked_fill(input: Tensor, mask: Tensor, value: Tensor) -> Tensor: ... +@overload +def masked_fill(input: Tensor, mask: Tensor, value: Union[Number, _complex]) -> Tensor: ... +def masked_scatter(input: Tensor, mask: Tensor, source: Tensor) -> Tensor: ... +def masked_select(input: Tensor, mask: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def matrix_exp(input: Tensor) -> Tensor: ... +def matrix_power(input: Tensor, n: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def max(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def max(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def max(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max: ... +@overload +def max(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max: ... +def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def max_pool1d_with_indices(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tuple[Tensor, Tensor]: ... +def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def maximum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... 
+@overload +def mean(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: ... +@overload +def mean(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def median(input: Tensor) -> Tensor: ... +@overload +def median(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median: ... +@overload +def median(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median: ... +@overload +def min(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def min(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def min(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min: ... +@overload +def min(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min: ... +def minimum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def miopen_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +def miopen_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ... +def miopen_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def miopen_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def miopen_convolution_transpose(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ... +def miopen_depthwise_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ... 
+def miopen_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ... +def mkldnn_adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size], *, out: Optional[Tensor] = None) -> Tensor: ... +def mkldnn_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def mkldnn_linear_backward_weights(grad_output: Tensor, input: Tensor, weight: Tensor, bias_defined: _bool) -> Tuple[Tensor, Tensor]: ... +def mkldnn_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def mkldnn_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def mkldnn_rnn_layer(input: Tensor, weight0: Tensor, weight1: Tensor, weight2: Tensor, weight3: Tensor, hx_: Tensor, cx_: Tensor, reverse: _bool, batch_sizes: _size, mode: _int, hidden_size: _int, num_layers: _int, has_biases: _bool, bidirectional: _bool, batch_first: _bool, train: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def mode(input: Tensor, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode: ... +@overload +def mode(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode: ... +@overload +def moveaxis(input: Tensor, source: _int, destination: _int) -> Tensor: ... +@overload +def moveaxis(input: Tensor, source: _size, destination: _size) -> Tensor: ... +@overload +def movedim(input: Tensor, source: _int, destination: _int) -> Tensor: ... +@overload +def movedim(input: Tensor, source: _size, destination: _size) -> Tensor: ... +def msort(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def mul(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor: ... +def multinomial(input: Tensor, num_samples: _int, replacement: _bool = False, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def multiply(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def multiply(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +def mv(input: Tensor, vec: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def mvlgamma(input: Tensor, p: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def nan_to_num(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +def nan_to_num_(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor: ... 
+def nanmean(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def nanmedian(input: Tensor) -> Tensor: ... +@overload +def nanmedian(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian: ... +@overload +def nanmedian(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian: ... +@overload +def nanquantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: ... +@overload +def nanquantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: ... +def nansum(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def narrow(input: Tensor, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor: ... +@overload +def narrow(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: ... +def narrow_copy(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: ... +def native_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ... +def native_channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ... +def native_dropout(input: Tensor, p: _float, train: Optional[_bool]) -> Tuple[Tensor, Tensor]: ... +def native_group_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], N: Union[_int, SymInt], C: Union[_int, SymInt], HxW: Union[_int, SymInt], group: _int, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +def native_layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor], bias: Optional[Tensor], eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +@overload +def native_norm(input: Tensor, p: Optional[Union[Number, _complex]], dim: Union[_int, _size], keepdim: _bool, dtype: Optional[_dtype]) -> Tensor: ... +@overload +def native_norm(input: Tensor, p: Union[Number, _complex] = 2) -> Tensor: ... +@overload +def ne(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def ne(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def neg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def neg_(input: Tensor) -> Tensor: ... +def negative(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def negative_(input: Tensor) -> Tensor: ... +def nextafter(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def nonzero(input: Tensor, *, as_tuple: Literal[False] = False, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def nonzero(input: Tensor, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: ... 
+def nonzero_static(input: Tensor, *, size: _int, fill_value: _int = -1, out: Optional[Tensor] = None) -> Tensor: ... +def norm_except_dim(v: Tensor, pow: _int = 2, dim: _int = 0) -> Tensor: ... +@overload +def normal(mean: Tensor, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def normal(mean: Tensor, std: _float = 1, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def normal(mean: _float, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def normal(mean: _float, std: _float, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def not_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def not_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def nuclear_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def nuclear_norm(input: Tensor, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def numel(self: Tensor) -> _int: ... +@overload +def ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def ones(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def ones_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def orgqr(input: Tensor, input2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def ormqr(input: Tensor, input2: Tensor, input3: Tensor, left: _bool = True, transpose: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def outer(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def pairwise_distance(x1: Tensor, x2: Tensor, p: _float = 2, eps: _float = 1e-06, keepdim: _bool = False) -> Tensor: ... +def pdist(input: Tensor, p: _float = 2) -> Tensor: ... +def permute(input: Tensor, dims: _size) -> Tensor: ... 
+def permute_copy(input: Tensor, dims: _size, *, out: Optional[Tensor] = None) -> Tensor: ... +def pinverse(input: Tensor, rcond: _float = 1e-15) -> Tensor: ... +def pixel_shuffle(input: Tensor, upscale_factor: _int) -> Tensor: ... +def pixel_unshuffle(input: Tensor, downscale_factor: _int) -> Tensor: ... +def poisson(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ... +def poisson_nll_loss(input: Tensor, target: Tensor, log_input: _bool, full: _bool, eps: _float, reduction: _int) -> Tensor: ... +def polar(abs: Tensor, angle: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def polygamma(n: _int, input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def positive(input: Tensor) -> Tensor: ... +@overload +def pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def prelu(input: Tensor, weight: Tensor) -> Tensor: ... +@overload +def prod(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: ... +@overload +def prod(input: Tensor, dim: _int, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def prod(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +def promote_types(type1: _dtype, type2: _dtype) -> _dtype: ... +def put(input: Tensor, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor: ... +def q_per_channel_axis(input: Tensor) -> _int: ... +def q_per_channel_scales(input: Tensor) -> Tensor: ... +def q_per_channel_zero_points(input: Tensor) -> Tensor: ... +def q_scale(input: Tensor) -> _float: ... +def q_zero_point(input: Tensor) -> _int: ... +def qr(input: Tensor, some: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.qr: ... +@overload +def quantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: ... +@overload +def quantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: ... +def quantize_per_channel(input: Tensor, scales: Tensor, zero_points: Tensor, axis: _int, dtype: _dtype) -> Tensor: ... +@overload +def quantize_per_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, dtype: _dtype) -> Tensor: ... +@overload +def quantize_per_tensor(input: Tensor, scale: _float, zero_point: _int, dtype: _dtype) -> Tensor: ... +@overload +def quantize_per_tensor(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scales: Tensor, zero_points: Tensor, dtype: _dtype) -> List[Tensor]: ... +def quantize_per_tensor_dynamic(input: Tensor, dtype: _dtype, reduce_range: _bool) -> Tensor: ... +def quantized_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, var: Tensor, eps: _float, output_scale: _float, output_zero_point: _int) -> Tensor: ... 
+def quantized_gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ... +def quantized_lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tuple[Tensor, Tensor]: ... +def quantized_max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def quantized_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def quantized_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def quantized_rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ... +def quantized_rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ... +def rad2deg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def rad2deg_(input: Tensor) -> Tensor: ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def rand(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... 
+@overload +def rand(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def rand(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def rand(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def rand_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... +@overload +def randint(high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... +@overload +def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... 
+@overload +def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randint_like(input: Tensor, high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randint_like(input: Tensor, low: Union[_int, SymInt], high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randn(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randn(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... 
+@overload +def randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def randn_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randperm(n: Union[_int, SymInt], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def randperm(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def range(start: Number, end: Number, step: Number = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... +def ravel(input: Tensor) -> Tensor: ... +def real(input: Tensor) -> Tensor: ... +def reciprocal(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def reciprocal_(input: Tensor) -> Tensor: ... +def relu(input: Tensor) -> Tensor: ... +def relu_(input: Tensor) -> Tensor: ... +@overload +def remainder(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def remainder(self: Union[Number, _complex], other: Tensor) -> Tensor: ... +@overload +def remainder(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def renorm(input: Tensor, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def repeat_interleave(input: Tensor, repeats: Tensor, dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: ... +@overload +def repeat_interleave(repeats: Tensor, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: ... +@overload +def repeat_interleave(input: Tensor, repeats: Union[_int, SymInt], dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: ... +def reshape(input: Tensor, shape: Sequence[Union[_int, SymInt]]) -> Tensor: ... +def resize_as_(input: Tensor, the_template: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: ... +def resize_as_sparse_(input: Tensor, the_template: Tensor) -> Tensor: ... +def resolve_conj(input: Tensor) -> Tensor: ... +def resolve_neg(input: Tensor) -> Tensor: ... +@overload +def result_type(tensor: Tensor, other: Tensor) -> _dtype: ... +@overload +def result_type(scalar: Union[Number, _complex], tensor: Tensor) -> _dtype: ... +@overload +def result_type(tensor: Tensor, other: Union[Number, _complex]) -> _dtype: ... +@overload +def result_type(scalar1: Union[Number, _complex], scalar2: Union[Number, _complex]) -> _dtype: ... 
+@overload +def rnn_relu(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def rnn_relu(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... +def rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ... +@overload +def rnn_tanh(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def rnn_tanh(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... +def rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ... +def roll(input: Tensor, shifts: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], dims: Union[_int, _size] = ()) -> Tensor: ... +def rot90(input: Tensor, k: _int = 1, dims: _size = (0,1)) -> Tensor: ... +@overload +def round(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def round(input: Tensor, *, decimals: _int, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def round_(input: Tensor) -> Tensor: ... +@overload +def round_(input: Tensor, *, decimals: _int) -> Tensor: ... +def row_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def row_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: ... +def rrelu(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ... +def rrelu_(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ... +def rsqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def rsqrt_(input: Tensor) -> Tensor: ... +@overload +def rsub(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def rsub(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... +def saddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number = 1, alpha: Number = 1, out: Optional[Tensor] = None) -> Tensor: ... +def scalar_tensor(s: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, reduce: str, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str, out: Optional[Tensor] = None) -> Tensor: ... 
+@overload +def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ... +@overload +def scatter_add(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def scatter_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ... +def scatter_reduce(input: Tensor, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def searchsorted(sorted_sequence: Tensor, input: Tensor, *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def searchsorted(sorted_sequence: Tensor, self: Union[Number, _complex], *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor: ... +def segment_reduce(data: Tensor, reduce: str, *, lengths: Optional[Tensor] = None, indices: Optional[Tensor] = None, offsets: Optional[Tensor] = None, axis: _int = 0, unsafe: _bool = False, initial: Optional[Union[Number, _complex]] = None) -> Tensor: ... +@overload +def select(input: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: ... +@overload +def select(input: Tensor, dim: Union[str, ellipsis, None], index: _int) -> Tensor: ... +def select_copy(input: Tensor, dim: _int, index: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: ... +def select_scatter(input: Tensor, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: ... +def selu(input: Tensor) -> Tensor: ... +def selu_(input: Tensor) -> Tensor: ... +def set_flush_denormal(mode: _bool) -> _bool: ... +def set_num_interop_threads(num: _int) -> None: ... +def set_num_threads(num: _int) -> None: ... +def sgn(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def sigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def sigmoid_(input: Tensor) -> Tensor: ... +def sign(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def signbit(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def sin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def sin_(input: Tensor) -> Tensor: ... +def sinc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def sinc_(input: Tensor) -> Tensor: ... +def sinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def sinh_(input: Tensor) -> Tensor: ... +def slice_copy(input: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor: ... +def slice_scatter(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: ... +def slogdet(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.slogdet: ... +def smm(input: Tensor, mat2: Tensor) -> Tensor: ... 
+@overload +def softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ... +@overload +def sort(input: Tensor, *, stable: Optional[_bool], dim: _int = -1, descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: ... +@overload +def sort(input: Tensor, dim: _int = -1, descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: ... +@overload +def sort(input: Tensor, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: ... +@overload +def sort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: ... +def sparse_bsc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: ... +def sparse_bsr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: ... +def sparse_compressed_tensor(compressed_indices: Union[Tensor, List], plain_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: ... +def sparse_coo_tensor(indices: Tensor, values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None, is_coalesced: Optional[_bool] = None) -> Tensor: ... +def sparse_csc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: ... +def sparse_csr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: ... +def split_copy(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: ... +def split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> List[Tensor]: ... +def split_with_sizes_copy(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: ... +def spmm(input: Tensor, mat2: Tensor) -> Tensor: ... +def sqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... 
+def sqrt_(input: Tensor) -> Tensor: ... +def square(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def square_(input: Tensor) -> Tensor: ... +@overload +def squeeze(input: Tensor) -> Tensor: ... +@overload +def squeeze(input: Tensor, dim: _int) -> Tensor: ... +@overload +def squeeze(input: Tensor, dim: _size) -> Tensor: ... +@overload +def squeeze(input: Tensor, dim: Union[str, ellipsis, None]) -> Tensor: ... +@overload +def squeeze_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def squeeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def squeeze_copy(input: Tensor, dim: _size, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def sspaddmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor: ... +@overload +def sspaddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def sspaddmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: ... +def stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def std(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def std(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def std(input: Tensor, unbiased: _bool = True) -> Tensor: ... +@overload +def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def std_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def std_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def std_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]: ... +@overload +def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def sub(input: Union[Tensor, Number], other: Union[Tensor, Number], *, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor: ... +@overload +def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor: ... +@overload +def subtract(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def subtract(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... 
+@overload +def sum(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: ... +@overload +def sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ... +def svd(input: Tensor, some: _bool = True, compute_uv: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.svd: ... +def swapaxes(input: Tensor, axis0: _int, axis1: _int) -> Tensor: ... +def swapdims(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ... +def sym_constrain_range(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ... +def sym_constrain_range_for_size(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ... +def t(input: Tensor) -> Tensor: ... +def t_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def take(input: Tensor, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def take_along_dim(input: Tensor, indices: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +def tan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def tan_(input: Tensor) -> Tensor: ... +def tanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def tanh_(input: Tensor) -> Tensor: ... +def tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... +@overload +def tensor_split(input: Tensor, tensor_indices_or_sections: Tensor, dim: _int = 0) -> List[Tensor]: ... +@overload +def tensor_split(input: Tensor, sections: Union[_int, SymInt], dim: _int = 0) -> List[Tensor]: ... +@overload +def tensor_split(input: Tensor, indices: Sequence[Union[_int, SymInt]], dim: _int = 0) -> List[Tensor]: ... +def threshold(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def threshold_(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex]) -> Tensor: ... +def tile(input: Tensor, dims: Sequence[Union[_int, SymInt]]) -> Tensor: ... +def topk(input: Tensor, k: Union[_int, SymInt], dim: _int = -1, largest: _bool = True, sorted: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.topk: ... +def trace(input: Tensor) -> Tensor: ... +@overload +def transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ... +@overload +def transpose(input: Tensor, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor: ... +def transpose_copy(input: Tensor, dim0: _int, dim1: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: ... +@overload +def trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor: ... +@overload +def trapz(y: Tensor, *, dx: _float = 1, dim: _int = -1) -> Tensor: ... +@overload +def trapz(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: ... 
+def triangular_solve(input: Tensor, A: Tensor, upper: _bool = True, transpose: _bool = False, unitriangular: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.triangular_solve: ... +def tril(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ... +def tril_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: _float = 1.0, p: _float = 2, eps: _float = 1e-06, swap: _bool = False, reduction: _int = 1) -> Tensor: ... +def triu(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ... +def triu_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def true_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor: ... +def trunc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def trunc_(input: Tensor) -> Tensor: ... +@overload +def unbind(input: Tensor, dim: _int = 0) -> List[Tensor]: ... +@overload +def unbind(input: Tensor, dim: Union[str, ellipsis, None]) -> List[Tensor]: ... +def unbind_copy(input: Tensor, dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: ... +@overload +def unflatten(input: Tensor, dim: Union[str, ellipsis, None], sizes: Sequence[Union[_int, SymInt]], names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ... +@overload +def unflatten(input: Tensor, dim: _int, sizes: Sequence[Union[_int, SymInt]]) -> Tensor: ... +def unfold_copy(input: Tensor, dimension: _int, size: _int, step: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def unique_dim(input: Tensor, dim: _int, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ... +def unsafe_chunk(input: Tensor, chunks: _int, dim: _int = 0) -> List[Tensor]: ... +def unsafe_split(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0) -> List[Tensor]: ... +def unsafe_split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> List[Tensor]: ... +def unsqueeze(input: Tensor, dim: _int) -> Tensor: ... +def unsqueeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def vander(x: Tensor, N: Optional[_int] = None, increasing: _bool = False) -> Tensor: ... +@overload +def var(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def var(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def var(input: Tensor, unbiased: _bool = True) -> Tensor: ... +@overload +def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... 
+@overload +def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def var_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def var_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def var_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]: ... +@overload +def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ... +def vdot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def view_as_complex(input: Tensor) -> Tensor: ... +def view_as_complex_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def view_as_real(input: Tensor) -> Tensor: ... +def view_as_real_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def view_copy(input: Tensor, dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def view_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def vsplit(input: Tensor, sections: _int) -> List[Tensor]: ... +@overload +def vsplit(input: Tensor, indices: _size) -> List[Tensor]: ... +def vstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def where(condition: Tensor) -> List[Tensor]: ... +@overload +def where(condition: Tensor, input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def where(condition: Tensor, self: Union[Number, _complex], other: Tensor) -> Tensor: ... +@overload +def where(condition: Tensor, input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def where(condition: Tensor, self: Union[Number, _complex], other: Union[Number, _complex]) -> Tensor: ... +@overload +def xlogy(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def xlogy(self: Union[Number, _complex], other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def xlogy(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def xlogy_(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def xlogy_(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +def zero_(input: Tensor) -> Tensor: ... +@overload +def zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def zeros(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... 
+@overload +def zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def zeros_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... + +__all__ = ['__and__', '__lshift__', '__or__', '__rshift__', '__xor__', '_adaptive_avg_pool2d', + '_adaptive_avg_pool3d', '_add_batch_dim', '_add_relu', '_add_relu_', '_addmm_activation', + '_aminmax', '_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_assert_async', + '_assert_tensor_metadata', '_batch_norm_impl_index', '_cast_Byte', '_cast_Char', '_cast_Double', + '_cast_Float', '_cast_Half', '_cast_Int', '_cast_Long', '_cast_Short', + '_choose_qparams_per_tensor', '_coalesce', '_compute_linear_combination', '_conj', '_conj_copy', + '_conj_physical', '_convert_indices_from_coo_to_csr', '_convert_indices_from_csr_to_coo', + '_convert_weight_to_int4pack', '_convolution', '_convolution_mode', '_copy_from', + '_copy_from_and_resize', '_cslt_compress', '_cslt_sparse_mm', '_ctc_loss', '_cudnn_ctc_loss', + '_cudnn_init_dropout_state', '_cudnn_rnn', '_cudnn_rnn_flatten_weight', '_cufft_clear_plan_cache', + '_cufft_get_plan_cache_max_size', '_cufft_get_plan_cache_size', '_cufft_set_plan_cache_max_size', + '_cummax_helper', '_cummin_helper', '_debug_has_internal_overlap', '_dim_arange', + '_dirichlet_grad', '_disable_functionalization', '_efficientzerotensor', '_embedding_bag', + '_embedding_bag_forward_only', '_empty_affine_quantized', '_empty_per_channel_affine_quantized', + '_enable_functionalization', '_euclidean_dist', '_fake_quantize_learnable_per_channel_affine', + '_fake_quantize_learnable_per_tensor_affine', + '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', + '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', '_fft_c2c', '_fft_c2r', '_fft_r2c', + '_fill_mem_eff_dropout_mask_', '_foobar', '_foreach_abs', '_foreach_abs_', '_foreach_acos', + '_foreach_acos_', '_foreach_add', '_foreach_add_', '_foreach_addcdiv', '_foreach_addcdiv_', + '_foreach_addcmul', '_foreach_addcmul_', '_foreach_asin', '_foreach_asin_', '_foreach_atan', + '_foreach_atan_', '_foreach_ceil', '_foreach_ceil_', '_foreach_clamp_max', '_foreach_clamp_max_', + '_foreach_clamp_min', '_foreach_clamp_min_', '_foreach_copy_', '_foreach_cos', '_foreach_cos_', + '_foreach_cosh', '_foreach_cosh_', '_foreach_div', '_foreach_div_', '_foreach_erf', + '_foreach_erf_', '_foreach_erfc', '_foreach_erfc_', '_foreach_exp', '_foreach_exp_', + '_foreach_expm1', '_foreach_expm1_', '_foreach_floor', '_foreach_floor_', '_foreach_frac', + '_foreach_frac_', '_foreach_lerp', '_foreach_lerp_', '_foreach_lgamma', '_foreach_lgamma_', + '_foreach_log', '_foreach_log10', '_foreach_log10_', '_foreach_log1p', '_foreach_log1p_', + '_foreach_log2', '_foreach_log2_', '_foreach_log_', '_foreach_maximum', '_foreach_maximum_', + 
'_foreach_minimum', '_foreach_minimum_', '_foreach_mul', '_foreach_mul_', '_foreach_neg', + '_foreach_neg_', '_foreach_norm', '_foreach_pow', '_foreach_pow_', '_foreach_reciprocal', + '_foreach_reciprocal_', '_foreach_round', '_foreach_round_', '_foreach_sigmoid', + '_foreach_sigmoid_', '_foreach_sign', '_foreach_sign_', '_foreach_sin', '_foreach_sin_', + '_foreach_sinh', '_foreach_sinh_', '_foreach_sqrt', '_foreach_sqrt_', '_foreach_sub', + '_foreach_sub_', '_foreach_tan', '_foreach_tan_', '_foreach_tanh', '_foreach_tanh_', + '_foreach_trunc', '_foreach_trunc_', '_foreach_zero_', '_from_functional_tensor', + '_functional_assert_async', '_functional_sym_constrain_range', + '_functional_sym_constrain_range_for_size', + '_functionalize_are_all_mutations_hidden_from_autograd', + '_functionalize_are_all_mutations_under_no_grad_or_inference_mode', '_functionalize_commit_update', + '_functionalize_mark_mutation_hidden_from_autograd', '_functionalize_replace', + '_functionalize_sync', '_fused_adam_', '_fused_adamw_', '_fused_dropout', + '_fused_moving_avg_obs_fq_helper', '_fused_moving_avg_obs_fq_helper', '_fused_sdp_choice', + '_fw_primal_copy', '_grid_sampler_2d_cpu_fallback', '_has_compatible_shallow_copy_type', + '_histogramdd_bin_edges', '_histogramdd_from_bin_cts', '_histogramdd_from_bin_tensors', + '_index_put_impl_', '_indices_copy', '_int_mm', '_is_all_true', '_is_any_true', + '_is_functional_tensor', '_is_zerotensor', '_linalg_check_errors', '_linalg_det', '_linalg_det', + '_linalg_eigh', '_linalg_eigh', '_linalg_slogdet', '_linalg_slogdet', '_linalg_solve_ex', + '_linalg_solve_ex', '_linalg_svd', '_linalg_svd', '_log_softmax', '_log_softmax_backward_data', + '_logcumsumexp', '_lstm_mps', '_lu_with_info', '_lu_with_info', '_make_dep_token', '_make_dual', + '_make_dual_copy', '_make_per_channel_quantized_tensor', '_make_per_tensor_quantized_tensor', + '_masked_scale', '_masked_softmax', '_mixed_dtypes_linear', '_mkldnn_reshape', '_mkldnn_transpose', + '_mkldnn_transpose_', '_mps_convolution', '_mps_convolution_transpose', '_native_batch_norm_legit', + '_native_batch_norm_legit_no_training', '_native_multi_head_attention', '_neg_view', + '_neg_view_copy', '_nested_from_padded', '_nested_from_padded_and_nested_example', + '_nested_tensor_from_mask', '_nested_tensor_from_mask_left_aligned', + '_nested_tensor_from_tensor_list', '_nested_tensor_softmax_with_shape', '_nested_view_from_buffer', + '_nested_view_from_buffer_copy', '_nnpack_available', '_nnpack_spatial_convolution', + '_pack_padded_sequence', '_pad_packed_sequence', '_pin_memory', '_prelu_kernel', + '_propagate_xla_data', '_remove_batch_dim', '_reshape_alias_copy', '_reshape_from_tensor', + '_resize_output_', '_rowwise_prune', '_sample_dirichlet', '_saturate_weight_to_fp16', + '_scaled_dot_product_attention_math', '_scaled_dot_product_efficient_attention', + '_scaled_dot_product_efficient_attention', '_scaled_dot_product_flash_attention', + '_scaled_dot_product_flash_attention', '_scaled_mm', '_shape_as_tensor', '_sobol_engine_draw', + '_sobol_engine_ff_', '_sobol_engine_initialize_state_', '_sobol_engine_scramble_', '_softmax', + '_softmax_backward_data', '_sparse_broadcast_to', '_sparse_broadcast_to_copy', '_sparse_csr_prod', + '_sparse_csr_sum', '_sparse_log_softmax_backward_data', '_sparse_semi_structured_linear', + '_sparse_softmax_backward_data', '_sparse_sparse_matmul', '_sparse_sum', '_stack', + '_standard_gamma', '_standard_gamma_grad', '_sync', '_test_autograd_multiple_dispatch', + 
'_test_autograd_multiple_dispatch_view', '_test_autograd_multiple_dispatch_view_copy', + '_test_check_tensor', '_test_functorch_fallback', '_test_serialization_subcmul', '_to_cpu', + '_to_functional_tensor', '_to_sparse_semi_structured', '_transform_bias_rescale_qkv', + '_transformer_encoder_layer_fwd', '_trilinear', '_triton_multi_head_attention', + '_triton_scaled_dot_attention', '_unique', '_unique2', '_unpack_dual', '_unpack_dual', + '_unsafe_index', '_unsafe_index_put', '_use_cudnn_ctc_loss', '_use_cudnn_rnn_flatten_weight', + '_validate_compressed_sparse_indices', '_validate_sparse_bsc_tensor_args', + '_validate_sparse_bsr_tensor_args', '_validate_sparse_compressed_tensor_args', + '_validate_sparse_coo_tensor_args', '_validate_sparse_csc_tensor_args', + '_validate_sparse_csr_tensor_args', '_values_copy', '_weight_int4pack_mm', '_weight_norm', + '_weight_norm_interface', 'abs', 'abs_', 'absolute', 'acos', 'acos_', 'acosh', 'acosh_', + 'adaptive_avg_pool1d', 'adaptive_max_pool1d', 'add', 'addbmm', 'addcdiv', 'addcmul', 'addmm', + 'addmv', 'addmv_', 'addr', 'adjoint', 'affine_grid_generator', 'alias_copy', 'all', 'allclose', + 'alpha_dropout', 'alpha_dropout_', 'amax', 'amin', 'aminmax', 'aminmax', 'angle', 'any', 'arange', + 'arccos', 'arccos_', 'arccosh', 'arccosh_', 'arcsin', 'arcsin_', 'arcsinh', 'arcsinh_', 'arctan', + 'arctan2', 'arctan_', 'arctanh', 'arctanh_', 'argmax', 'argmin', 'argsort', 'argwhere', + 'as_strided', 'as_strided_', 'as_strided_copy', 'as_strided_scatter', 'as_tensor', 'asarray', + 'asin', 'asin_', 'asinh', 'asinh_', 'atan', 'atan2', 'atan_', 'atanh', 'atanh_', 'avg_pool1d', + 'baddbmm', 'bartlett_window', 'batch_norm', 'batch_norm_backward_elemt', + 'batch_norm_backward_reduce', 'batch_norm_elemt', 'batch_norm_gather_stats', + 'batch_norm_gather_stats_with_counts', 'batch_norm_stats', 'batch_norm_update_stats', 'bernoulli', + 'bilinear', 'binary_cross_entropy_with_logits', 'bincount', 'binomial', 'bitwise_and', + 'bitwise_left_shift', 'bitwise_not', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', + 'blackman_window', 'bmm', 'broadcast_to', 'bucketize', 'can_cast', 'cat', 'ccol_indices_copy', + 'ceil', 'ceil_', 'celu', 'celu_', 'channel_shuffle', 'cholesky', 'cholesky_inverse', + 'cholesky_solve', 'choose_qparams_optimized', 'chunk', 'clamp', 'clamp_', 'clamp_max', + 'clamp_max_', 'clamp_min', 'clamp_min_', 'clip', 'clip_', 'clone', 'col_indices_copy', + 'column_stack', 'combinations', 'complex', 'concat', 'concatenate', 'conj', 'conj_physical', + 'conj_physical_', 'constant_pad_nd', 'conv1d', 'conv2d', 'conv3d', 'conv_tbc', 'conv_transpose1d', + 'conv_transpose2d', 'conv_transpose3d', 'convolution', 'copysign', 'corrcoef', 'cos', 'cos_', + 'cosh', 'cosh_', 'cosine_embedding_loss', 'cosine_similarity', 'count_nonzero', 'cov', 'cross', + 'crow_indices_copy', 'ctc_loss', 'cudnn_affine_grid_generator', 'cudnn_batch_norm', + 'cudnn_convolution', 'cudnn_convolution_add_relu', 'cudnn_convolution_relu', + 'cudnn_convolution_transpose', 'cudnn_grid_sampler', 'cudnn_is_acceptable', 'cummax', 'cummax', + 'cummin', 'cummin', 'cumprod', 'cumsum', 'cumulative_trapezoid', 'deg2rad', 'deg2rad_', + 'dequantize', 'det', 'detach', 'detach_', 'detach_copy', 'diag', 'diag_embed', 'diagflat', + 'diagonal', 'diagonal_copy', 'diagonal_scatter', 'diff', 'digamma', 'dist', 'div', 'divide', 'dot', + 'dropout', 'dropout_', 'dsmm', 'dsplit', 'dstack', 'embedding', 'embedding_bag', + 'embedding_renorm_', 'empty', 'empty_like', 'empty_permuted', 'empty_quantized', 'empty_strided', + 
'eq', 'equal', 'erf', 'erf_', 'erfc', 'erfc_', 'erfinv', 'exp', 'exp2', 'exp2_', 'exp_', + 'expand_copy', 'expm1', 'expm1_', 'eye', 'fake_quantize_per_channel_affine', + 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight', + 'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight', + 'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight', + 'fbgemm_pack_gemm_matrix_fp16', 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout', + 'feature_alpha_dropout_', 'feature_dropout', 'feature_dropout_', 'fill', 'fill_', 'fix', 'fix_', + 'flatten', 'flip', 'fliplr', 'flipud', 'float_power', 'floor', 'floor_', 'floor_divide', 'fmax', + 'fmin', 'fmod', 'frac', 'frac_', 'frexp', 'frexp', 'frobenius_norm', 'from_file', 'from_numpy', + 'frombuffer', 'full', 'full_like', 'fused_moving_avg_obs_fake_quant', 'gather', 'gcd', 'gcd_', + 'ge', 'geqrf', 'geqrf', 'ger', 'get_default_dtype', 'get_num_interop_threads', 'get_num_threads', + 'gradient', 'greater', 'greater_equal', 'grid_sampler', 'grid_sampler_2d', 'grid_sampler_3d', + 'group_norm', 'gru', 'gru_cell', 'gt', 'hamming_window', 'hann_window', 'hardshrink', 'heaviside', + 'hinge_embedding_loss', 'histc', 'histogram', 'histogram', 'histogramdd', 'histogramdd', 'hsmm', + 'hsplit', 'hspmm', 'hstack', 'hypot', 'i0', 'i0_', 'igamma', 'igammac', 'imag', 'index_add', + 'index_copy', 'index_fill', 'index_put', 'index_put_', 'index_reduce', 'index_select', + 'indices_copy', 'init_num_threads', 'inner', 'instance_norm', 'int_repr', 'inverse', 'is_complex', + 'is_conj', 'is_distributed', 'is_floating_point', 'is_grad_enabled', 'is_inference', + 'is_inference_mode_enabled', 'is_neg', 'is_nonzero', 'is_same_size', 'is_signed', + 'is_vulkan_available', 'isclose', 'isfinite', 'isin', 'isinf', 'isnan', 'isneginf', 'isposinf', + 'isreal', 'istft', 'kaiser_window', 'kl_div', 'kron', 'kthvalue', 'kthvalue', 'layer_norm', 'lcm', + 'lcm_', 'ldexp', 'ldexp_', 'le', 'lerp', 'less', 'less_equal', 'lgamma', 'linspace', 'log', + 'log10', 'log10_', 'log1p', 'log1p_', 'log2', 'log2_', 'log_', 'log_softmax', 'logaddexp', + 'logaddexp2', 'logcumsumexp', 'logdet', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', + 'logit', 'logit_', 'logspace', 'logsumexp', 'lstm', 'lstm_cell', 'lt', 'lu_solve', 'lu_unpack', + 'lu_unpack', 'margin_ranking_loss', 'masked_fill', 'masked_scatter', 'masked_select', 'matmul', + 'matrix_exp', 'matrix_power', 'max', 'max', 'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d', + 'max_pool3d', 'maximum', 'mean', 'median', 'median', 'min', 'min', 'minimum', 'miopen_batch_norm', + 'miopen_convolution', 'miopen_convolution_add_relu', 'miopen_convolution_relu', + 'miopen_convolution_transpose', 'miopen_depthwise_convolution', 'miopen_rnn', + 'mkldnn_adaptive_avg_pool2d', 'mkldnn_convolution', 'mkldnn_linear_backward_weights', + 'mkldnn_max_pool2d', 'mkldnn_max_pool3d', 'mkldnn_rnn_layer', 'mm', 'mode', 'mode', 'moveaxis', + 'movedim', 'msort', 'mul', 'multinomial', 'multiply', 'mv', 'mvlgamma', 'nan_to_num', + 'nan_to_num_', 'nanmean', 'nanmedian', 'nanmedian', 'nanquantile', 'nansum', 'narrow', + 'narrow_copy', 'native_batch_norm', 'native_channel_shuffle', 'native_dropout', + 'native_group_norm', 'native_layer_norm', 'native_norm', 'ne', 'neg', 'neg_', 'negative', + 'negative_', 'nextafter', 'nonzero', 'nonzero_static', 'norm_except_dim', 'normal', 'not_equal', + 'nuclear_norm', 'numel', 'ones', 'ones_like', 'orgqr', 'ormqr', 'outer', 'pairwise_distance', + 'pdist', 'permute', 'permute_copy', 'pinverse', 
'pixel_shuffle', 'pixel_unshuffle', 'poisson', + 'poisson_nll_loss', 'polar', 'polygamma', 'positive', 'pow', 'prelu', 'prod', 'promote_types', + 'put', 'q_per_channel_axis', 'q_per_channel_scales', 'q_per_channel_zero_points', 'q_scale', + 'q_zero_point', 'qr', 'qr', 'quantile', 'quantize_per_channel', 'quantize_per_tensor', + 'quantize_per_tensor_dynamic', 'quantized_batch_norm', 'quantized_gru_cell', 'quantized_lstm_cell', + 'quantized_max_pool1d', 'quantized_max_pool2d', 'quantized_max_pool3d', 'quantized_rnn_relu_cell', + 'quantized_rnn_tanh_cell', 'rad2deg', 'rad2deg_', 'rand', 'rand_like', 'randint', 'randint_like', + 'randn', 'randn_like', 'randperm', 'range', 'ravel', 'real', 'reciprocal', 'reciprocal_', 'relu', + 'relu_', 'remainder', 'renorm', 'repeat_interleave', 'reshape', 'resize_as_', 'resize_as_sparse_', + 'resolve_conj', 'resolve_neg', 'result_type', 'rnn_relu', 'rnn_relu_cell', 'rnn_tanh', + 'rnn_tanh_cell', 'roll', 'rot90', 'round', 'round_', 'row_indices_copy', 'row_stack', 'rrelu', + 'rrelu_', 'rsqrt', 'rsqrt_', 'rsub', 'saddmm', 'scalar_tensor', 'scatter', 'scatter_add', + 'scatter_reduce', 'searchsorted', 'segment_reduce', 'select', 'select_copy', 'select_scatter', + 'selu', 'selu_', 'set_flush_denormal', 'set_num_interop_threads', 'set_num_threads', 'sgn', + 'sigmoid', 'sigmoid_', 'sign', 'signbit', 'sin', 'sin_', 'sinc', 'sinc_', 'sinh', 'sinh_', + 'slice_copy', 'slice_scatter', 'slogdet', 'slogdet', 'smm', 'softmax', 'sort', 'sort', + 'sparse_bsc_tensor', 'sparse_bsr_tensor', 'sparse_compressed_tensor', 'sparse_coo_tensor', + 'sparse_csc_tensor', 'sparse_csr_tensor', 'split_copy', 'split_with_sizes', + 'split_with_sizes_copy', 'spmm', 'sqrt', 'sqrt_', 'square', 'square_', 'squeeze', 'squeeze_copy', + 'sspaddmm', 'stack', 'std', 'std_mean', 'sub', 'subtract', 'sum', 'svd', 'svd', 'swapaxes', + 'swapdims', 'sym_constrain_range', 'sym_constrain_range_for_size', 't', 't_copy', 'take', + 'take_along_dim', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor', 'tensor_split', 'threshold', + 'threshold_', 'tile', 'topk', 'topk', 'trace', 'transpose', 'transpose_copy', 'trapezoid', 'trapz', + 'triangular_solve', 'triangular_solve', 'tril', 'tril_indices', 'triplet_margin_loss', 'triu', + 'triu_indices', 'true_divide', 'trunc', 'trunc_', 'unbind', 'unbind_copy', 'unflatten', + 'unfold_copy', 'unique_dim', 'unsafe_chunk', 'unsafe_split', 'unsafe_split_with_sizes', + 'unsqueeze', 'unsqueeze_copy', 'values_copy', 'vander', 'var', 'var_mean', 'vdot', + 'view_as_complex', 'view_as_complex_copy', 'view_as_real', 'view_as_real_copy', 'view_copy', + 'vsplit', 'vstack', 'where', 'xlogy', 'xlogy_', 'zero_', 'zeros', 'zeros_like'] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_autograd.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_autograd.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4b62950fe85c131f0faf83562d5822ea68133298 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_autograd.pyi @@ -0,0 +1,123 @@ +from enum import Enum +from typing import Any, Callable, List, Optional, Set + +import torch + +from ._profiler import ( + _ProfilerEvent, + ActiveProfilerType, + ProfilerActivity, + ProfilerConfig, +) + +# Defined in tools/autograd/init.cpp + +class DeviceType(Enum): + CPU = ... + CUDA = ... + MKLDNN = ... + OPENGL = ... + OPENCL = ... + IDEEP = ... + HIP = ... + FPGA = ... + ORT = ... + XLA = ... + MPS = ... + HPU = ... + Meta = ... + Vulkan = ... + Metal = ... + PrivateUse1 = ... 
+ +class ProfilerEvent: + def cpu_elapsed_us(self, other: ProfilerEvent) -> float: ... + def cpu_memory_usage(self) -> int: ... + def cuda_elapsed_us(self, other: ProfilerEvent) -> float: ... + def privateuse1_elapsed_us(self, other: ProfilerEvent) -> float: ... + def cuda_memory_usage(self) -> int: ... + def device(self) -> int: ... + def handle(self) -> int: ... + def has_cuda(self) -> bool: ... + def is_remote(self) -> bool: ... + def kind(self) -> int: ... + def name(self) -> str: ... + def node_id(self) -> int: ... + def sequence_nr(self) -> int: ... + def shapes(self) -> List[List[int]]: ... + def thread_id(self) -> int: ... + def flops(self) -> float: ... + def is_async(self) -> bool: ... + +class _KinetoEvent: + def name(self) -> str: ... + def device_index(self) -> int: ... + def start_us(self) -> int: ... + def duration_us(self) -> int: ... + def is_async(self) -> bool: ... + def linked_correlation_id(self) -> int: ... + def shapes(self) -> List[List[int]]: ... + def dtypes(self) -> List[str]: ... + def concrete_inputs(self) -> List[Any]: ... + def device_type(self) -> DeviceType: ... + def start_thread_id(self) -> int: ... + def end_thread_id(self) -> int: ... + def correlation_id(self) -> int: ... + def fwd_thread_id(self) -> int: ... + def stack(self) -> List[str]: ... + def scope(self) -> int: ... + def sequence_nr(self) -> int: ... + def flops(self) -> int: ... + def cuda_elapsed_us(self) -> int: ... + def privateuse1_elapsed_us(self) -> int: ... + +class _ProfilerResult: + def events(self) -> List[_KinetoEvent]: ... + def legacy_events(self) -> List[List[ProfilerEvent]]: ... + def save(self, path: str) -> None: ... + def experimental_event_tree(self) -> List[_ProfilerEvent]: ... + def trace_start_us(self) -> int: ... + +class SavedTensor: ... + +def _enable_profiler( + config: ProfilerConfig, + activities: Set[ProfilerActivity], +) -> None: ... +def _prepare_profiler( + config: ProfilerConfig, + activities: Set[ProfilerActivity], +) -> None: ... +def _disable_profiler() -> _ProfilerResult: ... +def _profiler_enabled() -> bool: ... +def _add_metadata_json(key: str, value: str) -> None: ... +def _kineto_step() -> None: ... +def _get_sequence_nr() -> int: ... +def kineto_available() -> bool: ... +def _record_function_with_args_enter(name: str, *args) -> torch.Tensor: ... +def _record_function_with_args_exit(handle: torch.Tensor) -> None: ... +def _supported_activities() -> Set[ProfilerActivity]: ... +def _enable_record_function(enable: bool) -> None: ... +def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ... +def _push_saved_tensors_default_hooks( + pack_hook: Callable[[torch.Tensor], Any], + unpack_hook: Callable[[Any], torch.Tensor], +) -> None: ... +def _pop_saved_tensors_default_hooks() -> None: ... +def _unsafe_set_version_counter(t: torch.Tensor, prev_version: int) -> None: ... +def _enable_profiler_legacy(config: ProfilerConfig) -> None: ... +def _disable_profiler_legacy() -> List[List[ProfilerEvent]]: ... +def _profiler_type() -> ActiveProfilerType: ... +def _saved_tensors_hooks_enable() -> None: ... +def _saved_tensors_hooks_disable(message: str) -> None: ... +def _saved_tensors_hooks_get_disabled_error_message() -> Optional[str]: ... + +class CreationMeta(Enum): + DEFAULT = ... + IN_CUSTOM_FUNCTION = ... + MULTI_OUTPUT_NODE = ... + NO_GRAD_MODE = ... + INFERENCE_MODE = ... + +def _set_creation_meta(t: torch.Tensor, creation_meta: CreationMeta) -> None: ... +def _get_creation_meta(t: torch.Tensor) -> CreationMeta: ... 
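Aside (illustrative sketch, not part of the vendored diff): the saved-tensor hook bindings declared above (_push_saved_tensors_default_hooks / _pop_saved_tensors_default_hooks) are normally reached through the public torch.autograd.graph.saved_tensors_hooks context manager. A minimal usage sketch:

import torch

def pack(t: torch.Tensor):
    # Called whenever autograd saves a tensor for the backward pass; whatever
    # is returned here is handed back to unpack() when the gradient is computed.
    return t.detach().clone()

def unpack(saved) -> torch.Tensor:
    # Must return the tensor autograd expected to find.
    return saved

x = torch.randn(4, requires_grad=True)
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
    y = (x * x).sum()
y.backward()   # gradients flow through the packed/unpacked activations
print(x.grad)  # 2 * x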
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_cpu.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_cpu.pyi new file mode 100644 index 0000000000000000000000000000000000000000..075fecf45d5a239849dd276a5ca79b4d30ed6120 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_cpu.pyi @@ -0,0 +1,5 @@ +from torch.types import _bool + +# Defined in torch/csrc/cpu/Module.cpp + +def _is_cpu_support_vnni() -> _bool: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f4c91304a1b1a83c24bfbfd108dc234aaa3615bc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi @@ -0,0 +1,26 @@ +from typing import Any, Dict, List, Set + +import torch + +# This module is defined in torch/csrc/distributed/autograd/init.cpp + +class DistAutogradContext: + def _context_id(self) -> int: ... + def _recv_functions(self) -> Dict[int, Any]: ... + def _send_functions(self) -> Dict[int, Any]: ... + def _known_worker_ids(self) -> Set[int]: ... + +def _new_context() -> DistAutogradContext: ... +def _release_context(context_id: int) -> None: ... +def _get_max_id() -> int: ... +def _is_valid_context(worker_id: int) -> bool: ... +def _retrieve_context(context_id: int) -> DistAutogradContext: ... +def _current_context() -> DistAutogradContext: ... +def _init(worker_id: int) -> None: ... +def _get_debug_info() -> Dict[str, str]: ... +def backward( + context_id: int, + roots: List[torch.Tensor], + retain_graph=False, +) -> None: ... +def get_gradients(context_id: int) -> Dict[torch.Tensor, torch.Tensor]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8f3153d67a9380b96a26a48944062d06640bf33c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi @@ -0,0 +1,478 @@ +# mypy: disable-error-code="type-arg" +from datetime import timedelta +from enum import Enum +from typing import Any, Dict, List, Optional, overload, Tuple, Union + +from torch import Tensor +from torch._C import ScriptObject +from torch.futures import Future + +# This module is defined in torch/csrc/distributed/c10d/init.cpp + +_DEFAULT_FIRST_BUCKET_BYTES: int +_DEFAULT_NO_TIMEOUT: timedelta +_DEFAULT_PG_TIMEOUT: timedelta +_DEFAULT_PG_NCCL_TIMEOUT: timedelta + +class BuiltinCommHookType(Enum): + ALLREDUCE = ... + FP16_COMPRESS = ... + +def _register_comm_hook(reducer: Reducer, state: Any, comm_hook: Any): ... +def _register_builtin_comm_hook( + reducer: Reducer, + comm_hook_type: BuiltinCommHookType, +): ... + +class GradBucket: + def index(self) -> int: ... + def buffer(self) -> Tensor: ... + def gradients(self) -> List[Tensor]: ... + def is_last(self) -> bool: ... + def set_buffer(self, tensor: Tensor) -> None: ... + def parameters(self) -> List[Tensor]: ... 
+ +class Reducer: + def __init__( + self, + params: List[Tensor], + bucket_indices: List[List[int]], + per_bucket_size_limits: List[int], + process_group: ProcessGroup, + expect_sparse_gradients: List[bool] = ..., + bucket_bytes_cap: int = ..., # kDefaultBucketBytesCap in reducer.hpp + find_unused_parameters: bool = ..., + gradient_as_bucket_view: bool = ..., + param_to_name_mapping: Dict[int, str] = ..., + first_bucket_types_cap: int = ..., # kDefaultFirstBucketBytes in reducer.hpp + ): ... + def prepare_for_forward(self) -> None: ... + def prepare_for_backward(self, output: List[Tensor]) -> None: ... + def get_backward_stats(self) -> List[int]: ... + def _install_post_backward_futures(self, futures: List[Future]) -> None: ... + def _rebuild_buckets(self) -> bool: ... + def _get_zeros_like_grad_buckets(self) -> List[GradBucket]: ... + def _push_all_rebuilt_params(self) -> None: ... + def _set_forward_pass_work_handle( + self, + work: Work, + use_static_world_size: bool, + ): ... + def _get_local_used_map(self) -> Tensor: ... + def _set_ddp_runtime_logging_sample_rate(self, sample_rate: int) -> None: ... + def _set_static_graph(self) -> None: ... + def _run_comm_hook(self, bucket: GradBucket) -> Future: ... + def set_logger(self, logger: Logger) -> None: ... + def _remove_autograd_hooks(self) -> None: ... + def _check_reducer_finalized(self) -> None: ... + def _set_sparse_metadata(self, global_unique_ids: Dict[str, Tensor]) -> None: ... + def _reset_state(self) -> None: ... + def _update_process_group(self, new_process_group: ProcessGroup) -> None: ... + +class DDPLoggingData: + strs_map: Dict[str, str] + ints_map: Dict[str, int] + +class Logger: + def __init__(self, reducer: Reducer): ... + def set_construction_data_and_log( + self, + module_name: str, + device_ids: List[int], + output_device: int, + broadcast_buffers: bool, + has_sync_bn: bool, + static_graph: bool, + ): ... + def set_runtime_stats_and_log(self) -> None: ... + def set_error_and_log(self, error: str) -> None: ... + def _get_ddp_logging_data(self) -> DDPLoggingData: ... + def _set_comm_hook_name(self, comm_hook: str) -> None: ... + def _set_uneven_input_join(self) -> None: ... + def _set_static_graph(self) -> None: ... + +def get_debug_level(): ... +def set_debug_level(): ... +def set_debug_level_from_env(): ... + +class DebugLevel(Enum): + OFF = ... + INFO = ... + DETAIL = ... + +class ReduceOp: + def __init__(self, op: RedOpType): ... + + SUM: RedOpType = ... + AVG: RedOpType = ... + PRODUCT: RedOpType = ... + MIN: RedOpType = ... + MAX: RedOpType = ... + BAND: RedOpType = ... + BOR: RedOpType = ... + BXOR: RedOpType = ... + PREMUL_SUM: RedOpType = ... + UNUSED: RedOpType = ... + + class RedOpType(Enum): ... + +class BroadcastOptions: + rootRank: int + rootTensor: int + timeout: timedelta + asyncOp: bool + +class AllreduceOptions: + reduceOp: ReduceOp + timeout: timedelta + +class AllreduceCoalescedOptions(AllreduceOptions): ... + +class ReduceOptions: + reduceOp: ReduceOp + rootRank: int + rootTensor: int + timeout: timedelta + +class AllgatherOptions: + timeout: timedelta + asyncOp: bool + +class GatherOptions: + rootRank: int + timeout: timedelta + +class ScatterOptions: + rootRank: int + timeout: timedelta + asyncOp: bool + +class ReduceScatterOptions: + reduceOp: ReduceOp + timeout: timedelta + asyncOp: bool + +class BarrierOptions: + device_ids: List[int] + timeout: timedelta + +class AllToAllOptions: + timeout: timedelta + +class Store: + def set(self, key: str, value: str): ... 
+ def get(self, key: str) -> bytes: ... + def add(self, key: str, value: int) -> int: ... + def compare_set( + self, + key: str, + expected_value: str, + desired_value: str, + ) -> bytes: ... + def delete_key(self, key: str) -> bool: ... + def num_keys(self) -> int: ... + def set_timeout(self, timeout: timedelta): ... + @overload + def wait(self, keys: List[str]): ... + @overload + def wait(self, keys: List[str], timeout: timedelta): ... + +class FileStore(Store): + def __init__(self, path: str, numWorkers: int = ...): ... + +class HashStore(Store): + def __init__(self): ... + +class TCPStore(Store): + def __init__( + self, + host_name: str, + port: int, + world_size: Optional[int] = ..., + is_master: bool = ..., + timeout: timedelta = ..., + wait_for_workers: bool = ..., + multi_tenant: bool = ..., + master_listen_fd: Optional[int] = ..., + use_libuv: Optional[bool] = ..., + ): ... + @property + def host(self) -> str: ... + @property + def port(self) -> int: ... + +class PrefixStore(Store): + def __init__(self, prefix: str, store: Store): ... + @property + def underlying_store(self) -> Store: ... + +class Work: + def is_completed(self) -> bool: ... + def is_success(self) -> bool: ... + def exception(self) -> Any: ... + def wait(self, timeout: timedelta = ...) -> bool: ... + def source_rank(self) -> int: ... + def _source_rank(self) -> int: ... + def result(self) -> List[Tensor]: ... + def synchronize(self): ... + def boxed(self) -> ScriptObject: ... + @staticmethod + def unbox(obj: ScriptObject) -> Work: ... + +class ProcessGroup: + class Options: ... + + def __init__(self): ... + def rank(self) -> int: ... + def size(self) -> int: ... + @overload + def broadcast( + self, + tensors: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def broadcast( + self, + tensor: Tensor, + root: int, + ) -> Work: ... + @overload + def allreduce( + self, + tensors: List[Tensor], + opts: AllreduceOptions = ..., + ) -> Work: ... + @overload + def allreduce( + self, + tensors: List[Tensor], + op=..., + ) -> Work: ... + @overload + def allreduce( + self, + tensor: Tensor, + op=..., + ) -> Work: ... + def allreduce_coalesced( + self, + tensors: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def reduce( + self, + tensors: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def reduce( + self, + tensor: Tensor, + root: int, + op=..., + ) -> Work: ... + @overload + def allgather( + self, + output_tensors: List[List[Tensor]], + input_tensors: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def allgather( + self, + output_tensors: List[Tensor], + input_tensor: Tensor, + ) -> Work: ... + def _allgather_base( + self, + output: Tensor, + input: Tensor, + opts=..., + ) -> Work: ... + def allgather_coalesced( + self, + output_lists: List[List[Tensor]], + input_list: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def gather( + self, + output_tensors: List[List[Tensor]], + input_tensors: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def gather( + self, + output_tensors: List[Tensor], + input_tensor: Tensor, + root: int, + ) -> Work: ... + @overload + def scatter( + self, + output_tensors: List[Tensor], + input_tensors: List[List[Tensor]], + opts=..., + ) -> Work: ... + @overload + def scatter( + self, + output_tensor: Tensor, + input_tensors: List[Tensor], + root: int, + ) -> Work: ... + @overload + def reduce_scatter( + self, + output_tensors: List[Tensor], + input_tensors: List[List[Tensor]], + opts=..., + ) -> Work: ... 
+ @overload + def reduce_scatter( + self, + output_tensors: Tensor, + input_tensor: List[Tensor], + ) -> Work: ... + def _reduce_scatter_base( + self, + outputTensor: Tensor, + inputTensor: Tensor, + ) -> Work: ... + @overload + def alltoall_base( + self, + output_tensor: Tensor, + input_tensor: Tensor, + output_split_sizes: List[int], + input_split_sizes: List[int], + opts=..., + ) -> Work: ... + @overload + def alltoall_base( + self, + output: Tensor, + input: Tensor, + output_split_sizes: List[int], + input_split_sizes: List[int], + ) -> Work: ... + @overload + def alltoall( + self, + output_tensor: List[Tensor], + input_tensor: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def alltoall( + self, + output: List[Tensor], + input: List[Tensor], + ) -> Work: ... + def send( + self, + tensors: List[Tensor], + dstRank: int, + tag: int, + ) -> Work: ... + def recv( + self, + tensors: List[Tensor], + srcRank: int, + tag: int, + ) -> Work: ... + def recv_anysource(self, tensors: List[Tensor], tag: int) -> Work: ... + def barrier(self, opts=...) -> Work: ... + def boxed(self) -> ScriptObject: ... + @staticmethod + def unbox(obj: ScriptObject) -> ProcessGroup: ... + +class ProcessGroupRoundRobin(ProcessGroup): ... + +def _round_robin_process_groups( + process_groups: List[ProcessGroup], +) -> ProcessGroupRoundRobin: ... + +class ProcessGroupGloo(ProcessGroup): + class Device: ... + class Options: ... + + def __init__( + self, + store: Store, + rank: int, + size: int, + timeout: timedelta, + ): ... + @staticmethod + def create_device(hostname="", interface="") -> Device: ... + @staticmethod + def create_default_device() -> Device: ... + +class _ProcessGroupWrapper(ProcessGroup): + def __init__(self, pg: ProcessGroup, gloo_pg: ProcessGroupGloo): ... + wrapped_pg: ProcessGroup + +class ProcessGroupNCCL(ProcessGroup): + class Options: ... + + def __init__( + self, + store: Store, + rank: int, + size: int, + timeout: timedelta, + ): ... + def _group_start(self) -> None: ... + def _group_end(self) -> None: ... + +class ProcessGroupUCC(ProcessGroup): + def __init__( + self, + store: Store, + rank: int, + size: int, + timeout: timedelta, + ): ... + +class ProcessGroupMPI(ProcessGroup): + def __init__( + self, + rank: int, + size: int, + pgComm: int, + ): ... + @staticmethod + def create(ranks: List[int]) -> ProcessGroupMPI: ... + +def _compute_bucket_assignment_by_size( + tensors: List[Tensor], + bucket_size_limits: List[int], + expect_sparse_gradient: List[bool] = ..., + tensor_indices: List[int] = ..., +) -> Tuple[List[List[int]], List[int]]: ... +def _broadcast_coalesced( + process_group: ProcessGroup, + tensors: List[Tensor], + buffer_size: int, + src: int, +): ... +def _test_python_store(store: Store): ... +def _verify_params_across_processes( + process_group: ProcessGroup, + params: List[Tensor], + logger: Optional[Logger], +): ... +def _make_nccl_premul_sum(factor: Union[float, List[Tensor]]) -> ReduceOp: ... + +class Backend: + def __init__( + self, + rank: int, + size: int, + ): ... 
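Aside (illustrative sketch, not part of the vendored diff): the Store, ProcessGroup and ReduceOp bindings above back the public torch.distributed API. A single-process "gloo" group is enough to exercise them, assuming the gloo backend is available in the local build; the address and port below are arbitrary placeholders.

import torch
import torch.distributed as dist

def main() -> None:
    # An explicit TCPStore plus rank/world_size avoids any launcher or env-var setup.
    store = dist.TCPStore("127.0.0.1", 29500, world_size=1, is_master=True)
    dist.init_process_group("gloo", store=store, rank=0, world_size=1)
    t = torch.ones(4)
    # With world_size == 1 this sum is a no-op, but it exercises the same
    # ProcessGroup.allreduce binding declared in the stub above.
    dist.all_reduce(t, op=dist.ReduceOp.SUM)
    dist.destroy_process_group()

if __name__ == "__main__":
    main()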
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1ed8304bc6378bb1fbd5e2d15369fe66969acae4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi @@ -0,0 +1,35 @@ +from typing import Dict, List + +import torch + +from ._distributed_c10d import Store +from ._distributed_rpc import _TensorPipeRpcBackendOptionsBase, TensorPipeAgent + +# This module is defined in torch/csrc/distributed/rpc/testing/init.cpp + +class FaultyTensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase): + def __init__( + self, + num_worker_threads: int, + rpc_timeout: float, + init_method: str, + messages_to_fail: List[str], + messages_to_delay: Dict[str, float], + num_fail_sends: int, + ): ... + num_send_recv_threads: int + messages_to_fail: List[str] + messages_to_delay: Dict[str, float] + num_fail_sends: int + +class FaultyTensorPipeAgent(TensorPipeAgent): + def __init__( + self, + store: Store, + name: str, + rank: int, + world_size: int, + options: FaultyTensorPipeRpcBackendOptions, + reverse_device_maps: Dict[str, Dict[torch.device, torch.device]], + devices: List[torch.device], + ): ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_functorch.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_functorch.pyi new file mode 100644 index 0000000000000000000000000000000000000000..60c9b4b3ce472e5b87458ac354e806632fc5b210 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_functorch.pyi @@ -0,0 +1,71 @@ +from enum import Enum +from typing import Optional, Tuple + +from torch import Tensor + +# Defined in torch/csrc/functorch/init.cpp + +def _set_dynamic_layer_keys_included(included: bool) -> None: ... +def get_unwrapped(tensor: Tensor) -> Tensor: ... +def is_batchedtensor(tensor: Tensor) -> bool: ... +def is_functionaltensor(tensor: Tensor) -> bool: ... +def is_functorch_wrapped_tensor(tensor: Tensor) -> bool: ... +def is_gradtrackingtensor(tensor: Tensor) -> bool: ... +def maybe_get_bdim(tensor: Tensor) -> int: ... +def maybe_get_level(tensor: Tensor) -> int: ... +def unwrap_if_dead(tensor: Tensor) -> Tensor: ... +def _unwrap_for_grad(tensor: Tensor, level: int) -> Tensor: ... +def _wrap_for_grad(tensor: Tensor, level: int) -> Tensor: ... +def _unwrap_batched(tensor: Tensor, level: int) -> Tuple[Tensor, Optional[int]]: ... +def current_level() -> int: ... +def _add_batch_dim(tensor: Tensor, bdim: int, level: int) -> Tensor: ... +def set_single_level_autograd_function_allowed(allowed: bool) -> None: ... +def get_single_level_autograd_function_allowed() -> bool: ... +def _unwrap_functional_tensor(tensor: Tensor, reapply_views: bool) -> Tensor: ... +def _wrap_functional_tensor(tensor: Tensor, level: int) -> Tensor: ... + +# Defined in aten/src/ATen/functorch/Interpreter.h +class TransformType(Enum): + Torch: TransformType = ... + Vmap: TransformType = ... + Grad: TransformType = ... + Jvp: TransformType = ... + Functionalize: TransformType = ... + +class RandomnessType(Enum): + Error: TransformType = ... + Same: TransformType = ... + Different: TransformType = ... + +class CInterpreter: + def key(self) -> TransformType: ... + def level(self) -> int: ... + +class CGradInterpreterPtr: + def __init__(self, interpreter: CInterpreter): ... + def lift(self, Tensor) -> Tensor: ... + def prevGradMode(self) -> bool: ... 
+ +class CJvpInterpreterPtr: + def __init__(self, interpreter: CInterpreter): ... + def lift(self, Tensor) -> Tensor: ... + def prevFwdGradMode(self) -> bool: ... + +class CFunctionalizeInterpreterPtr: + def __init__(self, interpreter: CInterpreter): ... + def key(self) -> TransformType: ... + def level(self) -> int: ... + def functionalizeAddBackViews(self) -> bool: ... + +class CVmapInterpreterPtr: + def __init__(self, interpreter: CInterpreter): ... + def key(self) -> TransformType: ... + def level(self) -> int: ... + def batchSize(self) -> int: ... + def randomness(self) -> RandomnessType: ... + +class DynamicLayer: ... + +def peek_interpreter_stack() -> CInterpreter: ... +def pop_dynamic_layer_stack() -> DynamicLayer: ... +def push_dynamic_layer_stack(dl: DynamicLayer) -> int: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_itt.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_itt.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8a54437f527b994821133532f26598c951631b28 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_itt.pyi @@ -0,0 +1,5 @@ +# Defined in torch/csrc/itt.cpp +def is_available() -> None: ... +def rangePush(message: str) -> None: ... +def rangePop() -> None: ... +def mark(message: str) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_lazy.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_lazy.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ceaaedee210298b6448ee485ac0e433dc79dd450 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_lazy.pyi @@ -0,0 +1,28 @@ +from typing import List + +from torch import Tensor + +# defined in torch/csrc/lazy/python/init.cpp +def _mark_step(device: str, devices: List[str], wait: bool): ... +def _wait_device_ops(devices: List[str]): ... +def _reset_metrics(): ... +def _counter_names() -> List[str]: ... +def _counter_value(name: str) -> int: ... +def _metrics_report() -> str: ... +def _get_graph_hash(tensors: List[Tensor]) -> str: ... +def _sync_multi( + tensors: List[Tensor], + devices: List[str], + wait: bool = True, + sync_ltc_data: bool = True, +): ... +def _get_tensor_id(tensor: Tensor) -> int: ... +def _get_tensors_text(tensors: List[Tensor]) -> str: ... +def _get_tensors_dot(tensors: List[Tensor]) -> str: ... +def _get_tensors_backend(tensors: List[Tensor]) -> str: ... +def _get_force_fallback() -> str: ... +def _set_force_fallback(newval: str): ... +def _clear_ir_cache(): ... +def _dump_ir_cache(filename: str): ... +def _set_reuse_ir(val: bool): ... +def _get_default_device_type(): ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ce833c5ec2e4543f7c668d63ba0cb1cc8de72b5e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi @@ -0,0 +1,11 @@ +# defined in torch/csrc/lazy/python/init.cpp + +from typing import Any, List, Tuple + +from torch import Tensor + +def _init(): ... +def _get_tensors_ts_device_data_node( + tensors: List[Tensor], +) -> Tuple[List[int], List[Any]]: ... +def _run_cached_graph(hash_str: str, graph_inputs: List[Any]) -> List[Tensor]: ... 
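Aside (illustrative sketch, not part of the vendored diff): the _functorch interpreter bindings further up (the grad/vmap dynamic layers) surface publicly as torch.func. A small sketch of the transforms they implement:

import torch
from torch.func import grad, vmap

def f(x: torch.Tensor) -> torch.Tensor:
    # A scalar-valued function, as required by grad().
    return (x ** 2).sum()

x = torch.randn(3)
print(grad(f)(x))         # elementwise derivative, i.e. 2 * x

xs = torch.randn(5, 3)
print(vmap(grad(f))(xs))  # the same gradient computed per row of xs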
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_monitor.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_monitor.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9950a9e8c30a4b38f69ca9627e3980e948dfc213 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_monitor.pyi @@ -0,0 +1,44 @@ +# Defined in torch/csrc/monitor/python_init.cpp + +import datetime +from enum import Enum +from typing import Callable, Dict, List, Union + +class Aggregation(Enum): + VALUE = ... + MEAN = ... + COUNT = ... + SUM = ... + MAX = ... + MIN = ... + +class Stat: + name: str + count: int + def __init__( + self, + name: str, + aggregations: List[Aggregation], + window_size: int, + max_samples: int = -1, + ) -> None: ... + def add(self, v: float) -> None: ... + def get(self) -> Dict[Aggregation, float]: ... + +class Event: + name: str + timestamp: datetime.datetime + data: Dict[str, Union[int, float, bool, str]] + def __init__( + self, + name: str, + timestamp: datetime.datetime, + data: Dict[str, Union[int, float, bool, str]], + ) -> None: ... + +def log_event(e: Event) -> None: ... + +class EventHandlerHandle: ... + +def register_event_handler(handler: Callable[[Event], None]) -> EventHandlerHandle: ... +def unregister_event_handler(handle: EventHandlerHandle) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_profiler.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_profiler.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f1b8cda073b0da319d015539c19f80fe459560b9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_profiler.pyi @@ -0,0 +1,238 @@ +from enum import Enum +from typing import Any, Dict, List, Literal, Optional, Tuple, Union + +from torch._C import device, dtype, layout +from typing_extensions import TypeAlias + +# defined in torch/csrc/profiler/python/init.cpp + +class RecordScope(Enum): + FUNCTION = ... + BACKWARD_FUNCTION = ... + TORCHSCRIPT_FUNCTION = ... + KERNEL_FUNCTION_DTYPE = ... + CUSTOM_CLASS = ... + BUILD_FEATURE = ... + LITE_INTERPRETER = ... + USER_SCOPE = ... + STATIC_RUNTIME_OP = ... + STATIC_RUNTIME_MODEL = ... + +class ProfilerState(Enum): + Disable = ... + CPU = ... + CUDA = ... + NVTX = ... + ITT = ... + KINETO = ... + KINETO_GPU_FALLBACK = ... + KINETO_PRIVATEUSE1_FALLBACK = ... + KINETO_PRIVATEUSE1 = ... + +class ActiveProfilerType(Enum): + NONE = ... + LEGACY = ... + KINETO = ... + NVTX = ... + ITT = ... + +class ProfilerActivity(Enum): + CPU = ... + CUDA = ... + MTIA = ... + PrivateUse1 = ... + +class _EventType(Enum): + TorchOp = ... + Backend = ... + Allocation = ... + OutOfMemory = ... + PyCall = ... + PyCCall = ... + Kineto = ... + +class _ExperimentalConfig: + def __init__( + self, + profiler_metrics: List[str] = ..., + profiler_measure_per_kernel: bool = ..., + verbose: bool = ..., + performance_events: List[str] = ..., + enable_cuda_sync_events: bool = ..., + ) -> None: ... + +class ProfilerConfig: + def __init__( + self, + state: ProfilerState, + report_input_shapes: bool, + profile_memory: bool, + with_stack: bool, + with_flops: bool, + with_modules: bool, + experimental_config: _ExperimentalConfig, + ) -> None: ... 
+ +class _ProfilerEvent: + start_tid: int + start_time_ns: int + children: List[_ProfilerEvent] + + # TODO(robieta): remove in favor of `self.typed` + extra_fields: Union[ + _ExtraFields_TorchOp, + _ExtraFields_Backend, + _ExtraFields_Allocation, + _ExtraFields_OutOfMemory, + _ExtraFields_PyCall, + _ExtraFields_PyCCall, + _ExtraFields_Kineto, + ] + + @property + def typed( + self, + ) -> Union[ + Tuple[Literal[_EventType.TorchOp], _ExtraFields_TorchOp], + Tuple[Literal[_EventType.Backend], _ExtraFields_Backend], + Tuple[Literal[_EventType.Allocation], _ExtraFields_Allocation], + Tuple[Literal[_EventType.OutOfMemory], _ExtraFields_OutOfMemory], + Tuple[Literal[_EventType.PyCall], _ExtraFields_PyCall], + Tuple[Literal[_EventType.PyCCall], _ExtraFields_PyCCall], + Tuple[Literal[_EventType.Kineto], _ExtraFields_Kineto], + ]: ... + @property + def name(self) -> str: ... + @property + def tag(self) -> _EventType: ... + @property + def id(self) -> int: ... + @property + def parent(self) -> Optional[_ProfilerEvent]: ... + @property + def correlation_id(self) -> int: ... + @property + def end_time_ns(self) -> int: ... + @property + def duration_time_ns(self) -> int: ... + +class _TensorMetadata: + impl_ptr: Optional[int] + storage_data_ptr: Optional[int] + id: Optional[int] + + @property + def allocation_id(self) -> Optional[int]: ... + @property + def layout(self) -> layout: ... + @property + def device(self) -> device: ... + @property + def dtype(self) -> dtype: ... + @property + def sizes(self) -> List[int]: ... + @property + def strides(self) -> List[int]: ... + +Scalar: TypeAlias = Union[int, float, bool, complex] +Input: TypeAlias = Optional[Union[_TensorMetadata, List[_TensorMetadata], Scalar]] + +class _ExtraFields_TorchOp: + name: str + sequence_number: int + allow_tf32_cublas: bool + + @property + def inputs(self) -> List[Input]: ... + @property + def scope(self) -> RecordScope: ... + +class _ExtraFields_Backend: ... + +class _ExtraFields_Allocation: + ptr: int + id: Optional[int] + alloc_size: int + total_allocated: int + total_reserved: int + + @property + def allocation_id(self) -> Optional[int]: ... + @property + def device(self) -> device: ... + +class _ExtraFields_OutOfMemory: ... + +class _PyFrameState: + line_number: int + function_name: str + + @property + def file_name(self) -> str: ... + +class _NNModuleInfo: + @property + def self_ptr(self) -> int: ... + @property + def cls_ptr(self) -> int: ... + @property + def cls_name(self) -> str: ... + @property + def parameters( + self, + ) -> List[Tuple[str, _TensorMetadata, Optional[_TensorMetadata]]]: ... + +class _OptimizerInfo: + @property + def parameters( + self, + ) -> List[ + Tuple[ + # Parameter + _TensorMetadata, + # + # Gradient (if present during optimizer.step()) + Optional[_TensorMetadata], + # + # Optimizer state for Parameter as (name, tensor) pairs + List[Tuple[str, _TensorMetadata]], + ] + ]: ... + +class _ExtraFields_PyCCall: + @property + def caller(self) -> _PyFrameState: ... + +class _ExtraFields_PyCall: + @property + def callsite(self) -> _PyFrameState: ... + @property + def caller(self) -> _PyFrameState: ... + @property + def module(self) -> Optional[_NNModuleInfo]: ... + @property + def optimizer(self) -> Optional[_OptimizerInfo]: ... + +class _ExtraFields_Kineto: ... + +def _add_execution_trace_observer(output_file_path: str) -> bool: ... +def _remove_execution_trace_observer() -> None: ... +def _enable_execution_trace_observer() -> None: ... +def _disable_execution_trace_observer() -> None: ... 
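The four execution-trace observer functions stubbed directly above back the profiler's execution-trace capture. A hedged sketch of the call sequence they imply; the output path and workload are illustrative:

import torch
from torch._C import _profiler

if _profiler._add_execution_trace_observer("execution_trace.json"):
    _profiler._enable_execution_trace_observer()
    x = torch.randn(8, 8)
    torch.mm(x, x)                            # ops run here are recorded by the observer
    _profiler._disable_execution_trace_observer()
    _profiler._remove_execution_trace_observer()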
+def _set_record_concrete_inputs_enabled_val(val: bool) -> None: ... +def _set_fwd_bwd_enabled_val(val: bool) -> None: ... +def _set_cuda_sync_enabled_val(val: bool) -> None: ... + +class CapturedTraceback: ... + +def gather_traceback(python: bool, script: bool, cpp: bool) -> CapturedTraceback: ... + +# The Dict has name, filename, line +def symbolize_tracebacks( + to_symbolize: List[CapturedTraceback], +) -> List[List[Dict[str, str]]]: ... + +class _RecordFunctionFast: + def __init__(self, name: str) -> None: ... + def __enter__(self) -> None: ... + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_refs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7a78c08dd0c24db400bbabe220a20f682042beca --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_refs/__init__.py @@ -0,0 +1,6330 @@ +import builtins +import collections +import inspect +import itertools +import math +import operator +import warnings + +from collections.abc import Iterable +from enum import Enum +from functools import partial, reduce, singledispatch, wraps +from typing import Any, Callable, Dict, List, Optional, overload, Sequence, Tuple, Union + +import torch + +import torch._prims as prims +import torch._prims_common as utils +from torch import sym_float, sym_int +from torch._prims_common import ( + DeviceLikeType, + Dim, + DimsSequenceType, + DimsType, + dtype_to_type, + ELEMENTWISE_TYPE_PROMOTION_KIND, + FloatLike, + FloatWithoutSymFloat, + IntLike, + is_weakly_lesser_type, + Number, + NumberType, + RealNumberType, + REDUCTION_OUTPUT_TYPE_KIND, + ShapeType, + StrideType, + TensorLike, + TensorLikeType, + TensorOrNumberLikeType, + TensorSequenceType, +) +from torch._prims_common.wrappers import ( + _maybe_convert_to_dtype, + _maybe_resize_out, + _safe_copy_out, + elementwise_type_promotion_wrapper, + elementwise_unary_scalar_wrapper, + out_wrapper, +) + +# Experimental module containing prototype Python references for existing +# PyTorch operations. 
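In practice these references are exercised by comparing them against the corresponding eager operations. A minimal sketch, assuming CPU float tensors and default tolerances:

import torch
import torch._refs as refs

a = torch.randn(3, 4)
b = torch.randn(3, 4)

# The Python references defined below aim to match the eager ops numerically.
torch.testing.assert_close(refs.add(a, b, alpha=2), torch.add(a, b, alpha=2))
torch.testing.assert_close(refs.sigmoid(a), torch.sigmoid(a))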
+ +__all__ = [ + # + # Elementwise Unary References + # + "abs", + "acos", + "acosh", + "asinh", + "asin", + "atan", + "atanh", + "bitwise_not", + # "cbrt", # No corresponding torch operation + "ceil", + "conj_physical", + "cos", + "cosh", + "count_nonzero", + "deg2rad", + "digamma", + "erf", + "erfinv", + "erfc", + "exp", + "expm1", + "exponential", + "exp2", + "fill", + "fill_", + "floor", + "frac", + "geometric", + "index_add", + "index_copy", + "index_copy_", + "index_select", + "index_fill", + "index_fill_", + "isfinite", + "isinf", + "isposinf", + "isneginf", + "isnan", + "isreal", + "i0", + "lerp", + "lgamma", + "log", + "log1p", + "log2", + "log10", + "log_normal", + "log_softmax", + "mvlgamma", + "norm", + "normal", + "nan_to_num", + "neg", + "positive", + "rad2deg", + "reciprocal", + "round", # TODO: model kwargs + "sigmoid", + "sgn", + "sign", + "signbit", + "sin", + "sinc", + "sinh", + "softmax", + "sqrt", + "square", + "tan", + "tanh", + "trace", + "trunc", + # + # Elementwise Binary References + # + "add", + "atan2", + "bitwise_and", + "bitwise_left_shift", + "bitwise_or", + "bitwise_right_shift", + "bitwise_xor", + "clamp_min", + "clamp_max", + "copysign", + "div", + "eq", + "float_power", + "floor_divide", + "fmax", + "fmin", + "fmod", + "gcd", + "ge", + "gt", + "heaviside", + "hypot", + "igamma", + "igammac", + "imag", + "isclose", + "lcm", + # 'ldexp', + "le", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logsumexp", + "lt", + # 'max', # implement with reductions + "maximum", + # 'min', # implement with reductions + "minimum", + "mul", + "ne", + "nextafter", + # 'polar', # abs, cos, sin + "pow", + "real", + "rpow", + "remainder", + "rsub", + "rtruediv", + "rfloordiv", + "sub", + "true_divide", + "trunc_divide", + "xlogy", + # + # Elementwise Ternary References + # + "addcdiv", + "addcmul", + "clamp", + # + # Conditional references + # + "masked_fill", + "masked_fill_", + "where", + # + # Data conversion and movement references + # + "clone", + "copy_to", # TODO: add OpInfo (or implement .to) + "item", + "to", + # + # Reduction ops + # + "all", + "amax", + "amin", + "any", + "cumsum", + "cumprod", + "mean", + "dot", + "vdot", + "std", + "std_mean", + "sum", + "sum_to_size", + "prod", + "var", + "var_mean", + # + # Linear algebra ops + # + "addr", + # + # View & Shape Ops + # + "alias", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "as_strided", + "as_strided_scatter", + "broadcast_shapes", + "broadcast_tensors", + "broadcast_to", + "cat", + "chunk", + "column_stack", + "conj", + "constant_pad_nd", + "contiguous", + "diag_embed", + "diag", + "diagonal", + "diagonal_copy", + "diagonal_scatter", + "dsplit", + "dstack", + "expand", + "expand_as", + "flatten", + "flip", + "fliplr", + "flipud", + "hsplit", + "hstack", + "meshgrid", + "movedim", + "narrow", + "narrow_copy", + "native_group_norm", + "native_layer_norm", + "permute", + "ravel", + "repeat", + "reshape", + "reshape_as", + "roll", + "rot90", + "rsqrt", + "stack", + "swap_axes", # alias for transpose + "squeeze", + "t", + "T", + "take_along_dim", + "tensor_split", + "transpose", + "unfold", + "unfold_copy", + "unsqueeze", + "view", + "view_as", + "vsplit", + "vstack", + "view_as_complex", + "unflatten", + "unbind", + "triu", + "tril", + "triu_indices", + "tril_indices", + # + # Tensor Creation + # + "arange", + "cauchy", + "empty", + "empty_like", + "empty_permuted", + "empty_strided", + "eye", + "full", + "full_like", + "linspace", + "logspace", + "new_empty", 
+ "new_empty_strided", + "new_full", + "new_ones", + "new_zeros", + "ones", + "ones_like", + "randn", + "scalar_tensor", + "zero", + "zeros", + "zeros_like", + # + # Test-related functions + # + "allclose", + "equal", + # + # Statistical operations + # + "bucketize", + # + # Misc + # + "is_complex", + "renorm", + "stft", + "istft", +] + +Tensor = torch.Tensor +DispatchKey = torch._C.DispatchKey # type: ignore[attr-defined] +aten = torch._ops.ops.aten + +# Note that the docstrings for the public methods from this file are in +# torch/_torch_docs.py + + +def is_noncontiguous_supported(device): + if device is not None and device.type == "hpu": + return False + return True + + +def handle_noncontiguous_outputs(input_tlist, output): + device = None + from torch._subclasses.fake_tensor import FakeTensor + + for t in input_tlist: + if isinstance(t, FakeTensor): + device = t.fake_device + break + + if not is_noncontiguous_supported(device): + output = output.contiguous() + + return output + + +def _broadcast_shapes(*_shapes): + shapes = tuple( + (x,) if isinstance(x, IntLike) else x + for x in filter(lambda x: x is not None, _shapes) + ) + + # Short-circuits on no input + if len(shapes) == 0: + return None + + # Type checking + # TODO: make common validations available as utils + for shape in shapes: + assert isinstance(shape, Sequence) + + # Computes common shape + common_shape = [ + 1, + ] * reduce(max, (len(shape) for shape in shapes)) + for arg_idx, shape in enumerate(shapes): + for idx in range(-1, -1 - len(shape), -1): + if common_shape[idx] == 1: + if shape[idx] < 0: + raise ValueError( + "Attempting to broadcast a dimension with negative length!" + ) + common_shape[idx] = shape[idx] + elif shape[idx] != 1: + if common_shape[idx] != shape[idx]: + raise RuntimeError( + f"Attempting to broadcast a dimension of length {shape[idx]} at {idx}! " + f"Mismatching argument at index {arg_idx} had {shape}; but expected shape " + f"should be broadcastable to {common_shape}" + ) + + return common_shape + + +def _maybe_broadcast(*args, preserve_cpu_scalar_tensors=True): + # Computes common shape + common_shape = _broadcast_shapes( + *(t.shape if isinstance(t, TensorLike) else None for t in args) + ) + + def __maybe_broadcast(x, shape): + if x is None: + return None + elif isinstance(x, Number): + return x + elif isinstance(x, TensorLike): + if preserve_cpu_scalar_tensors and utils.is_cpu_scalar_tensor(x): + return x + + if not utils.same_shape(x.shape, common_shape): + return x.expand(common_shape) + + return x + else: + raise RuntimeError( + "Unexpected type when broadcasting: " + str(type(x)) + "!" 
+ ) + + return tuple(__maybe_broadcast(x, common_shape) for x in args) + + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition + +# +# Elementwise unary references +# + +infer_aten_op = object() + + +# TODO: add type promotion support +def _make_elementwise_unary_reference( + type_promotion_kind, + *, + aten_op=infer_aten_op, + extra_meta=None, +) -> Callable: + def inner(prim: Callable): + nonlocal aten_op + + @wraps(prim) + @out_wrapper() + @elementwise_unary_scalar_wrapper + @elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=type_promotion_kind, + ) + def _ref(a: TensorLikeType) -> TensorLikeType: + if extra_meta is not None: + extra_meta(a) + + output = prim(a) + return handle_noncontiguous_outputs([a], output) + + if aten_op is infer_aten_op: + aten_op = utils.get_aten_op(prim, prim.__name__) + if aten_op is not None: + register_decomposition(aten_op)(_ref) + + return _ref + + return inner + + +def _make_alias(fn, name): + """ + This function defines an alias of another function and sets its __name__ argument. + It also sets its __module__ argument to the module of the caller. + Note that when naïvely doing `alias = fn`, we have that `alias.__name__ == "fn"`, and + `alias.__module__ == fn.__module__`. + """ + + def _fn(*args, **kwargs): + return fn(*args, **kwargs) + + _fn.__name__ = name + _fn.__module__ = inspect.currentframe().f_back.f_globals["__name__"] # type: ignore[union-attr] + return _fn + + +def _make_inplace(fn): + """ + Given a function with out variant (i.e. using `out_wrapper()), it returns its in-place variant + See https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-do-in-place-operations-work-in-pytorch + """ + + # nb. We use the name of the first argument used in the unary references + @wraps(fn) + def _fn(a, *args, **kwargs): + return fn(a, *args, out=a, **kwargs) + + inplace_name = f"{fn.__name__}_" + _fn.__name__ = inplace_name + _fn = register_decomposition(getattr(aten, inplace_name))(_fn) + + # We access the __all__ attribute of the module where fn is defined + # There may be a cleaner way of doing this... 
+ from inspect import getmodule + + _all = getmodule(fn).__all__ # type: ignore[union-attr] + if inplace_name not in _all: + _all.append(inplace_name) + return _fn + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT) +def abs(a): + return prims.abs(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def acos(a): + return prims.acos(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def acosh(a): + return prims.acosh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def asin(a): + return prims.asin(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def asinh(a): + return prims.asinh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def atan(a): + return prims.atan(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def atanh(a): + return prims.atanh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def bitwise_not(a): + return prims.bitwise_not(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def ceil(a): + return prims.ceil(a) + + +@register_decomposition(aten.is_complex) +def is_complex(input: TensorLikeType): + return utils.is_complex_dtype(input.dtype) + + +@register_decomposition(aten.conj_physical) +@out_wrapper() +def conj_physical(input: TensorLikeType): + if not utils.is_complex_dtype(input.dtype): + return input + return prims.conj_physical(input) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def cos(a): + return prims.cos(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def cosh(a): + return prims.cosh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def digamma(a): + return prims.digamma(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erf(a): + return prims.erf(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erfinv(a): + return prims.erf_inv(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erfc(a): + return prims.erfc(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def exp(a): + return prims.exp(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def expm1(a): + return prims.expm1(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def exp2(a): + return prims.exp2(a) + + +# Fill has its own implementation because it has a value parameter +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a,"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def fill(a: TensorLikeType, value: NumberType) -> TensorLikeType: + assert isinstance(a, TensorLike) + assert isinstance(value, Number) + + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(value), python_type): + msg = f"value argument of type {type(value)} cannot be safely cast to type {python_type}!" 
+ raise ValueError(msg) + + return prims.fill(a, value) + + +def fill_(a: TensorLikeType, value: NumberType) -> TensorLikeType: + r = prims.fill(a, value) + prims.copy_to(a, r) + return a + + +@register_decomposition(aten.zero) +@out_wrapper() +def zero(input: TensorLikeType) -> TensorLikeType: + return torch.zeros_like(input) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def floor(a): + return prims.floor(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def frac(x: TensorLikeType) -> TensorLikeType: + trunc_x = torch.mul(torch.floor(torch.abs(x)), torch.sign(x)) + return torch.sub(x, trunc_x) + + +# imag does not use _make_elementwise_unary_reference because it does not support out +def imag(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + torch._check( + utils.is_complex_dtype(a.dtype), lambda: "imag only supports complex tensors." + ) + return prims.imag(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + aten_op=None, # CompositeImplicitAutograd +) +def isfinite(a: TensorLikeType) -> TensorLikeType: + if utils.is_float_dtype(a.dtype) or utils.is_complex_dtype(a.dtype): + return prims.isfinite(a) + + return ones_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isinf(a: TensorLikeType) -> TensorLikeType: + if utils.is_complex_dtype(a.dtype): + return torch.logical_or(isinf(torch.real(a)), isinf(torch.imag(a))) + if utils.is_float_dtype(a.dtype): + return torch.abs(a) == float("inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isposinf(a: TensorLikeType) -> TensorLikeType: + torch._check( + not utils.is_complex_dtype(a.dtype), + lambda: f"Complex dtype is not supported for isposinf, got dtype {a.dtype}", + ) + if utils.is_float_dtype(a.dtype): + return a == float("inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isneginf(a: TensorLikeType) -> TensorLikeType: + torch._check( + not utils.is_complex_dtype(a.dtype), + lambda: f"Complex dtype is not supported for isneginf, got dtype {a.dtype}", + ) + if utils.is_float_dtype(a.dtype): + return a == float("-inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isnan(a: TensorLikeType) -> TensorLikeType: + return prims.ne(a, a) + + +# alias +mvlgamma = _make_alias(torch.special.multigammaln, "mvlgamma") # type: ignore[has-type] + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + aten_op=None, # CompositeImplicitAutograd +) +def isreal(a: TensorLikeType) -> TensorLikeType: + if utils.is_complex_dtype(a.dtype): + return torch.imag(a) == 0 + return torch.ones_like(a, dtype=torch.bool) + + +# TODO: if this is special maybe it should be defined there and imported here? 
+@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, aten_op=aten.special_i0 +) +def i0(a): + return prims.bessel_i0(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def lgamma(a): + return prims.lgamma(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log(a): + return prims.log(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log1p(a): + return prims.log1p(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log2(a): + return prims.log2(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log10(a): + return prims.log10(a) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def log_softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + result_dtype = dtype or a.dtype + computation_dtype = utils.get_computation_dtype(result_dtype) + a_ = _maybe_convert_to_dtype(a, computation_dtype) + return _maybe_convert_to_dtype(a_ - logsumexp(a_, dim, keepdim=True), result_dtype) # type: ignore[return-value] + + +@register_decomposition(aten.logsumexp) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def logsumexp( + self: TensorLikeType, dim: DimsType, keepdim: bool = False +) -> TensorLikeType: + if not isinstance(dim, Iterable): + dim = (dim,) + if self.numel() == 0: + return torch.sum(torch.exp(self), dim, keepdim).log() + maxes = torch.amax(self, dim, keepdim=True) + maxes = torch.masked_fill(maxes, maxes.abs() == float("inf"), 0) + maxes_squeezed = maxes if keepdim else torch.squeeze(maxes, dim) + result = torch.sum(torch.exp(self - maxes), dim, keepdim) + return result.log().add(maxes_squeezed) + + +@register_decomposition(aten.nan_to_num) +@out_wrapper() +def nan_to_num( + a: TensorLikeType, + nan: Optional[NumberType] = 0.0, + posinf: Optional[NumberType] = None, + neginf: Optional[NumberType] = None, +) -> TensorLikeType: + assert isinstance(a, TensorLike) + + if utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + return a.clone() + + if nan is None: + nan = 0.0 + + if posinf is None: + posinf = torch.finfo(a.dtype).max + + if neginf is None: + neginf = torch.finfo(a.dtype).min + + result = torch.where(torch.isnan(a), nan, a) # type: ignore[call-overload] + result = torch.where(torch.isneginf(a), neginf, result) # type: ignore[call-overload] + result = torch.where(torch.isposinf(a), posinf, result) # type: ignore[call-overload] + return result + + +def _neg_meta(a: TensorLikeType): + torch._check( + a.dtype is not torch.bool, + lambda: ( + "Negation, the `-` operator, on a bool tensor is not supported. " + "If you are trying to invert a mask, use the `~` or `logical_not()` " + "operator instead." + ), + ) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, extra_meta=_neg_meta +) +def neg(a): + return prims.neg(a) + + +# positive does not use _make_elementwise_unary_reference because it does not support out +# CompositeImplicitAutograd - don't register decomp +def positive(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + if a.dtype is torch.bool: + msg = "positive does not support bool tensors." 
+ raise RuntimeError(msg) + return a + + +# real does not use _make_elementwise_unary_reference because it does not support out +def real(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + if utils.is_complex_dtype(a.dtype): + return prims.real(a) + return a + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def reciprocal(a): + return prims.reciprocal(a) + + +# TODO: round takes additional kwargs +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + aten_op=None, # TODO: this does need a decomp, but kwarg handling is needed +) +def round(a): + return prims.round(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def rsqrt(a): + return prims.rsqrt(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sigmoid(a: TensorLikeType) -> TensorLikeType: + return true_divide(1, add(1, exp(neg(a)))) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def sgn(a): + if utils.is_complex_dtype(a.dtype): + a_abs = a.abs() + return torch.where(a_abs == 0, 0, a / a_abs) + else: + return a.sign() + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def sign(a): + return prims.sign(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def signbit(a): + return prims.signbit(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sin(a): + return prims.sin(a) + + +# Autograd note: This will give the right first derivative at zero (by chance), +# but not the right second derivative +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sinc(a): + a = math.pi * a + return torch.where(a == 0, 1, torch.sin(a) / a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sinh(a): + return prims.sinh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sqrt(a): + return prims.sqrt(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG, + aten_op=None, # CompositeImplicitAutograd, +) +def square(a: TensorLikeType) -> TensorLikeType: + return mul(a, a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def tan(a): + return prims.tan(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def tanh(a): + return prims.tanh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def trunc(a): + return prims.trunc(a) + + +# TODO: register this as a real ref/decomposition once TorchInductor supports complex! 
+def view_as_complex(self: TensorLikeType) -> TensorLikeType: + input_dtype = self.dtype + torch._check( + utils.is_float_dtype(input_dtype), + lambda: f"view_as_complex is only supported for floating point" + f"tensors, but got a tensor of scalar type: {input_dtype}", + ) + sizes = self.size() + torch._check( + len(sizes) != 0, + lambda: "Input tensor must have one or more dimensions", + ) + torch._check( + sizes[-1] == 2, + lambda: "Tensor must have a last dimension of size 2", + ) + + old_strides = self.stride() + torch._check( + old_strides[-1] == 1, + lambda: "Tensor must have a last dimension with stride 1", + ) + dims = old_strides[:-1] + torch._check( + py_all(stride % 2 == 0 for stride in dims), + lambda: "Tensor must have a stride divisible by 2 for all but last dimension", + ) + torch._check( + self.storage_offset() % 2 == 0, + lambda: "Tensor must have a storage_offset divisible by 2", + ) + return prims.view_element_type( + self, utils.corresponding_complex_dtype(input_dtype) + ).squeeze(-1) + + +def _make_elementwise_binary_reference( + type_promotion_kind, + aten_op=infer_aten_op, + name=None, + has_out=True, + supports_lhs_python_scalar=True, + supports_rhs_python_scalar=True, + supports_two_python_scalars=False, + should_register_decomposition=True, +) -> Callable: + def inner(prim: Callable): + nonlocal aten_op, name + if name is None: + name = prim.__name__ + + @wraps(prim) + @elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=type_promotion_kind, + ) + def _ref( + a: Union[Tensor, NumberType], + b: Union[Tensor, NumberType], + ) -> Tensor: + torch._check_value( + supports_lhs_python_scalar or not isinstance(a, Number), + lambda: f"{name}: Received a lhs Python scalar to an elementwise binary " + "operation that does not accept lhs scalars!", + ) + torch._check_value( + supports_rhs_python_scalar or not isinstance(b, Number), + lambda: f"{name}: Received a rhs Python scalar to an elementwise binary " + "operation that does not accept rhs scalars!", + ) + torch._check_value( + supports_two_python_scalars + or not (isinstance(a, Number) and isinstance(b, Number)), + lambda: f"{name}: Receive two Number inputs to an elementwise binary operation!", + ) + a, b = _maybe_broadcast(a, b) + output = prim(a, b) + return handle_noncontiguous_outputs([a, b], output) + + if has_out: + _ref = out_wrapper()(_ref) + + _ref.__name__ = name + if aten_op is infer_aten_op: + aten_op = utils.get_aten_op(prim, name) + if aten_op is not None and should_register_decomposition: + register_decomposition(aten_op)(_ref) + + return _ref + + return inner + + +# Add has its own implementation because it has an alpha argument +@register_decomposition(aten.add) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def add( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + alpha: Optional[NumberType] = None, +): + """ + Reference implementation of torch.add + """ + + a, b = _maybe_broadcast(a, b) + + if alpha is not None: + dtype = a.dtype if isinstance(a, TensorLike) else b.dtype # type: ignore[union-attr] + python_type = utils.dtype_to_type(dtype) + if python_type != bool and not utils.is_weakly_lesser_type( + type(alpha), python_type + ): + msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!" 
+ raise ValueError(msg) + if isinstance(b, TensorLike): + b = prims.mul(b, alpha) + else: + b = b * alpha + + output = prims.add(a, b) + return handle_noncontiguous_outputs([a, b], output) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def atan2(a, b): + return prims.atan2(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_and(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_and(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_left_shift(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.shift_left(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_or(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_or(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_right_shift(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.shift_right_arithmetic(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_xor(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_xor(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, +) +def copysign( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +): + if isinstance(b, Number) and isinstance(a, Tensor): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(a, Tensor) and isinstance(b, Tensor) and a.device != b.device: + msg = "Expected divisor (b) to be on the same device ({}) as dividend (a), but it is found on {}!".format( + a.device, b.device + ) + raise RuntimeError(msg) + return where(signbit(b), neg(abs(a)), abs(a)) + + +# complex = _make_elementwise_binary_reference(prims.complex, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) + + +@register_decomposition(aten.div) +@out_wrapper() +def div( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + rounding_mode: Optional[str] = None, +): + """ + Reference implementation of torch.div + """ + if rounding_mode is None: + return true_divide(a, b) + elif rounding_mode == "trunc": + return trunc_divide(a, b) + elif rounding_mode == "floor": + return floor_divide(a, b) + else: + msg = f"div expected rounding_mode to be one of None, 'trunc', or 'floor' but found {rounding_mode}." 
+ raise ValueError(msg) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def eq(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.eq(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG, +) +def pow( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], +) -> TensorLikeType: + assert isinstance(a, TensorLikeType) or isinstance(b, TensorLikeType) + + if isinstance(b, Number): + if b == 1.0: + return a.clone() # type: ignore[return-value,union-attr] + elif b == 2.0: + return a * a # type: ignore[return-value] + elif b == 0.5: + return torch.sqrt(a) # type: ignore[arg-type] + elif isinstance(a, Number): + if a == 1.0: + return torch.fill(b, True) + if a == 2.0 and ( + utils.is_float_dtype(b.dtype) or utils.is_complex_dtype(b.dtype) + ): + return torch.exp2(b) + + return prims.pow(a, b) + + +# Float power has its own implementation because it has unique type promotion. +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def float_power( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], +) -> Tensor: + if isinstance(a, Number) and isinstance(b, Number): + raise ValueError( + "Receive two Number inputs to an elementwise binary operation!" + ) + + # Handles type promotion + dtype = utils.get_higher_dtype(a, b) + assert dtype is not None + if utils.is_complex_dtype(dtype): + dtype = torch.complex128 + else: + dtype = torch.float64 + + # Float power has the following contiguous cast behavior to be + # consistent with its C++ impl + a = _maybe_convert_to_dtype(a, dtype) + b = _maybe_convert_to_dtype(b, dtype) + + a, b = _maybe_broadcast(a, b) + return pow(a, b) + + +# >>> a = torch.tensor(-0.2500, dtype=torch.float64) +# tensor(-0.250000000000000, dtype=torch.float64) +# +# >>> b = torch.tensor(-0.0010, dtype=torch.float64) +# tensor(-0.001000000000000, dtype=torch.float64) +# +# Note: In this case, casting float to double will expand the float mantissa with zeros, +# while creating a double generates a distinct mantissa. +# >>> torch.tensor(-0.001).to(dtype=torch.float64) +# tensor(-0.001000000047497, dtype=torch.float64) +# +# Floor Division +# The difference is caused because torch.remainder(a, b) = -0.001. +# +# >>> torch.floor(torch.true_divide(a, b)) +# tensor(250., dtype=torch.float64) +# +# >>> torch.div(a, b, rounding_mode='floor') +# tensor(249., dtype=torch.float64) +# +# Definition: a // b = (a - remainder(a, b)) / b +# >>> torch.true_divide(torch.sub(a, torch.remainder(a, b)), b) +# tensor(249., dtype=torch.float64) +# +# For reference, see CPython's implementation: +# https://github.com/python/cpython/blob/ace008c531dd685a30c1dd68f9b5ba35f20171cf/Objects/floatobject.c#L636 + + +@_make_elementwise_binary_reference( + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_two_python_scalars=True, + should_register_decomposition=False, +) +def floor_divide( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +): + # Wrap scalars because some references only accept tensor arguments. 
+ if isinstance(a, Number) and isinstance(b, Number): + a = scalar_tensor(a) + b = scalar_tensor(b) + elif isinstance(b, Number) and isinstance(a, Tensor): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(a, Number) and isinstance(b, Tensor): + a = scalar_tensor(a, dtype=b.dtype, device=b.device) + elif isinstance(a, Tensor) and isinstance(b, Tensor) and a.device != b.device: + if a.device == torch.device("cpu"): + msg = "Expected divisor (b) to be on the same device ({}) as dividend (a), but it is found on {}!".format( + a.device, b.device + ) + raise RuntimeError(msg) + else: + b = prims.device_put(b, device=a.device) + + assert isinstance(a, Tensor) and isinstance(b, Tensor) + dtype = a.dtype + if utils.is_float_dtype(dtype): + return _floor_divide_float(a, b) + elif utils.is_integer_dtype(dtype): + return _floor_divide_integer(a, b) + else: + torch._check(False, lambda: f"{dtype} not supported for floor_divide") + + +def _floor_divide_integer(a: Tensor, b: Tensor) -> Tensor: + a, b = _maybe_broadcast(a, b) + + if not a.dtype.is_signed: + return prims.div(a, b) + + # Convert truncation to flooring: + offset = (torch.signbit(a) != torch.signbit(b)).logical_and(torch.fmod(a, b) != 0) + return prims.div(a, b) - _maybe_convert_to_dtype(offset, a.dtype) + + +def _floor_divide_float(a: Tensor, b: Tensor) -> Tensor: + mod = fmod(a, b) + div = true_divide(sub(a, mod), b) + + # Ensure that the remainder has the same sign as denominator + different_signed_inputs = bitwise_xor(lt(a, 0), lt(b, 0)) + non_zero_remainder = ne(mod, 0) + mask = bitwise_and(non_zero_remainder, different_signed_inputs) + div = where(mask, sub(div, 1), div) + + # Map quotient to nearest integer value + floor_div = floor(div) + mask = gt(sub(div, floor_div), 0.5) + floor_div = where(mask, add(floor_div, 1), floor_div) + + basic_div = true_divide(a, b) + zero_tensor = scalar_tensor(0, dtype=basic_div.dtype, device=basic_div.device) + + # If quotient is zero, copy signbit from true_divide quotient + floor_div = where(ne(div, 0), floor_div, copysign(zero_tensor, basic_div)) + + # If denominator is zero, then follow true_divide behavior + return where(ne(b, 0), floor_div, basic_div) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def fmax(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmax(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def fmin(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmin(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=True, +) +def fmod(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmod(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def gcd(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.gcd(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def ge(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.ge(a, b) + + 
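The floor-division note above can be reproduced directly; the printed values below are the ones quoted in that note, showing how the remainder-based definition differs from naively flooring the true quotient:

import torch

a = torch.tensor(-0.2500, dtype=torch.float64)
b = torch.tensor(-0.0010, dtype=torch.float64)

print(torch.floor(torch.true_divide(a, b)))              # tensor(250., dtype=torch.float64)
print(torch.div(a, b, rounding_mode="floor"))            # tensor(249., dtype=torch.float64)
print(torch.true_divide(a - torch.remainder(a, b), b))   # tensor(249., dtype=torch.float64)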
+@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def gt(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.gt(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def heaviside(input: TensorLikeType, values: TensorLikeType) -> TensorLikeType: + input_eq_zero = torch.eq(input, 0) + input_lt_zero = torch.logical_or(torch.lt(input, 0), torch.isnan(input)) + zeros_and_ones = torch.where(input_lt_zero, 0, 1) + output = torch.where(input_eq_zero, values, zeros_and_ones) + return output + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def hypot(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.hypot(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def igamma(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.igamma(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def igammac(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.igammac(a, b) + + +def _check_close_args( + name: str, + a: TensorLikeType, + b: TensorLikeType, + rtol: float, + atol: float, +) -> None: + torch._check_value( + a.dtype == b.dtype, + lambda: f"{name}: Attempting to compare tensors of different dtypes {a.dtype} and {b.dtype}!", + ) + torch._check( + rtol >= 0, + lambda: f"{name}: rtol must be greater than or equal to zero, but got {rtol}!", + ) + torch._check( + atol >= 0, + lambda: f"{name}: atol must be greater than or equal to zero, but got {atol}!", + ) + + +# CompositeImplicitAutograd - don't register decomp +def isclose( + a: TensorLikeType, + b: TensorLikeType, + rtol: float = 1e-05, + atol: float = 1e-08, + equal_nan: bool = False, +) -> TensorLikeType: + _check_close_args(name="torch.isclose", a=a, b=b, rtol=rtol, atol=atol) + + close = eq(a, b) + if equal_nan and (utils.is_float_dtype(a.dtype) or utils.is_complex_dtype(a.dtype)): + close = logical_or(close, logical_and(isnan(a), isnan(b))) + + # Note: In case of zero tolerances the closeness inequality degenerates to an equality check. + # In this case, the short-circuit prevents false positives as detailed in the paragraph below. + if atol == 0 and rtol == 0: + return close + + # Note [closeness error computation] + # atol and rtol are provided as doubles, so the computation + # rtol * other will produce a float or complex tensor. + # When the difference (self - other) is compared to it then the + # tensor representing the difference will also be cast to float or complex. + # However, since (self - other) in uint8 is very likely to produce a + # negative value, this moves the cast forward so the difference is + # always computed in a float or complex type. + # If the values of the integer tensors cannot be exactly represented + # by the default scalar type then this may cause an incorrect result. 
+ if not utils.is_float_dtype(a.dtype) and not utils.is_complex_dtype(a.dtype): + a = prims.convert_element_type(a, torch.get_default_dtype()) + b = prims.convert_element_type(b, torch.get_default_dtype()) + + allowed_error = add(atol, abs(mul(b, rtol))) + actual_error = abs(sub(a, b)) + + # Computes finite closeness + result = logical_or( + close, logical_and(isfinite(actual_error), le(actual_error, allowed_error)) + ) + + return result + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def lcm(a: TensorLikeType, b: TensorLikeType): + dtype = a.dtype + # promoting to int32 to maintain 100% consistency with C++ and to + # prevent overflow in case of int8 and int16 + promote_to_int = dtype in (torch.int8, torch.int16) + if promote_to_int: + a = prims.convert_element_type(a, torch.int32) + b = prims.convert_element_type(b, torch.int32) + + g = torch.gcd(a, b) + # Avoid division by zero in case gcd(0, 0) == 0 + g = torch.where(g == 0, 1, g) + res = torch.abs(prims.div(a, g) * b) + return res if not promote_to_int else prims.convert_element_type(res, dtype) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def le(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.le(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def logaddexp(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + # Nb. this implementation does not distribute the gradients evenly when a == b + mask = torch.real(a) >= torch.real(b) + max_ = torch.where(mask, a, b) + min_ = torch.where(mask, b, a) + inf_mask = torch.logical_and( + torch.logical_not(torch.isfinite(torch.real(a))), torch.real(a) == torch.real(b) + ) + if utils.is_complex_dtype(a.dtype) or utils.is_complex_dtype(b.dtype): + # are you wondering what this bunch of codes are for? edge cases! + neg_min_mask = torch.real(min_) < 0 + inf_vals = torch.where( + neg_min_mask, min_, torch.log(torch.exp(min_) + torch.exp(max_)) + ) + non_nan_vals = torch.where( + inf_mask, inf_vals, max_ + torch.log1p(torch.exp(min_ - max_)) + ) + # the type for full_like does not include tensor yet + nan_mask = torch.isnan(min_) + return torch.where(nan_mask, complex(float("nan"), float("nan")), non_nan_vals) # type: ignore[call-overload] + else: + return torch.where(inf_mask, a, max_ + torch.log1p(torch.exp(min_ - max_))) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def logaddexp2(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + torch._check( + not (utils.is_complex_dtype(a.dtype) or utils.is_complex_dtype(b.dtype)), + lambda: "logaddexp2 doesn't support complex dtypes", + ) + # Nb. 
this implementation does not distribute the gradients evenly when a == b + mask = a >= b + max_ = torch.where(mask, a, b) + min_ = torch.where(mask, b, a) + inf_mask = torch.logical_and(torch.isinf(a), a == b) + inv_log_2 = 1.0 / math.log(2) + result = max_ + torch.log1p(torch.exp2(min_ - max_)) * inv_log_2 + return torch.where(inf_mask, a, result) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_and(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return a & b + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def logical_not(a: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + return a == 0 + return ~a + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_or(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return bitwise_or(a, b) + + +# TODO: skip unnecessary conversion of long to float +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_xor(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return a ^ b + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def lt(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.lt(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def maximum(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.maximum(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def minimum(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.minimum(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_two_python_scalars=True, +) +def mul(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.mul(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def ne(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.ne(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def nextafter(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.nextafter(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def remainder(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.remainder(a, b) + + +# reverse sub +def rsub( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + alpha: Optional[NumberType] = None, +): + if isinstance(a, Number): + msg = "Received a Number for the first argument, but expected a Tensor" + raise ValueError(msg) + return sub(b, a, alpha=alpha) + + +# TODO: consider refactoring this with add impl +# sub has its own implementation because it has an alpha argument 
+@register_decomposition(aten.sub) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def sub( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + alpha: Optional[NumberType] = None, +): + """ + Reference implementation of torch.sub + """ + + a, b = _maybe_broadcast(a, b) + + if alpha is not None: + dtype = a.dtype if isinstance(a, TensorLike) else b.dtype # type: ignore[union-attr] + python_type = utils.dtype_to_type(dtype) + if not utils.is_weakly_lesser_type(type(alpha), python_type): + msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!" + raise ValueError(msg) + if isinstance(b, torch.Tensor): + b = prims.mul(b, alpha) + else: + # Carefully not to use prims.mul if b is a scalar / symint. + # prims.mul always returns a tensor, + # which will mess with type promotion. + b = b * alpha + + output = prims.sub(a, b) + return handle_noncontiguous_outputs([a, b], output) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + name="true_divide", + aten_op=None, # CompositeImplicitAutograd + supports_two_python_scalars=True, +) +def true_divide(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.div(a, b) + + +@register_decomposition(aten.xlogy) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def xlogy(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]): + torch._check( + isinstance(a, TensorLike) or isinstance(b, TensorLike), + lambda: 'Expected either argument a or b to be a Tensor"', + ) + + # Operations like eq and log do not handle scalar values, so we convert them to scalar_tensors. 
+ if isinstance(b, TensorLike) and isinstance(a, Number): + a = scalar_tensor(a, dtype=b.dtype, device=b.device) + elif isinstance(a, TensorLike) and isinstance(b, Number): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + + # mypy: expected "Tensor" + assert isinstance(a, TensorLike) + assert isinstance(b, TensorLike) + rhs = torch.where(torch.eq(a, 0), 0, torch.mul(a, torch.log(b))) + return torch.where(torch.isnan(b), float("nan"), rhs) + + +@_make_elementwise_binary_reference( + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + aten_op=None, # CompositeImplicitAutograd + supports_two_python_scalars=True, +) +def trunc_divide( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +): + dtype = utils.get_dtype(a) + if utils.is_integer_dtype(dtype): + return prims.div(a, b) + + return trunc(prims.div(a, b)) + + +# +# Elementwise Ternary References +# + + +@register_decomposition(aten.addcdiv) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "tensor1", "tensor2"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def addcdiv( + self: TensorLikeType, + tensor1: TensorLikeType, + tensor2: TensorLikeType, + *, + value: NumberType = 1, +) -> TensorLikeType: + """ + Reference implementation of torch.addcdiv + """ + if value is not None: + dtype = self.dtype # no scalars allowed, see add + python_type = utils.dtype_to_type(dtype) + torch._check_value( + utils.is_weakly_lesser_type(type(value), python_type), + lambda: f"value argument of type {type(value)} cannot be safely cast to type {python_type}!", + ) + + return self + value * tensor1 / tensor2 + + +@register_decomposition(aten.addcmul) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "tensor1", "tensor2"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def addcmul( + self: TensorLikeType, + tensor1: TensorLikeType, + tensor2: TensorLikeType, + *, + value: NumberType = 1, +) -> TensorLikeType: + """ + Reference implementation of torch.addcmul + """ + if value is not None: + dtype = self.dtype # no scalars allowed, see add + python_type = utils.dtype_to_type(dtype) + torch._check_value( + utils.is_weakly_lesser_type(type(value), python_type), + lambda: f"value argument of type {type(value)} cannot be safely cast to type {python_type}!", + ) + + return self + value * tensor1 * tensor2 + + +@register_decomposition(aten.clamp) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "min", "max"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def clamp( + a: TensorLikeType, + min: Optional[TensorOrNumberLikeType] = None, + max: Optional[TensorOrNumberLikeType] = None, +) -> TensorLikeType: + # NOTE: grad behavior with implementation `where` is not consistent on `nan` + if min is None and max is None: + msg = "clamp called but both min and max are none!" + raise ValueError(msg) + if min is not None: + a_isnan = torch.isnan(a) + condition = torch.bitwise_or(torch.ge(a, min), a_isnan) # type: ignore[arg-type] + # we should also propagate `nan` coming from boundaries. However, that's + # not necessary since `ge` would already `False` when either operands has + # a `nan`. 
So this line below is redundant + # `condition = bitwise_and(condition, bitwise_not(isnan(min)))` + a = torch.where(condition, a, min) # type: ignore[arg-type] + if max is not None: + a_isnan = torch.isnan(a) + # same as above, no need to adjust `nan` from `max` + condition = torch.bitwise_or(torch.le(a, max), a_isnan) # type: ignore[arg-type] + a = torch.where(condition, a, max) # type: ignore[arg-type] + + return a + + +@register_decomposition(aten.clamp_min) +@out_wrapper() +def clamp_min( + self: TensorLikeType, + min: Optional[TensorOrNumberLikeType] = None, +) -> TensorLikeType: + return torch.clamp(self, min=min) # type: ignore[arg-type] + + +@register_decomposition(aten.clamp_max) +@out_wrapper() +def clamp_max( + self: TensorLikeType, + max: Optional[TensorOrNumberLikeType] = None, +) -> TensorLikeType: + return torch.clamp(self, max=max) # type: ignore[arg-type] + + +# +# Conditional references +# + + +# https://pytorch.org/docs/stable/generated/torch.where.html +# TODO: implement alternate where +@register_decomposition(aten.where) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def where( + pred: Tensor, + a: Optional[TensorOrNumberLikeType] = None, + b: Optional[TensorOrNumberLikeType] = None, +): + """ """ + + if a is None or b is None: + raise NotImplementedError + + utils.check_same_device(pred, a, b, allow_cpu_scalar_tensors=True) + torch._check( + pred.dtype is torch.bool, + lambda: f"expected predicate to be bool, got {pred.dtype}", + ) + + pred, a, b = _maybe_broadcast(pred, a, b) + return prims.where(pred, a, b) + + +# +# Data Movement References +# +@register_decomposition(aten.clone) +@out_wrapper() +def clone( + a: TensorLikeType, *, memory_format: torch.memory_format = torch.preserve_format +) -> TensorLikeType: + result = prims.clone(a, memory_format=memory_format) + return result + + +def copy_to(a: Tensor, b: Tensor, *, allow_cross_device=True): + if not allow_cross_device and a.device != b.device: + msg = "Attempting to copy from device {} to device {}, but cross-device copies are not allowed!".format( + b.device, a.device + ) + raise RuntimeError(msg) + + return prims.copy_to(a, b) + + +@register_decomposition(aten.item) +def item(a: TensorLikeType) -> NumberType: + if a.numel() != 1: + msg = f"Can't convert a tensor with {a.numel()} elements to a number!" + raise ValueError(msg) + + # NOTE: explicit conversion is necessary for bool! + # See https://github.com/pytorch/pytorch/issues/78071 + number_type = utils.dtype_to_type(a.dtype) + return number_type(prims.item(a)) + + +# fast path when `to` returns an alias to input. 
This mimics the same function in aten +def _to_will_alias( + a: TensorLikeType, + device: Optional[DeviceLikeType] = None, + dtype: Optional[torch.dtype] = None, + copy: Optional[bool] = None, + layout: Optional[torch.layout] = None, + memory_format: Optional[torch.memory_format] = None, + pin_memory: Optional[bool] = False, + non_blocking: bool = False, # not using non_blocking +) -> bool: + return ( + not copy + and (device is None or a.device == device) + and (dtype is None or a.dtype == dtype) + and (layout is None or a.layout == layout) + # is_pinned issue #84925 + # and (pin_memory is None or pin_memory == a.is_pinned()) + and ( + memory_format is None + or memory_format == torch.preserve_format + or utils.is_contiguous_for_memory_format(a, memory_format=memory_format) + ) + ) + + +@singledispatch +def _to_dispatch(*args, **kwargs): + raise NotImplementedError + + +@_to_dispatch.register +def _to_device( + device: torch.device, + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "device": device, + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_device_str( + device: str, + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "device": torch.device(device), + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_dtype( + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_other( + other: Tensor, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + device = other.device + dtype = other.dtype + layout = other.layout + # is_pinned issue #84925 + # pin_memory = other.is_pinned() + kwargs = { + "device": device, + "dtype": dtype, + "layout": layout, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +# remove to_kwargs that is already present in `a` +def _canonicalize_to_arguments(a: Tensor, to_kwargs: dict): + options_to_check = ["dtype", "device", "layout", "memory_format"] + # "device" option could be passed a str instead torch.device + if "device" in to_kwargs and isinstance(to_kwargs["device"], str): + to_kwargs["device"] = torch.device(to_kwargs["device"]) + + for kw in options_to_check: + if kw in to_kwargs: + if ( + (kw == "memory_format" and to_kwargs[kw] is torch.preserve_format) + or ( + kw == "device" + and to_kwargs[kw].type == a.device.type + and ( + not to_kwargs[kw].index or to_kwargs[kw].index == a.device.index + ) + ) + or ( + getattr(a, kw, None) == to_kwargs[kw] + ) # this also handles {"memory_format": None} + ): + to_kwargs.pop(kw) + + +def to(a: TensorLikeType, *args, **kwargs) -> TensorLikeType: + # handled dispatch via positional arguments + if len(args) != 0: + kwargs = _to_dispatch(*args, **kwargs) + + # TODO: is_pinned is not currently supported in refs or fake_tensor + # https://github.com/pytorch/pytorch/issues/84925 + assert "pin_memory" not 
in kwargs + _canonicalize_to_arguments(a, kwargs) + + if _to_will_alias(a, **kwargs): + return a + + copy = kwargs.pop("copy") if "copy" in kwargs else False + non_blocking = kwargs.pop("non_blocking") if "non_blocking" in kwargs else False + + # short-circuit to `prims.convert_element_type` when `to` is just a dtype change + if ( + (copy or (kwargs.get("dtype", a.dtype) != a.dtype)) + and (not non_blocking) + and ("memory_format" not in kwargs) + and ("device" not in kwargs) + and ("layout" not in kwargs) + # is_pinned issue #84925 + # and ("pin_memory" not in kwargs) + ): + return prims.convert_element_type(a, kwargs.get("dtype", a.dtype)) + + result = torch.empty_like(a, **kwargs) + # TODO: non_blocking should be handled by `copy_to` + copy_to(result, a) + return result + + +# +# Reduction references +# + + +def _reduction( + a: TensorLikeType, + prim: Callable, + *, + has_identity: bool = True, + accepts_dim_tuple: bool = True, # to handle min/argmin that accept single dim only + dims: Optional[DimsType] = None, + keepdims: bool = False, + dtype: Optional[torch.dtype] = None, # should be specified for ops that support it + out: Optional[Tensor] = None, + output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND, +) -> TensorLikeType: # it is usually SAME, but I want + # ref writers to actually think about what to put here + assert isinstance(a, TensorLike) + if a.ndim > 64: + raise RuntimeError( + f"Received a tensor with {a.ndim} dimensions, but only tensors with up to 64 dims are supported!" + ) + + if out is not None: + assert isinstance(out, TensorLike) + if dtype is not None: + # TODO - this is true for eager mode currently, but it's wrong behavior for complex norms + if dtype != out.dtype: + raise RuntimeError( + "dtype argument and out dtype must match in reduction" + ) + if not accepts_dim_tuple: + assert dims is None or isinstance(dims, Dim) + if isinstance(dims, Dim): + dims = (dims,) # type: ignore[assignment] + dims = utils.reduction_dims(a.shape, dims) + if not has_identity: + valid_shape = a.ndim == 0 or py_all(a.shape[i] for i in dims) + if not valid_shape: + raise RuntimeError( + "reducing over zero-size dimension for reduction operation without identity" + ) + computation_dtype, result_dtype = utils.reduction_dtypes( + a, output_dtype_kind, dtype + ) + a = _maybe_convert_to_dtype(a, computation_dtype) # type: ignore[assignment] + result = prim(a, dims) + if keepdims: + output_shape = [a.shape[i] if i not in dims else 1 for i in range(a.ndim)] + broadcast_dims = [i for i in range(a.ndim) if i not in dims] + result = prims.broadcast_in_dim(result, output_shape, broadcast_dims) + + if out is not None: + assert result_dtype is not None + if dtype is not None and result_dtype != out.dtype: + raise RuntimeError( + "Expected the dtype of reduction result and out to match" + ) + out = _maybe_resize_out(out, result.shape) + return _safe_copy_out(copy_from=result, copy_to=out) # type: ignore[arg-type] + + if result.dtype != result_dtype and result_dtype is not None: + result = prims.convert_element_type(result, result_dtype) + + return result + + +def _make_copy_from_view(fn): + """ + Given a view function (e.g. torch.diagonal) generates its copy variant (e.g. 
torch.diagonal_copy) + """ + name = fn.__name__ + fn = out_wrapper()(fn) + + def _fn(*args, out=None, **kwargs): + result = fn(*args, out=out, **kwargs) + if out is None: + return result.clone(memory_format=torch.contiguous_format) + return result + + copy_name = f"{name}_copy" + _fn.__name__ = copy_name + _fn = register_decomposition(getattr(aten, copy_name))(_fn) + return _fn + + +# Saves Python all +py_all = all + + +@register_decomposition(aten.all) +@out_wrapper() +def all( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, +) -> TensorLikeType: + result = torch.logical_not(torch.any(torch.logical_not(a), dim, keepdim=keepdim)) + + if a.dtype == torch.uint8: + result = result.to(dtype=torch.uint8) + + return result + + +# Saves Python any +py_any = any + + +@register_decomposition(aten.any) +@out_wrapper() +def any( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, +) -> TensorLikeType: + a_ = _maybe_convert_to_dtype(a, torch.bool) + if isinstance(dim, (list, tuple)) and len(dim) == 0: + result = a_.clone() + else: + result = a_.sum(dim=dim, keepdim=keepdim).ne(False) + + # Preserves uint8 -- probably a legacy mask thing + if a.dtype is torch.uint8: + return prims.convert_element_type(result, torch.uint8) + + return result + + +@register_decomposition([aten.sum.dim_IntList, aten.sum.IntList_out]) +def sum( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + if dtype is None: + if out is not None: + dtype = out.dtype + elif utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + dtype = torch.int64 + else: + dtype = a.dtype + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + return _reduction( + a, + prims.sum, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=out, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +def sum_to_size( + a: Tensor, + *shape, +) -> Tensor: + shape = utils.extract_shape_from_varargs(shape, validate=False) + torch._check( + utils.is_expandable_to(shape, a.shape), + lambda: f'sum_to_size: size "{shape}" is not expandable to size "{a.shape}"', + ) + # In ATen scalar tensors are sent through sum and the result is returned as + # type promoted + if utils.is_same_shape(shape, a.shape) and len(shape) > 0: + return prims.view_of(a) + leading_dims = a.ndim - len(shape) + reduce_dims = tuple(range(leading_dims)) + tuple( + i + for i in range(leading_dims, len(shape)) + if shape[i - leading_dims] == 1 and a.shape[i] != 1 + ) + return torch.sum(a, dim=reduce_dims, keepdim=True, dtype=None) + + +@register_decomposition(aten.prod) +def prod( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + keepdim: bool = False, + *, + dtype=None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + if dtype is None: + if out is not None: + dtype = out.dtype + elif utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + dtype = torch.int64 + else: + dtype = a.dtype + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + return _reduction( + a, + prims.prod, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=out, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +@register_decomposition(aten.amin) +def amin( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + out: 
Optional[Tensor] = None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + return _reduction( + a, + prims.amin, + dims=dim, + keepdims=keepdim, + dtype=None, + out=out, + has_identity=False, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +@register_decomposition(aten.amax) +def amax( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + out: Optional[Tensor] = None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + return _reduction( + a, + prims.amax, + dims=dim, + keepdims=keepdim, + dtype=None, + out=out, + has_identity=False, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +def _dim_var_dispatch(dim=None, unbiased=None): + # There's the following overload of torch.var: + # var(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + # We need to explicitly convert bool dims to unbiased arg + if unbiased is None and isinstance(dim, bool): + unbiased = dim + dim = None + return dim, unbiased + + +@register_decomposition(aten.var) +@out_wrapper() +def var( + a: TensorLikeType, + dim: Optional[DimsType] = None, + unbiased: Optional[bool] = None, + keepdim: bool = False, + *, + correction: Optional[NumberType] = None, +) -> TensorLikeType: + dim, unbiased = _dim_var_dispatch(dim, unbiased) + correction = utils.set_correction(unbiased, correction) + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + result = _reduction( + a, + partial(prims.var, correction=correction), + dims=dim, + keepdims=keepdim, + dtype=None, + out=None, + has_identity=True, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, + ) + return result + + +@register_decomposition(aten.std) +@out_wrapper() +def std( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + unbiased: Optional[bool] = None, + keepdim: bool = False, + *, + correction: Optional[NumberType] = None, +) -> TensorLikeType: + dim, unbiased = _dim_var_dispatch(dim, unbiased) + correction = utils.set_correction(unbiased, correction) + + opmath_dtype, dtype = utils.reduction_dtypes( + a, REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT + ) + a = _maybe_convert_to_dtype(a, opmath_dtype) + a_var = torch.var(a, dim, correction=correction, keepdim=keepdim) + a_std = torch.sqrt(a_var) + assert dtype is not None + return _maybe_convert_to_dtype(a_std, dtype) + + +@register_decomposition(aten.mean) +def mean( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype=None, + out=None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + orig_dtype = dtype + if dtype is None: + dtype = a.dtype + # can't use out wrapper because of this argument + torch._check( + out is None or out.dtype == dtype, + lambda: f"Expected out tensor to have dtype {dtype}, but got {out.dtype} instead", + ) + result = _reduction( + a, + prims.sum, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=None, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE, + ) + torch._check( + utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype), + lambda: ( + f"mean(): could not infer output dtype. " + f"{'Input' if orig_dtype is None else 'Optional'} dtype must be either " + f"a floating point or complex dtype. 
Got: {dtype}" + ), + ) + if isinstance(dim, Dim): + dim = (dim,) # type: ignore[assignment] + dims = utils.reduction_dims(a.shape, dim) # type: ignore[arg-type] + nelem = 1 if a.ndim == 0 else reduce(operator.mul, (a.shape[i] for i in dims), 1) + result = true_divide(result, nelem) + result_dtype = a.dtype if dtype is None else dtype + result = _maybe_convert_to_dtype(result, result_dtype) # type: ignore[assignment] + if out is not None: + assert isinstance(out, TensorLike) + out = _maybe_resize_out(out, result.shape) + return _safe_copy_out(copy_from=result, copy_to=out) # type: ignore[arg-type] + return result + + +@register_decomposition(aten.std_mean) +@out_wrapper("out0", "out1") +def std_mean( + a: TensorLikeType, + dim: Optional[DimsType] = None, + *, + unbiased: Optional[bool] = None, + keepdim: bool = False, + correction: Optional[NumberType] = None, +): + dim, unbiased = _dim_var_dispatch(dim, unbiased) + correction = utils.set_correction(unbiased, correction) + opmath_dtype, dtype = utils.reduction_dtypes( + a, REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT + ) + original_dtype = a.dtype + a = _maybe_convert_to_dtype(a, opmath_dtype) + a_var, a_mean = torch.var_mean(a, dim, correction=correction, keepdim=keepdim) + a_std = torch.sqrt(a_var) + assert dtype is not None + return ( + _maybe_convert_to_dtype(a_std, dtype), + _maybe_convert_to_dtype(a_mean, original_dtype), + ) + + +@register_decomposition(aten.var_mean) +@out_wrapper("out0", "out1") +def var_mean( + a: TensorLikeType, + dim: Optional[DimsType] = None, + unbiased: Optional[bool] = None, + keepdim: bool = False, + *, + correction: Optional[NumberType] = None, +): + dim, unbiased = _dim_var_dispatch(dim, unbiased) + v = var(a, dim, unbiased, keepdim, correction=correction) + m = mean(a, dim, keepdim) + return v, m + + +@register_decomposition(aten.addr) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "vec1", "vec2"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def addr( + self: TensorLikeType, + vec1: TensorLikeType, + vec2: TensorLikeType, + *, + beta: NumberType = 1, + alpha: NumberType = 1, +) -> TensorLikeType: + torch._check( + vec1.ndim == 1, + lambda: f"addr: Expected 1-D argument vec1, but got {vec1.ndim}-D", + ) + torch._check( + vec2.ndim == 1, + lambda: f"addr: Expected 1-D argument vec2, but got {vec2.ndim}-D", + ) + self = self.expand(vec1.shape[0], vec2.shape[0]) + if utils.is_boolean_dtype(self.dtype): + # Integers are accepted for booleans + torch._check( + is_weakly_lesser_type(type(beta), int), + lambda: f"expected bool/int beta but got {type(beta)}", + ) + torch._check( + is_weakly_lesser_type(type(alpha), int), + lambda: f"expected bool/int alpha but got {type(beta)}", + ) + if not beta: + return torch.outer(vec1, vec2) if alpha else torch.full_like(self, False) + else: + return torch.logical_or( + self, + torch.outer(vec1, vec2) if alpha else torch.full_like(self, False), + ) + else: + torch._check( + is_weakly_lesser_type(type(beta), dtype_to_type(self.dtype)), + lambda: f"cannot safely convert {type(beta)} to {self.dtype}", + ) + torch._check( + is_weakly_lesser_type(type(alpha), dtype_to_type(self.dtype)), + lambda: f"cannot safely convert {type(alpha)} to {self.dtype}", + ) + if beta == 0: + # This means NaNs from self are dropped if beta is zero + return alpha * torch.outer(vec1, vec2) + else: + return beta * self + alpha * torch.outer(vec1, vec2) + + +# CompositeImplicitAutograd - don't register decomp +def atleast_1d( + arg: 
Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_1d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + res = tuple(a if a.ndim >= 1 else unsqueeze(a, 0) for a in args_) + return res if len(res) > 1 else res[0] + + +# Helper function with assert to avoid MyPy error +# of incompatible type passed to unsqueeze +def _unsqueeze_atleast( + at_least_fn: Callable, dim: int, arg: TensorLikeType +) -> TensorLikeType: + arg_ = at_least_fn(arg) + assert isinstance(arg_, TensorLike) + return unsqueeze(arg_, dim) + + +# CompositeImplicitAutograd - don't register decomp +def atleast_2d( + arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_2d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + unsqueeze_atleast_1d = partial(_unsqueeze_atleast, atleast_1d, 0) + res = tuple(a if a.ndim >= 2 else unsqueeze_atleast_1d(a) for a in args_) + return res if len(res) > 1 else res[0] + + +# CompositeImplicitAutograd - don't register decomp +def atleast_3d( + arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_3d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + unsqueeze_atleast_2d = partial(_unsqueeze_atleast, atleast_2d, -1) + res = tuple(a if a.ndim >= 3 else unsqueeze_atleast_2d(a) for a in args_) + return res if len(res) > 1 else res[0] + + +def as_strided( + a: TensorLikeType, + size: ShapeType, + stride: StrideType, + storage_offset: Optional[int] = None, +) -> TensorLikeType: + storage_offset_int = ( + storage_offset if storage_offset is not None else a.storage_offset() + ) + return prims.as_strided(a, size, stride, storage_offset_int) + + +@register_decomposition(aten.as_strided_scatter) +@out_wrapper() +def as_strided_scatter( + input: TensorLikeType, + src: TensorLikeType, + size: ShapeType, + stride: StrideType, + storage_offset: Optional[int] = None, +) -> TensorLikeType: + storage_offset_int = 0 if storage_offset is None else storage_offset + return prims.as_strided_scatter(input, src, size, stride, storage_offset_int) + + +def broadcast_shapes(*shapes) -> ShapeType: + return torch.Size(_broadcast_shapes(*shapes)) + + +@aten.broadcast_tensors.default.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.broadcast_tensors.default.py_impl(DispatchKey.Meta) +def broadcast_tensors(*tensors) -> List[TensorLikeType]: + if len(tensors) == 1 and not isinstance(tensors[0], Tensor): + tensors = tensors[0] + return list(_maybe_broadcast(*tensors, preserve_cpu_scalar_tensors=False)) + + +# CompositeImplicitAutograd - don't register decomp +def broadcast_to(a: TensorLikeType, size: ShapeType) -> TensorLikeType: + start = len(size) - len(a.shape) + dims = tuple(range(start, len(a.shape) + start)) + return prims.broadcast_in_dim(a, size, dims) + + +@register_decomposition(aten.cat) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("tensors",), + 
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def cat(tensors: TensorSequenceType, dim: int = 0) -> TensorLikeType: + def cat_compute_output_memory_format(inputs): + format = None + for t in inputs: + f = utils.suggest_memory_format(t) + if f == torch.contiguous_format: + return f + if format is not None and format != f: + return torch.contiguous_format + format = f + assert format is not None + return format + + if len(tensors) == 0: + msg = "cat expects at least one tensor, but received zero!" + raise ValueError(msg) + + for tensor in tensors: + assert isinstance(tensor, TensorLike) + + utils.check_same_device(*tensors, allow_cpu_scalar_tensors=False) + + for t in tensors: + # match logic in legacy_cat_wrap_dim + if t.ndim == 1 and t.size(0) == 0: + continue + dim = utils.canonicalize_dim(t.ndim, dim) + utils.validate_idx(t.ndim, dim) + break + + memory_format = cat_compute_output_memory_format(tensors) + + # Filters tensors with one dimension of length zero + filtered = tuple(x for x in tensors if not (x.ndim == 1 and x.numel() == 0)) + if len(filtered) == 0: + t = tensors[0] + + # TODO: fix this to work with meta tensors + try: + requires_grad = any(x.requires_grad for x in tensors) + except Exception: + requires_grad = False + + return empty( + (0,), + dtype=t.dtype, + device=t.device, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + return prims.cat(filtered, dim).clone(memory_format=memory_format) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def column_stack(tensors: TensorSequenceType) -> TensorLikeType: + aligned_tensors = tuple( + x if x.ndim > 1 else x.reshape((x.numel(), 1)) for x in tensors + ) + return cat(aligned_tensors, 1) + + +def conj(input: TensorLikeType) -> TensorLikeType: + if not utils.is_complex_dtype(input.dtype): + return input + if input.is_sparse: + return torch.conj_physical(input) + return prims.conj(input) + + +# This replicates at::constant_pad_nd, defined in ATen/native/PadNd.cpp +@register_decomposition(aten.constant_pad_nd) +@out_wrapper() +def constant_pad_nd( + input: TensorLikeType, pad: List[int], value: NumberType = 0 +) -> TensorLikeType: + torch._check( + len(pad) % 2 == 0, + lambda: f"Length of pad must be even but instead it equals {len(pad)}", + ) + + input_sizes = input.shape + l_inp = len(input_sizes) + + l_pad = len(pad) // 2 + l_diff = l_inp - l_pad + + torch._check( + l_inp >= l_pad, + lambda: "Length of pad should be no more than twice the number of " + f"dimensions of the input. Pad length is {len(pad)} while the input has " + f"{l_inp} dimensions.", + ) + + c_input = input + for i in range(l_diff, l_inp): + pad_idx = 2 * (l_inp - i - 1) + if pad[pad_idx] < 0: + c_input = c_input.narrow(i, -pad[pad_idx], c_input.shape[i] + pad[pad_idx]) + + if pad[pad_idx + 1] < 0: + c_input = c_input.narrow(i, 0, c_input.shape[i] + pad[pad_idx + 1]) + + # if none of the pads are positive we can just return the result + if builtins.all(p <= 0 for p in pad): + return c_input.clone() + + new_shape = list(input_sizes[:l_diff]) + + for i in range(l_pad): + pad_idx = len(pad) - ((i + 1) * 2) + new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1] + torch._check( + new_dim > 0, + lambda: f"The input size {input_sizes[l_diff + i]}, plus negative padding " + f"{pad[pad_idx]} and {pad[pad_idx + 1]} resulted in a negative output size, " + f"which is invalid. 
Check dimension {l_diff + i} of your input.", + ) + new_shape.append(new_dim) + + memory_format = utils.suggest_memory_format(input) + output = torch.empty( + new_shape, + dtype=input.dtype, + device=input.device, + requires_grad=input.requires_grad, + memory_format=memory_format, + ) + + if value == 0 and input.dtype == torch.bool: + value = False + # torch.fill isn't typed to allow complex values + output = torch.fill(output, value) # type: ignore[arg-type] + + c_output = output + for i in range(l_diff, l_inp): + pad_idx = 2 * (l_inp - i - 1) + if pad[pad_idx] > 0: + c_output = c_output.narrow( + i, pad[pad_idx], c_output.shape[i] - pad[pad_idx] + ) + if pad[pad_idx + 1] > 0: + c_output = c_output.narrow(i, 0, c_output.shape[i] - pad[pad_idx + 1]) + + prims.copy_to(c_output, c_input) + return output + + +def contiguous( + a: Tensor, *, memory_format: torch.memory_format = torch.contiguous_format +) -> Tensor: + torch._check( + memory_format != torch.preserve_format, + lambda: "preserve memory format is unsupported by the contiguous operator", + ) + + if utils.is_contiguous_for_memory_format(a, memory_format=memory_format): + return a + + return torch.clone(a, memory_format=memory_format) + + +@out_wrapper() +def dstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "dstack expects a non-empty TensorList") + aligned_tensors = atleast_3d(*tensors) + return cat(aligned_tensors, 2) + + +@register_decomposition(aten.expand) +def expand(a: Tensor, *shape) -> Tensor: + # NOTE: cannot use utils.extract_shape_from_varargs here + # because that also validates the shape, but the shape + # given to expand may be "invalid" + if len(shape) == 1 and isinstance(shape[0], Sequence): + shape = tuple(shape[0]) + + torch._check( + len(shape) >= len(a.shape), + lambda: "expand: the requested shape has too few dimensions!", + ) + + offset = len(shape) - len(a.shape) + shape_ = list(shape) + for idx, x in enumerate(a.shape): + offset_idx = idx + offset + requested_length = shape[offset_idx] + torch._check( + requested_length == x or x == 1 or requested_length == -1, + lambda: f"expand: attempting to expand a dimension of length {x}!", + ) + + shape_[offset_idx] = requested_length if requested_length != -1 else x + + # At this point shape must be valid + utils.validate_shape(shape_) + + return prims.broadcast_in_dim( + a, shape_, tuple(range(offset, len(a.shape) + offset)) + ) + + +# CompositeImplicitAutograd - don't register decomp +def expand_as(a: Tensor, b: Tensor) -> Tensor: + return a.expand(b.shape) + + +def chunk(a: TensorLikeType, chunks: int, dim: int = 0) -> Tuple[TensorLikeType, ...]: + if chunks <= 0: + msg = f"Expected at least one chunk, but got {chunks}!" 
+ raise ValueError(msg) + + dim = utils.canonicalize_dim(a.ndim, dim) + length = a.shape[dim] + chunk_size = math.ceil(length / chunks) + full_chunks = math.floor(length / chunk_size) + tail_chunk_size = length % chunk_size + + result = [] + for i in range(full_chunks): + result.append(narrow(a, dim, i * chunk_size, chunk_size)) + + if tail_chunk_size != 0: + result.append(narrow(a, dim, full_chunks * chunk_size, tail_chunk_size)) + + return tuple(result) + + +# Note: flatten, unlike other shape operators, returns the input tensor on a no-op (unless +# a 0D tensor is flattened, in which case it's returned in 1D) +# CompositeImplicitAutograd - don't register decomp +def flatten(a: TensorLikeType, start_dim: int = 0, end_dim: int = -1) -> TensorLikeType: + start_dim = utils.canonicalize_dim(a.ndim, start_dim) + end_dim = utils.canonicalize_dim(a.ndim, end_dim) + + # Short-circuits on no-op + if start_dim == end_dim and a.ndim != 0: + return a + + # Tries to take a view + # TODO: we could look at directing collapse_view to skip its meta function here (unsafe_collapse_view) + new_shape, new_strides = prims._collapse_view_helper(a, start_dim, end_dim) + if new_shape is not None: + return prims.collapse_view(a, start_dim, end_dim) + + # Makes a copy if it can't make a view + return prims.collapse(a, start_dim, end_dim) + + +@register_decomposition(aten.flip) +@out_wrapper() +def flip(a: TensorLikeType, dims: DimsSequenceType) -> TensorLikeType: + if not isinstance(dims, tuple) and not isinstance(dims, list): + raise ValueError("dims has to be a sequence of ints") + dims = utils.canonicalize_dims(a.ndim, dims) # type: ignore[assignment] + utils.validate_no_repeating_dims(dims) + return prims.rev(a, dims) + + +# CompositeImplicitAutograd - don't register decomp +def fliplr(a: TensorLikeType) -> TensorLikeType: + if a.ndim < 2: + raise RuntimeError("Input must be >= 2-d.") + + return flip(a, (1,)) + + +# CompositeImplicitAutograd - don't register decomp +def flipud(a: TensorLikeType) -> TensorLikeType: + if a.ndim < 1: + raise RuntimeError("Input must be >= 1-d.") + + return flip(a, (0,)) + + +# CompositeImplicitAutograd - don't register decomp +def narrow( + a: TensorLikeType, dim: int, start: Union[int, TensorLikeType], length: int +) -> TensorLikeType: + # Supports Tensor overload that was added for XLA: + # https://github.com/pytorch/pytorch/issues/31558 + if isinstance(start, TensorLike): + torch._check( + start.dim() == 0 and utils.is_integer_dtype(start.dtype), + lambda: "start must be an 0-dim integral Tensor.", + ) + start = start.item() # type: ignore[assignment] + torch._check(a.dim() > 0, lambda: "narrow() cannot be applied to a 0-dim tensor.") + torch._check(length >= 0, lambda: "narrow(): length must be non-negative.") + dim = utils.canonicalize_dim(a.ndim, dim) + dim_length = a.size(dim) + torch._check_with( + IndexError, + -dim_length <= start and start <= dim_length, # type: ignore[arg-type] + lambda: f"start out of range (expected to be in range of [{-dim_length}, {dim_length}], but got {start})", + ) + if start < 0: + start = start + dim_length + torch._check( + start <= dim_length - length, # type: ignore[arg-type] + lambda: f"start ({start}) + length ({length}) exceeds dimension size ({dim_length}).", + ) + return prims.slice_in_dim(a, start, start + length, axis=dim) + + +# TODO: This must return a sparse tensor if the input is sparse, but refs have +# no sparse support. See narrow_copy_sparse in core. 
+narrow_copy = _make_copy_from_view(narrow) + + +def _normalize( + a: Tensor, norm_dims: DimsType, eps: float +) -> Tuple[Tensor, Tensor, Tensor]: + """Computes mean and 1/std of a tensor along norm_dims. + + Used as a helper function for normalization layers. + + Args: + a (Tensor): input tensor + norm_dims (DimsType): dimensions to normalize over + eps (float): epsilon for numerical stability + + Returns: + out (Tensor): normalized tensor. + mean (Tensor): mean of the tensor along norm_dims. + rstd (Tensor): 1/std of the tensor along norm_dims. + """ + norm_dims = utils.canonicalize_dims(a.ndim, norm_dims) + computation_dtype = utils.get_computation_dtype(a.dtype) + a_acc = _maybe_convert_to_dtype(a, computation_dtype) + assert isinstance(a_acc, TensorLike) # to avoid mypy error for var_mean + biased_var, mean = torch.var_mean( + a_acc, dim=norm_dims, unbiased=False, keepdim=True + ) + rstd = torch.rsqrt(biased_var + eps) + out = (a - mean) * rstd + return out, mean, rstd + + +# add all specified dimensions +def _unsqueeze_multiple(x: TensorLikeType, dimensions: List[int]) -> TensorLikeType: + for dim in sorted(dimensions): + x = torch.unsqueeze(x, dim) + return x + + +@register_decomposition(aten.native_group_norm.default) +def native_group_norm( + input: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + batch_size: int, + num_channels: int, + flattened_inner_size: int, + num_groups: int, + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + torch._check( + input.ndim >= 2, + lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}", + ) + torch._check( + num_channels % num_groups == 0, + lambda: "Expected number of channels in input to be divisible by num_groups, " + + f"but got input of shape {input.shape} and num_groups = {num_groups}", + ) + + # num_channels / num_groups and flattened inner dimension are the reduction axes + reduction_dims = [2, 3] + input_reshaped = torch.reshape( + input, + [batch_size, num_groups, num_channels // num_groups, flattened_inner_size], + ) + out, mean, rstd = _normalize(input_reshaped, reduction_dims, eps) + out = out.view(input.shape) + + broadcast_dims = [0] + list(range(2, input.ndim)) + unsqueeze_bias = None + if bias is not None: + unsqueeze_bias = _unsqueeze_multiple(bias, broadcast_dims) + unsqueeze_weight = None + if weight is not None: + unsqueeze_weight = _unsqueeze_multiple(weight, broadcast_dims) + + if unsqueeze_weight is not None: + out = out * unsqueeze_weight + if unsqueeze_bias is not None: + out = out + unsqueeze_bias + + out = _maybe_convert_to_dtype(out, input.dtype) # type: ignore[assignment] + mean = _maybe_convert_to_dtype(mean, input.dtype) # type: ignore[assignment] + rstd = _maybe_convert_to_dtype(rstd, input.dtype) # type: ignore[assignment] + + # remove broadcast dimensions from mean and rstd + mean = torch.squeeze(mean, reduction_dims) + rstd = torch.squeeze(rstd, reduction_dims) + return (out, mean, rstd) + + +@register_decomposition(aten.native_layer_norm) +@out_wrapper("out0", "out1", "out2") +def native_layer_norm( + input: Tensor, + normalized_shape: ShapeType, + weight: Optional[Tensor], + bias: Optional[Tensor], + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + normalized_ndim = len(normalized_shape) + torch._check( + normalized_ndim >= 1, + lambda: "Expected normalized_shape to be at least 1-dimensional, i.e., " + + "containing at least one element, but got normalized_shape = " + + str(normalized_shape), + ) + # torch.Size([1, 2, 3]) == [1, 2, 3] evaluates to False + # 
while torch.Size([1, 2, 3]) == (1, 2, 3) is True + # therefore we use tuple(normalized_shape) + torch._check( + weight is None or weight.shape == tuple(normalized_shape), + lambda: "Expected weight to be of same shape as normalized_shape, but got " + + "weight of shape " + + str(weight.shape) # type: ignore[union-attr] + + " and normalized_shape = " + + str(normalized_shape), + ) + torch._check( + bias is None or bias.shape == tuple(normalized_shape), + lambda: "Expected bias to be of same shape as normalized_shape, but got " + + "bias of shape " + + str(bias.shape) # type: ignore[union-attr] + + " and normalized_shape = " + + str(normalized_shape), + ) + torch._check( + input.ndim >= normalized_ndim + and input.shape[(input.ndim - normalized_ndim) :] == tuple(normalized_shape), + lambda: "Given normalized_shape=" + + str(normalized_shape) + + ", expected input with shape " + + str(normalized_shape) + + ", but got input of size " + + str(input.shape), + ) + + input = input.contiguous() + if weight is not None: + weight = weight.contiguous() + if bias is not None: + bias = bias.contiguous() + + axis = input.ndim - normalized_ndim + reduction_dims = list(range(axis, input.ndim)) + out, mean, rstd = _normalize(input, reduction_dims, eps) + + if weight is None and bias is not None: + out = out + bias + elif weight is not None and bias is None: + out = out * weight + elif weight is not None and bias is not None: + out = out * weight + bias + + out = _maybe_convert_to_dtype(out, input.dtype) # type: ignore[assignment] + if input.device.type == "cpu": + mean = _maybe_convert_to_dtype(mean, input.dtype) # type: ignore[assignment] + rstd = _maybe_convert_to_dtype(rstd, input.dtype) # type: ignore[assignment] + return (out, mean, rstd) + + +# TODO: Adding this as a meta function causes functorch tests to fail when compiled with debug mode. 
+# test/test_eager_transforms.py::TestFunctionalizeCPU::test_functionalize_fx_transpose_simple_cpu +@register_decomposition(aten.permute) +def permute(a: TensorLikeType, *dims) -> TensorLikeType: + _permutation = utils.canonicalize_dims( + a.ndim, utils.extract_dims_from_varargs(dims) + ) + return prims.transpose(a, _permutation) + + +@register_decomposition(aten.renorm) +@out_wrapper() +def renorm( + input: TensorLikeType, p: RealNumberType, dim: int, maxnorm: RealNumberType +) -> TensorLikeType: + torch._check(not isinstance(p, complex), lambda: "renorm: p must be real-valued") + torch._check(p > 0, lambda: "renorm: non-positive norm not supported") + torch._check( + not isinstance(maxnorm, complex), lambda: "renorm: maxnorm must be real-valued" + ) + torch._check( + maxnorm >= 0, lambda: f"renorm: expected maxnorm to be >= 0 but got {maxnorm}" + ) + ndim = input.ndim + torch._check( + ndim > 1, + lambda: f"renorm: input needs at least 2 dimensions, got {ndim} dimensions", + ) + + dim = utils.canonicalize_dim(ndim, dim) + reduce_dims = list(range(ndim)) + del reduce_dims[dim] + + # For half and bfloat16, calculate norm in float precision then cast + # normalization factor to half + acc_type = utils.get_computation_dtype(input.dtype) + if acc_type != input.dtype: + norm = torch.linalg.vector_norm( + input, p, reduce_dims, keepdim=True, dtype=acc_type + ) + else: + norm = torch.linalg.vector_norm(input, p, reduce_dims, keepdim=True) + + eps = 1e-7 + norm_factor = torch.where(norm > maxnorm, maxnorm / (norm + eps), 1.0) + if acc_type != input.dtype: + norm_factor = prims.convert_element_type(norm_factor, input.dtype) + return (input * norm_factor).contiguous() + + +# CompositeImplicitAutograd - don't register decomp +@aten.stft.center.py_impl(DispatchKey.CompositeImplicitAutograd) +def stft( + input: Tensor, + n_fft: int, + hop_length: Optional[int] = None, + win_length: Optional[int] = None, + window: Optional[Tensor] = None, + center: bool = True, + pad_mode: str = "reflect", + normalized: bool = False, + onesided: Optional[bool] = None, + return_complex: Optional[bool] = None, +) -> Tensor: + torch._check( + window is None or window.device == input.device, + lambda: ( + f"stft input and window must be on the same device but got self on {input.device}" + + f" and window on {window.device}" # type: ignore[union-attr] + ), + ) + + hop_length_ = hop_length if hop_length is not None else n_fft // 4 + win_length_ = win_length if win_length is not None else n_fft + + if return_complex is None: + return_complex_ = input.is_complex() or ( + window is not None and utils.is_complex_dtype(window.dtype) + ) + torch._check( + return_complex_, + ( + "stft requires the return_complex parameter be given for real inputs, " + + "and will further require that return_complex=True in a future PyTorch release." 
+ ), + ) + else: + return_complex_ = return_complex + + torch._check( + utils.is_float_dtype(input.dtype) or utils.is_complex_dtype(input.dtype), + lambda: "stft expected a tensor of floating point or complex values", + ) + torch._check(1 <= input.ndim <= 2, lambda: "stft expected a 1D or 2D tensor") + + original_ndim = input.ndim + if original_ndim == 1: + input = input.unsqueeze(0) + + if center: + extra_dims = 3 - input.ndim + pad_amount = n_fft // 2 + extended_shape = [*itertools.repeat(1, extra_dims), *input.shape] + input = aten.pad(input.view(extended_shape), [pad_amount, pad_amount], pad_mode) + input = input.view(input.size()[extra_dims:]) + + batch = input.size(0) + length = input.size(1) + torch._check( + 0 < n_fft <= length, + lambda: f"stft expected 0 < n_fft <= {length}, but got n_fft={n_fft}", + ) + torch._check( + hop_length_ > 0, + lambda: f"stft expected hop_length > 0 but got hop_length={hop_length_}", + ) + torch._check( + 0 < win_length_ <= n_fft, + lambda: f"stft expected 0 < win_length <= n_fft but got win_length={win_length_}", + ) + torch._check( + window is None or window.shape == (win_length_,), + lambda: ( + f"expected a 1D window tensor of size equal to win_length={win_length_}, " + + f"but got window with size {window.shape}" # type: ignore[union-attr] + ), + ) + + if win_length_ < n_fft: + if window is None: + window = torch.ones(win_length_, dtype=input.dtype, device=input.device) + left = (n_fft - win_length_) // 2 + window = aten.constant_pad_nd(window, [left, n_fft - win_length_ - left]) + + input = input.unfold(dimension=-1, size=n_fft, step=hop_length_) + if window is not None: + input = input * window + + complex_fft = utils.is_complex_dtype(input.dtype) + onesided = onesided if onesided is not None else not complex_fft + norm = "ortho" if normalized else None + if onesided: + torch._check( + not complex_fft, + lambda: "Cannot have onesided output if window or input is complex", + ) + out = torch.fft.rfft(input, dim=-1, norm=norm) + else: + out = torch.fft.fft(input, dim=-1, norm=norm) + + out.transpose_(1, 2) + + if original_ndim == 1: + out = out.squeeze_(0) + + return out if return_complex_ else torch.view_as_real(out) + + +# CompositeImplicitAutograd - don't register decomp +@aten.istft.default.py_impl(DispatchKey.CompositeImplicitAutograd) +def istft( + input: Tensor, + n_fft: int, + hop_length: Optional[int] = None, + win_length: Optional[int] = None, + window: Optional[Tensor] = None, + center: bool = True, + normalized: bool = False, + onesided: Optional[bool] = None, + length: Optional[int] = None, + return_complex=False, +) -> Tensor: + torch._check( + window is None or window.device == input.device, + lambda: ( + f"istft input and window must be on the same device but got self on {input.device}" + + f" and window on {window.device}" # type: ignore[union-attr] + ), + ) + + hop_length_ = hop_length if hop_length is not None else n_fft // 4 + win_length_ = win_length if win_length is not None else n_fft + + torch._check( + utils.is_complex_dtype(input.dtype), + lambda: ( + "istft input and window must be on the same device but got self on " + + f"{input.device} and window on {window.device}" # type: ignore[union-attr] + ), + ) + n_frames = input.size(-1) + fft_size = input.size(-2) + + expected_output_signal_len = n_fft + hop_length_ * (n_frames - 1) + torch._check(input.numel() > 0, lambda: "istft input tensor cannot be empty") + torch._check( + 2 <= input.ndim <= 3, + lambda: f"istft expected a tensor with 2 or 3 dimensions, but got 
{input.ndim}", + ) + onesided_ = onesided if onesided is not None else fft_size != n_fft + + if onesided_: + torch._check( + n_fft // 2 + 1 == fft_size, + lambda: ( + "istft expected the frequency dimension (3rd to the last) of the input tensor " + + "to match n_fft / 2 + 1 when onesided=True, but got {fft_size}" + ), + ) + else: + torch._check( + n_fft == fft_size, + lambda: ( + "istft expected the frequency dimension (3rd to the last) of the input tensor " + + "to match n_fft when onesided=False, but got {fft_size}", + ), + ) + + torch._check( + 0 < hop_length_ <= win_length_, + lambda: "istft expected 0 < hop_length <= win_length", + ) + torch._check( + 0 < win_length_ <= n_fft, lambda: "istft expected 0 < win_length <= n_fft" + ) + torch._check( + window is None or window.shape == (win_length_,), + lambda: "Invalid window shape. window has to be 1D and length of `win_length`", + ) + + if window is None: + real_dtype = utils.corresponding_real_dtype(input.dtype) + window_ = torch.ones(win_length_, dtype=real_dtype, device=input.device) + else: + window_ = window + + if win_length_ != n_fft: + left = (n_fft - win_length_) // 2 + window_ = aten.constant_pad_nd(window_, (left, n_fft - win_length_ - left), 0) + + original_ndim = input.ndim + if input.ndim == 2: + input = input.unsqueeze(0) + + input = input.transpose(1, 2) + norm = "ortho" if normalized else None + if return_complex: + torch._check( + not onesided_, + lambda: "cannot have onesided output if window or input is complex", + ) + input = torch.fft.ifft(input, dim=-1, norm=norm) + else: + torch._check( + window is None or not utils.is_complex_dtype(window.dtype), + lambda: "Complex windows are incompatible with return_complex=False", + ) + if not onesided_: + input = input.narrow(dim=-1, start=0, length=n_fft // 2 + 1) + input = torch.fft.irfft(input, dim=-1, norm=norm) + + assert input.size(2) == n_fft + + y_tmp = input * window_.view([1, 1, n_fft]) + y = aten.unfold_backward( + y_tmp, + input_sizes=(y_tmp.size(0), expected_output_signal_len), + dim=1, + size=n_fft, + step=hop_length_, + ) + window_envelop = aten.unfold_backward( + window_.pow(2).expand((1, n_frames, n_fft)), + input_sizes=(y_tmp.size(0), expected_output_signal_len), + dim=1, + size=n_fft, + step=hop_length_, + ) + + assert expected_output_signal_len == y.size(1) + assert expected_output_signal_len == window_envelop.size(1) + + start = n_fft // 2 if center else 0 + if length is not None: + end = start + length + elif center: + end = expected_output_signal_len - n_fft // 2 + else: + end = expected_output_signal_len + + length = max(0, end - start) + y = y.narrow(dim=1, start=start, length=length) + window_envelop = window_envelop.narrow(dim=1, start=start, length=length) + + window_envelop_lowest = window_envelop.abs().min().lt(1e-11) + torch._check( + not window_envelop_lowest.item(), + lambda: "window overlap add min less than 1e-11", + ) + + y = y / window_envelop + if original_ndim == 2: + y = y.squeeze(0) + + if end > expected_output_signal_len: + warnings.warn( + "The length of signal is shorter than the length parameter. Result is being " + + "padded with zeros in the tail. 
Please check your center and hop_length settings" + ) + y = aten.constant_pad_nd(y, (0, end - expected_output_signal_len), 0) + return y + + +# Get the new shape and stride after applying unfold to an input tensor +def _get_unfold_shape_stride( + a_shape: ShapeType, a_stride: StrideType, dimension: int, size: int, step: int +): + a_ndim = len(a_shape) + dim = utils.canonicalize_dim(a_ndim, dimension, wrap_scalar=True) + max_size = 1 if a_ndim == 0 else a_shape[dim] + last_stride = 1 if a_ndim == 0 else a_stride[dim] + + torch._check( + size <= max_size, + lambda: f"Maximum size for tensor at dimension {dim} is {max_size} but size is {size}", + ) + + torch._check( + step > 0, + lambda: f"Step is {step} but must be > 0", + ) + + shape = list(a_shape) + strides = list(a_stride) + shape.append(size) + strides.append(last_stride) + if dim < a_ndim: + shape[dim] = (shape[dim] - size) // step + 1 + strides[dim] *= step + return shape, strides + + +@register_decomposition(aten.repeat) +@out_wrapper() +def repeat(a: Tensor, *repeat_shape) -> Tensor: + repeat_shape = utils.extract_shape_from_varargs(repeat_shape, validate=False) + torch._check( + len(repeat_shape) >= len(a.shape), + lambda: "repeat: Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor", + ) + + if len(repeat_shape) == 0: + return torch.clone(a) + + num_new_dimensions = len(repeat_shape) - a.ndim + padded_shape = [1] * num_new_dimensions + for dim_size in a.shape: + padded_shape.append(dim_size) + + target_shape = tuple( + padded_size * repeat_size + for padded_size, repeat_size in zip(padded_shape, repeat_shape) + ) + + # return an empty tensor if one of the repeat_shape dimensions is zero + if 0 in repeat_shape: + return torch.empty( + target_shape, + dtype=a.dtype, + device=a.device, + requires_grad=a.requires_grad, + memory_format=utils.suggest_memory_format(a), + ) + + urtensor_shape = target_shape + urtensor_stride = utils.make_contiguous_strides_for(target_shape) + for dim, dim_size in enumerate(padded_shape): + # repeat each dimension by using unfold_copy operation + urtensor_shape, urtensor_stride = _get_unfold_shape_stride( + urtensor_shape, urtensor_stride, dim, dim_size, max(dim_size, 1) + ) + + # derive permute order by sorting urtensor strides + enumerated_stride = list(enumerate(urtensor_stride)) + enumerated_stride.sort(key=lambda item: item[1], reverse=True) + permute_order, sorted_stride = zip(*enumerated_stride) + + # add new and expand dimensions according to urtensor + repeat_xtensor = a.expand(urtensor_shape) + + # clone tensor to concretize expanded dimensions + cloned_result = torch.clone(repeat_xtensor) + + # transpose axis so strides are in sorted order + permuted_result = cloned_result.permute(permute_order) + + # reshape to get contiguous tensor with correct target shape + return permuted_result.reshape(target_shape) + + +def _reshape_view_helper(a: TensorLikeType, *shape, allow_copy: bool) -> TensorLikeType: + # Creates a valid shape + shape = utils.extract_shape_from_varargs(shape, validate=False) + # Reshape may be given a shape with a -1 length + # This indicates that the dimension's length should be inferred + shape = utils.infer_size(shape, a.numel()) + + # Short-circuits if shape is the same + if tuple(a.shape) == tuple(shape): + return prims.view_of(a) + + # Special-cases tensors with no elements + if a.numel() == 0: + return as_strided(a, shape, utils.make_contiguous_strides_for(shape)) + + # Special-cases reshaping zero dim tensors + if a.ndim == 0: + _a = a 
+ for length in shape: + assert length == 1 + _a = unsqueeze(_a, -1) + return _a + + # Special-cases reshaping to zero dim tensors + if len(shape) == 0: + _a = a + for length in a.shape: + assert length == 1 + _a = squeeze(_a, -1) + return _a + + # Handles general case: a 1+D tensor reshaped into a distinct 1+D shape + + # NOTE [Reshape Algorithm] + # This algorithm works by attempting to greedily construct the desired dimensions in + # the output shape, left to right. It does this by, conceptually, accumulating + # dimensions of the original tensor, also left to right, until the dimension + # can be constructed using prims.split_dim. + # The algorithm also has special handling for tail squeezes/unsqueezes, like + # if a reshape from (5, 5) to (5, 5, 1) or vice versa. + # + # This algorithm does not flatten the original tensor and then split dims as appropriate + # because that would create copies more often than this algorithm. flatten is the only + # operation below which can create a view or a copy, and while it prefers creating + # views it may sometimes create a copy if the tensor's strides do not permit a view. + # As a result, this algorithm tries to minimize flattening. + # + # Note that a better version of this algorithm may exist. Regions which could be + # flattened without creating a copy can be identified in advance, and that might + # allow fewer flatten calls or faster short-circuiting to make a copy. + idx = 0 + a_ = a + for length in shape: + # Handles tail unsqueezes + if idx >= a_.ndim: + assert length == 1 + last_dim = a_.ndim - 1 + # NOTE: using split_dim instead of unsqueeze may seem silly here, + # but it's necessary to get the strides correct + a_ = prims.split_dim(a_, last_dim, a_.shape[last_dim]) + idx = idx + 1 + continue + + # Skips dimensions that are already the correct length + if length == a_.shape[idx]: + idx = idx + 1 + continue + + # Gathers enough original dimensions such that this new dimension can be created + # Note that this accumulation will terminate because we've verified a and the shape + # specify the same number of elements above + accum = a_.shape[idx] + end = idx + while accum % length != 0: + end = end + 1 + accum = accum * a_.shape[end] + if end != idx: + # NOTE: in this case multiple dimensions must be flatten to create the desired dimension + # This flattening is why reshape sometimes creates a copy -- because flattening + # may return a view of a copy + + # Checks if collapse can be a view and short-circuits to copying reshape if it can't + new_shape, new_strides = prims._collapse_view_helper(a_, idx, end) + if new_shape is None: + if allow_copy: + return prims.reshape(a, shape) + + msg = "Cannot view a tensor with shape {} and strides {} as a tensor with shape {}!".format( + a.shape, a.stride(), shape + ) + raise ValueError(msg) + + a_ = flatten(a_, idx, end) + + # Splits the (possibly flattened) dimension to create the desired dim length + if accum != length: + a_ = prims.split_dim(a_, idx, length) + + idx = idx + 1 + + # Squeezes tail + while idx < a_.ndim: + assert a_.shape[idx] == 1 + a_ = squeeze(a_, idx) + + return a_ + + +# CompositeImplicitAutograd - don't register decomp +# NOTE: shape is a vararg because Tensor.reshape can be called with as +# Tensor.reshape(a, b, c) or Tensor.reshape((a, b, c)) Function call +# torch.reshape doesn't support unpacked shapes +def reshape(a: TensorLikeType, *shape: ShapeType) -> TensorLikeType: + return _reshape_view_helper(a, *shape, allow_copy=True) + + +# CompositeImplicitAutograd - don't 
register decomp +def reshape_as(self: TensorLikeType, other: TensorLikeType) -> TensorLikeType: + return self.reshape(other.size()) + + +@register_decomposition(aten.roll) +@out_wrapper() +def roll( + a: TensorLikeType, shifts: DimsType, dims: DimsType = tuple() +) -> TensorLikeType: + """Reference implementation of :func:`torch.roll`.""" + dims = utils.canonicalize_dims(a.ndim, dims) + # ATen specifies int[1] type for shifts and dims which expands integers to tuples of length 1 + if not isinstance(shifts, Iterable): + shifts = (shifts,) + if not isinstance(dims, Iterable): + dims = (dims,) + + # Avoid modulo by zero + if a.numel() == 0: + # Keeping this as ref for now as FakeTensor runs into some issues with complex tensors + return clone(a) + + if a.dim() == 0 and len(dims) > 0: + raise IndexError( + f"Dimension specified as {dims[0]} but tensor has no dimensions" + ) + + len_shifts = len(shifts) + len_dims = len(dims) + if len_shifts != 1 or len_dims != 1: + if len_shifts == 0: + raise RuntimeError("`shifts` required") + # Takes care of the case when dims is not specified (default) + # By default, the tensor is flattened before shifting, after which the original shape is restored + if len_dims == 0 and len_shifts == 1: + return torch.roll(torch.flatten(a), shifts, 0).view(a.shape) + if len_shifts != len_dims: + raise RuntimeError( + f"shifts and dimensions must align. shifts: {len_shifts}, dims: {len_dims}" + ) + assert len_dims > 1 + tail_shifts = shifts[1:] + tail_dims = dims[1:] + first_dim_rolled = torch.roll(a, (shifts[0],), dims[0]) + return torch.roll(first_dim_rolled, tail_shifts, tail_dims) + + # This path is taken when only one dimension is rolled + # For example to get `first_dim_rolled` above + dim = dims[0] + size = a.shape[dim] + start = (size - shifts[0]) % size + t0 = torch.narrow(a, dim, start, size - start) + t1 = torch.narrow(a, dim, 0, start) + return torch.cat((t0, t1), dim) + + +@register_decomposition(aten.rot90) +@out_wrapper() +def rot90( + a: TensorLikeType, k: int = 1, dims: DimsSequenceType = (0, 1) +) -> TensorLikeType: + """Reference implementation of :func:`torch.rot90`.""" + if len(dims) != 2: + raise RuntimeError( + f"expected total rotation dims == 2, but got dims = {len(dims)}" + ) + if a.ndim < 2: + raise RuntimeError(f"expected total dims >= 2, but got total dims = {a.ndim}") + + # Do this after the initial checks to be compatible with the behavior in + # core. 
+ dims = utils.canonicalize_dims(a.ndim, dims) + + if dims[0] == dims[1]: + raise RuntimeError( + f"expected rotation dims to be different, but got dim0 = {dims[0]} and dim1 = {dims[1]}" + ) + k = k % 4 # Rotation direction is from the second towards the first axis for k < 0 + if k == 1: + return torch.transpose(torch.flip(a, (dims[1],)), dims[0], dims[1]) + elif k == 2: + return torch.flip(a, dims) + elif k == 3: + return torch.transpose(torch.flip(a, (dims[0],)), dims[0], dims[1]) + else: + return clone(a, memory_format=torch.contiguous_format) + + +def _check_stack_inputs(tensors: TensorSequenceType) -> None: + entry_shape = tensors[0].shape + for i in range(1, len(tensors)): + assert tensors[i].shape == entry_shape, ( + f"stack expects each tensor to be equal size, but got {entry_shape} at entry 0" + f"and {tensors[i].shape} at entry {i}" + ) + + +@register_decomposition(aten.stack) +@out_wrapper() +def stack(tensors: TensorSequenceType, dim: int = 0) -> TensorLikeType: + assert len(tensors) > 0, "stack expects a non-empty TensorList" + wrapped_dim = utils.canonicalize_dim(tensors[0].ndim + 1, dim) + # Refs need sparse support to check other condition + if wrapped_dim < tensors[0].ndim: # and not tensors[0].is_sparse: + _check_stack_inputs(tensors) + result_sizes = list(tensors[0].shape) + result_sizes.insert(wrapped_dim, len(tensors)) + out = torch.cat(tensors, wrapped_dim) + return out.view(result_sizes) + + # If dim == tensors[0].ndim, view cannot efficiently handle it + return torch.cat([t.unsqueeze(wrapped_dim) for t in tensors], dim) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + result_dtype = dtype or a.dtype + computation_dtype = utils.get_computation_dtype(result_dtype) + a_ = _maybe_convert_to_dtype(a, computation_dtype) + if a.numel() == 0: + a_exp = exp(a_) + else: + a_max = amax(a_, dim, keepdim=True) + a_exp = exp(a_ - a_max) + return _maybe_convert_to_dtype( + true_divide(a_exp, sum(a_exp, dim, keepdim=True)), result_dtype + ) # type: ignore[return-value] + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def hstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "hstack expects a non-empty TensorList") + aligned_tensors = atleast_1d(*tensors) + if aligned_tensors[0].ndim == 1: + return cat(aligned_tensors, 0) + return cat(aligned_tensors, 1) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def vstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "vstack expects a non-empty TensorList") + aligned_tensors = atleast_2d(*tensors) + return cat(aligned_tensors, 0) + + +# CompositeImplicitAutograd - don't register decomp +def unflatten(a: TensorLikeType, dim: int, sizes: ShapeType) -> TensorLikeType: + dim = utils.canonicalize_dim(a.ndim, dim) + torch._check(len(sizes) != 0, lambda: "unflatten: sizes must be non-empty") + return a.view(tuple(a.shape[:dim]) + tuple(sizes) + tuple(a.shape[dim + 1 :])) + + +@register_decomposition(aten.unbind) +def unbind(t: TensorLikeType, dim: int = 0) -> TensorSequenceType: + dim = utils.canonicalize_dim(t.ndim, dim) + torch._check_index( + len(t.shape) > 0, + lambda: "Dimension specified as 0 but tensor has no dimensions", + ) + if t.shape[dim] == 0: + return tuple() + else: + return tuple( + torch.squeeze(s, dim) for s in torch.tensor_split(t, t.shape[dim], dim) + ) + + 
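Illustrative aside (not part of the diffed torch/_refs source): the `unbind` reference directly above decomposes the op into `tensor_split` followed by a per-slice `squeeze`. A minimal sketch, assuming only a standard `torch` install, that checks this decomposition against the eager kernel:

    import torch

    t = torch.arange(12).reshape(3, 4)

    # eager unbind along dim 0
    eager = torch.unbind(t, dim=0)

    # the same result built the way the reference above builds it:
    # tensor_split into t.shape[dim] pieces, then squeeze the split dim
    decomposed = tuple(
        torch.squeeze(s, 0) for s in torch.tensor_split(t, t.shape[0], 0)
    )

    assert len(eager) == len(decomposed)
    assert all(torch.equal(a, b) for a, b in zip(eager, decomposed))

Comparing a composed reference against the eager op like this is a quick way to sanity-check such decompositions.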
+@out_wrapper() +def index_copy(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike): + return x.clone(memory_format=torch.contiguous_format).index_copy_( + dim, index, tensor + ) + + +def index_copy_(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike): + dim = utils.canonicalize_dims(x.ndim, dim) + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + # Treat scalars as elements of \R^1 + y = x.unsqueeze(0) if x.ndim == 0 else x + idx = (slice(None),) * dim + (index,) + y[idx] = tensor + return x + + +@register_decomposition(aten.index_fill) +@out_wrapper() +def index_fill( + x: TensorLike, dim: int, index: TensorLike, value: Union[NumberType, TensorLike] +): + return _index_fill(x, dim, index, value, inplace=False) + + +@register_decomposition(aten.index_fill_) +def index_fill_( + x: TensorLike, dim: int, index: TensorLike, value: Union[NumberType, TensorLike] +): + return _index_fill(x, dim, index, value, inplace=True) + + +def _index_fill( + x: TensorLike, + dim: int, + index: TensorLike, + value: Union[NumberType, TensorLike], + *, + inplace: bool, +): + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + if isinstance(value, TensorLike): + torch._check( + value.ndim == 0, + lambda: "Only supports 0-dimensional value tensor. " # type: ignore[union-attr] + f"Got a tensor with {value.ndim} dimensions.", + ) # type: ignore[arg-type] + else: + value = torch.scalar_tensor( + value, dtype=x.dtype, layout=x.layout, device=x.device # type: ignore[arg-type] + ) + + # index_copy has some unnecessary preconditions when x is a scalar. We do this to work through them + zero_dim = x.ndim == 0 + y = x.unsqueeze(0) if zero_dim else x + # index_copy does not broadcast on value so we have to do it manually + shape = list(y.shape) + shape[dim] = index.numel() + value = value.expand(shape) + index_copy = Tensor.index_copy_ if inplace else torch.index_copy + out = index_copy(y, dim, index, value) # type: ignore[operator] + if inplace: + return x + else: + if zero_dim: + # The clone is necessary so that it returns a fresh tensor rather than a view + out = out.squeeze(0).clone() + # index_fill preserves the strides. 
index_copy always returns contiguous tensors + if out.stride() != x.stride(): + new_out = torch.empty_like(x) + new_out.copy_(out) + out = new_out + return out + + +@out_wrapper() +def index_add( + x: TensorLike, + dim: int, + index: TensorLike, + tensor: TensorLike, + *, + alpha: NumberType = 1, +): + # index_add always returns a new contiguous tensor + return x.clone(memory_format=torch.contiguous_format).index_add_( + dim, index, tensor, alpha=alpha # type: ignore[arg-type] + ) + + +@register_decomposition(aten.index_select) +@out_wrapper() +def index_select(x: TensorLike, dim: int, index: TensorLike): + dim = utils.canonicalize_dims(x.ndim, dim) + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + if index.ndim == 0: + index = index.unsqueeze(0) + if x.ndim == 0: + # Treat scalars as elements of \R^1 + # We cannot use x[idx] here as it accesses item() (??), hence this awkward construction + return torch.empty_like(x).index_copy(0, index, x.expand_as(index)) + + idx = (slice(None),) * dim + (index,) + return x[idx] + + +@register_decomposition(aten.squeeze.dims) +def squeeze(a: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType: + if dim is None: + dims = tuple(idx for idx, size in enumerate(a.shape) if size == 1) + return prims.squeeze(a, dims) if dims else prims.view_of(a) + + ndim = a.ndim + dim = utils.canonicalize_dims(ndim, dim) + dims = (dim,) if isinstance(dim, Dim) else dim + # Short-circuits if the tensor has no dimensions + if ndim == 0: + assert len(dims) == 0 or dims == (0,) + return prims.view_of(a) + + # Note: squeeze does not modify tensors when the given dim is not a dimension of length 1 + dims = tuple(d for d in dims if a.shape[d] == 1) + if len(dims) == 0: + return prims.view_of(a) + if len(dims) == 1: + return prims.squeeze(a, dims) + dims_list = list(dims) + dims_list = sorted(dims_list, reverse=True) + for i in dims_list: + a = squeeze(a, i) + return a + + +# Note: does not work with TensorMetas because of data-dependent control-flow +# CompositeImplicitAutograd - don't register decomp +def tensor_split( + a: TensorLikeType, + indices_or_sections: Union[Tensor, DimsType], + dim: int = 0, +) -> Tuple[TensorLikeType, ...]: + _dim = utils.canonicalize_dim(a.ndim, dim) + if a.ndim == 0: + msg = "tensor_split: received a rank zero tensor, but expected a tensor of rank one or greater!" 
+ raise ValueError(msg) + + # If indices_or_sections is a tensor, it must be a CPU Long tensor + if isinstance(indices_or_sections, TensorLike): + if not indices_or_sections.device.type == "cpu": + msg = "tensor_split: if indices_or_sections is a tensor it must be on the CPU, but received one on {}".format( + indices_or_sections.device + ) + raise ValueError(msg) + if indices_or_sections.dtype != torch.long: + msg = "tensor_split: if indices_or_sections is a tensor it must have long dtype, " + f" but received one with dtype {indices_or_sections.dtype}" + raise ValueError(msg) + + # Case 0 -- indices_or_sections is an integer or a scalar tensor n and a is split along dim into n parts of equal-ish length + if isinstance(indices_or_sections, IntLike) or ( + isinstance(indices_or_sections, TensorLike) and indices_or_sections.ndim == 0 + ): + sections: int = ( + indices_or_sections # type: ignore[assignment] + if isinstance(indices_or_sections, Number) + else indices_or_sections.item() + ) + + if sections <= 0: + msg = f"tensor_split: number of sections must be greater than 0, but was {sections}" + raise ValueError(msg) + + splits = [] + dim_size = a.shape[_dim] + min_split_size = math.floor(dim_size / sections) + num_splits_one_extra = dim_size % sections + start_idx = 0 + for split_idx in range(sections): + split_size = ( + min_split_size + 1 + if (split_idx < num_splits_one_extra) + else min_split_size + ) + s = prims.slice_in_dim(a, start_idx, start_idx + split_size, axis=_dim) + splits.append(s) + start_idx = start_idx + split_size + + return tuple(splits) + # Case 1 -- indices_or_sections is a sequence of integers or a 1D tensor describing the splits + else: + indices = indices_or_sections + if isinstance(indices_or_sections, TensorLike): + if indices_or_sections.ndim != 1: + msg = "tensor_split: non-scalar indices_or_sections tensors must have only one dimension, " + f"but received a tensor with {indices_or_sections.ndim} dimensions" + raise ValueError(msg) + + indices = indices_or_sections.tolist() + + splits = [] + start_idx = 0 + for x in indices: + splits.append(prims.slice_in_dim(a, start_idx, x, axis=_dim)) + start_idx = x + splits.append(prims.slice_in_dim(a, start_idx, a.shape[_dim], axis=_dim)) + return tuple(splits) + + +# CompositeImplicitAutograd - don't register decomp +def hsplit( + a: TensorLikeType, indices_or_sections: DimsType +) -> Tuple[TensorLikeType, ...]: + torch._check( + a.ndim >= 1, + lambda: ( + "torch.hsplit requires a tensor with at least 1 dimension, but got a tensor with " + + str(a.ndim) + + " dimensions!" + ), + ) + dim = 0 if a.ndim == 1 else 1 + if isinstance(indices_or_sections, IntLike): + split_size = indices_or_sections + torch._check( + (split_size != 0 and a.shape[dim] % split_size == 0), + lambda: ( + "torch.hsplit attempted to split along dimension " + + str(dim) + + ", but the size of the dimension " + + str(a.shape[dim]) + + " is not divisible by the split_size " + + str(split_size) + + "!" + ), + ) + return tensor_split(a, split_size, dim) + + torch._check_type( + isinstance(indices_or_sections, (list, tuple)), + lambda: ( + "hsplit(): received an invalid combination of arguments. 
" + "Expected indices_or_sections to be of type int, list of ints or tuple of ints " + f"but got type {type(indices_or_sections)}" + ), + ) + + split_sizes = indices_or_sections + return tensor_split(a, split_sizes, dim) + + +# CompositeImplicitAutograd - don't register decomp +def vsplit( + a: TensorLikeType, indices_or_sections: DimsType +) -> Tuple[TensorLikeType, ...]: + torch._check( + a.ndim >= 2, + lambda: ( + "torch.vsplit requires a tensor with at least 2 dimension, but got a tensor with " + + str(a.ndim) + + " dimensions!" + ), + ) + if isinstance(indices_or_sections, IntLike): + split_size = indices_or_sections + torch._check( + (split_size != 0 and a.shape[0] % split_size == 0), + lambda: ( + f"torch.vsplit attempted to split along dimension 0" + f", but the size of the dimension " + f"{a.shape[0]}" + f" is not divisible by the split_size " + f"{split_size}" + f"!" + ), + ) + return tensor_split(a, split_size, 0) + + torch._check_type( + isinstance(indices_or_sections, (list, tuple)), + lambda: ( + "vsplit(): received an invalid combination of arguments. " + "Expected indices_or_sections to be of type int, list of ints or tuple of ints " + f"but got type {type(indices_or_sections)}" + ), + ) + + split_sizes = indices_or_sections + return tensor_split(a, split_sizes, 0) + + +@register_decomposition(aten.diag.out) +@out_wrapper() +def diag( + self: TensorLikeType, + offset: int = 0, +) -> TensorLikeType: + ndim = self.dim() + torch._check( + ndim in (1, 2), lambda: f"diag(): Supports 1D or 2D tensors. Got {ndim}D" + ) + if ndim == 1: + return torch.diag_embed(self, offset) + else: + return torch.diagonal_copy(self, offset) + + +@register_decomposition(aten.diagonal_scatter) +@out_wrapper() +def diagonal_scatter( + input: TensorLikeType, + src: TensorLikeType, + offset: int = 0, + dim1: int = 0, + dim2: int = 1, +) -> TensorLikeType: + out = utils.clone_preserve_strides(input) + diag = out.diagonal(offset, dim1, dim2) + torch._check( + diag.shape == src.shape, + lambda: "expected src to have a size equal to the diagonal of the input." 
+ f"Got {src.shape} for a diagonal of shape {diag.shape}", + ) + copy_to(diag, src) + return out + + +@register_decomposition(aten.diagonal) +def diagonal( + self: TensorLikeType, + offset: int = 0, + dim1: int = 0, + dim2: int = 1, +) -> TensorLikeType: + """ + Reference implementation of torch.diagonal + """ + num_dims = self.dim() + dim1 = utils.canonicalize_dim(idx=dim1, rank=num_dims) + dim2 = utils.canonicalize_dim(idx=dim2, rank=num_dims) + + torch._check( + dim1 != dim2, lambda: f"diagonal dimensions cannot be identical {dim1}, {dim2}" + ) + + storage_offset = self.storage_offset() + + if offset >= 0: + diag_size = max(min(self.size()[dim1], self.size()[dim2] - offset), 0) + else: + diag_size = max(min(self.size()[dim1] + offset, self.size()[dim2]), 0) + + if diag_size > 0: + if offset >= 0: + storage_offset += offset * self.stride()[dim2] + else: + storage_offset -= offset * self.stride()[dim1] + + sizes = [s for i, s in enumerate(self.size()) if i not in (dim1, dim2)] + sizes.append(diag_size) + + strides = [s for i, s in enumerate(self.stride()) if i not in (dim1, dim2)] + strides.append(self.stride()[dim1] + self.stride()[dim2]) + + result = self.as_strided(size=sizes, stride=strides, storage_offset=storage_offset) + + return result + + +diagonal_copy = _make_copy_from_view(diagonal) + + +@register_decomposition(aten.diag_embed) +@out_wrapper() +def diag_embed( + t: TensorLikeType, + offset: int = 0, + dim1: int = -2, + dim2: int = -1, +) -> TensorLikeType: + """ + Reference implementation of torch.diag_embed + """ + # as per the docs, exchanging dims is equivalent to changing the sign of + # offset + if dim1 > dim2: + dim1, dim2 = dim2, dim1 + offset = -offset + + # convert from negative dims + rank = t.ndim + 1 + dim1 = utils.canonicalize_dim(rank=rank, idx=dim1) + dim2 = utils.canonicalize_dim(rank=rank, idx=dim2) + + torch._check( + dim1 != dim2, lambda: f"diagonal dimensions cannot be identical {dim1}, {dim2}" + ) + + # as per the docs, the size of last dim is placed at dim1 and dim2 + last_dim = t.size(-1) + + if offset != 0: + # add padding to match the new size + t_shape = list(t.shape) + t_shape[-1] = builtins.abs(offset) + z = torch.zeros(t_shape, dtype=t.dtype, device=t.device, requires_grad=False) + pair = (z, t) if offset > 0 else (t, z) + t = torch.cat(pair, dim=-1) + # make sure the diagonal always has the same size + last_dim += builtins.abs(offset) + + # preserve original data, but place 1 at dim1 and move last dim to dim2 + t = t.unsqueeze(dim1).movedim(-1, dim2) + + # generate ranges shifting indices based on offset + a_range = torch.arange(last_dim, device=t.device, dtype=torch.int64) + b_range = torch.arange( + offset, last_dim + offset, device=t.device, dtype=torch.int64 + ) + + # broadcast + cond = a_range == b_range.unsqueeze(-1) + cond_shape = [last_dim if i in (dim1, dim2) else 1 for i in range(len(t.shape))] + cond = cond.reshape(cond_shape) + + # aten.diag_embed always returns a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return utils.mask_tensor(cond, t).contiguous() + + +# CompositeImplicitAutograd - don't register decomp +def dsplit(a: TensorLikeType, sections: DimsType) -> TensorSequenceType: + if a.ndim < 3: + raise RuntimeError( + f"torch.dsplit requires a tensor with at least 3 dimension, but got a tensor with {a.ndim} dimensions!" 
+ ) + if isinstance(sections, IntLike) and (sections == 0 or a.shape[2] % sections != 0): + raise RuntimeError( + "torch.dsplit attempted to split along dimension 2, " + + f"but the size of the dimension {a.shape[2]} is not divisible by the split_size {sections}!" + ) + return tensor_split(a, sections, 2) + + +@register_decomposition(aten.t.default) +def t(a: TensorLikeType): + # TODO: Add sparse support + # if a.is_sparse: + # sparse_dim = a.sparse_dim() + # dense_dim = a.dense_dim() + # if not (sparse_dim <= 2 and dense_dim == 0): + # raise RuntimeError( + # f"t() expects a tensor with <= 2 sparse and 0 dense dimensions, but got {sparse_dim} sparse and" + # f"{dense_dim} dense dimensions" + # ) + if a.ndim > 2: + raise RuntimeError( + f"t() expects a tensor with <= 2 dimensions, but self is {a.ndim}D" + ) + return torch.transpose(a, 0, 0 if a.ndim < 2 else 1) + + +# CompositeImplicitAutograd - don't register decomp +def T(a: TensorLikeType) -> TensorLikeType: + # n != 2 && n != 0 is deprecated in regular PyTorch. + torch._check( + a.ndim in (0, 2), + lambda: ( + "The use of `x.T` on tensors of dimension other than 0 or 2 " + "to reverse their shape is not supported." + ), + ) + return a.t() + + +@register_decomposition(aten.alias) +def alias(a: TensorLikeType) -> TensorLikeType: + return prims.view_of(a) + + +@register_decomposition(aten.transpose) +def transpose(a: TensorLikeType, dim0: int, dim1: int) -> TensorLikeType: + _dim0, _dim1 = utils.canonicalize_dims(a.ndim, (dim0, dim1)) # type: ignore[misc] + + if a.ndim <= 1 or dim0 == dim1: + return aten.alias.default(a) + + _permutation = list(range(0, a.ndim)) + _permutation[_dim0] = _dim1 + _permutation[_dim1] = _dim0 + return torch.permute(a, _permutation) + + +# Aliases for transpose +swap_axes = transpose + + +@register_decomposition(aten.unfold) +def unfold( + self: TensorLikeType, dimension: int, size: int, step: int +) -> TensorLikeType: + shape, strides = _get_unfold_shape_stride( + self.shape, self.stride(), dimension, size, step + ) + return self.as_strided(shape, strides) + + +@register_decomposition(aten.unfold_copy) +@out_wrapper() +def unfold_copy(self: TensorLikeType, dimension: int, size: int, step: int): + return self.unfold(dimension, size, step).clone( + memory_format=torch.contiguous_format + ) + + +def _cumsumprod_common( + func, + init, + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + # We implement all the kwargs of a reduction. ATen just handles dtype + # nb. 
This decomposition may not be as efficient as a backend-specific implementation + ndim = a.ndim + dim = utils.canonicalize_dim(ndim, dim) + if ndim == 0: + return func(a.unsqueeze(0), dim=0, dtype=dtype, out=out) + a = a.unsqueeze(dim + 1) + rg = torch.arange(a.shape[dim], device=a.device) + mask = rg.unsqueeze(1) <= rg + for _ in range(ndim - dim - 1): + mask = mask.unsqueeze(-1) + masked_a = torch.where(mask, a, init) + return func(masked_a, dim=dim, dtype=dtype, out=out) + + +@register_decomposition(aten.cumsum) +def cumsum( + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + return _cumsumprod_common(func=sum, init=0, a=a, dim=dim, dtype=dtype, out=out) + + +@register_decomposition(aten.cumprod) +def cumprod( + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + return _cumsumprod_common(func=prod, init=1, a=a, dim=dim, dtype=dtype, out=out) + + +# Note: although squeeze is documented as having the out= kwarg it doesn't +@register_decomposition(aten.unsqueeze) +def unsqueeze(a: TensorLikeType, dim: int) -> TensorLikeType: + # Note that unsqueeze canonicalizes with rank + 1 because it allows + # a new innermost dimension to be specified + ndim = a.ndim + 1 + dim = utils.canonicalize_dim(ndim, dim) + return prims.expand_dims(a, (dim,), ndim=ndim) + + +# NOTE: shape is a vararg because Tensor.reshape can be called with as +# Tensor.view(a, b, c) or Tensor.view((a, b, c)) Function call torch.view +# doesn't support unpacked shapes +# TODO: Turn this into a decomposition (currently fails on reshape meta tests) +@register_decomposition(aten.view.default) +def view(a: TensorLikeType, *shape: ShapeType) -> TensorLikeType: + return _reshape_view_helper(a, *shape, allow_copy=False) + + +# CompositeImplicitAutograd - don't register decomp +def view_as(self: TensorLikeType, other: TensorLikeType) -> TensorLikeType: + return self.view(other.size()) + + +# CompositeImplicitAutograd - don't register decomp +def ravel(a: TensorLikeType) -> TensorLikeType: + return reshape(a, (-1,)) + + +# CompositeImplicitAutograd - don't register decomp +# missing ref impl. 
for aten.gather +@out_wrapper() +def take_along_dim( + a: torch.Tensor, indices: torch.Tensor, dim: Optional[int] = None +) -> torch.Tensor: + torch._check( + a.ndim == indices.ndim, + lambda: ( + "torch.take_along_dim(): input and indices should have the same " + f"number of dimensions, but got {a.ndim} dimensions for input, and " + f"{indices.ndim} dimensions for indices" + ), + ) + + torch._check( + utils.is_integer_dtype(indices.dtype), + lambda: ( + "torch.take_along_dim(): dtype of indices should be int but got " + f"{indices.dtype} instead" + ), + ) + + if dim is None: + return torch.gather(a.view(-1), 0, indices.view(-1)) + else: + self_sizes = list(a.shape) + self_sizes[dim] = indices.size(dim) + broadcast_shape = utils.infer_size_shapes(self_sizes, indices.size()) + indices_broadcast = broadcast_to(indices, broadcast_shape) + + indices_sizes = list(indices.shape) + indices_sizes[dim] = a.size(dim) + broadcast_shape = utils.infer_size_shapes(indices_sizes, a.size()) + self_broadcast = broadcast_to(a, broadcast_shape) + + return torch.gather(self_broadcast, dim, indices_broadcast) + + +@out_wrapper() +def empty( + *shape, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + requires_grad: bool = False, + pin_memory: bool = False, + memory_format: torch.memory_format = torch.contiguous_format, +) -> TensorLikeType: + torch._check( + memory_format != torch.preserve_format, + lambda: "torch.empty: the Preserve memory format is not supported", + ) + + shape = utils.extract_shape_from_varargs(shape) + + if memory_format == torch.contiguous_format: + strides = utils.make_contiguous_strides_for(shape) + elif memory_format == torch.channels_last_3d: + strides = utils.make_channels_last_3d_strides_for(shape) + else: # memory_format == torch.channels_last + torch._check( + memory_format == torch.channels_last, + lambda: f"torch.empty: received an unknown memory format {memory_format}!", + ) + strides = utils.make_channels_last_2d_strides_for(shape) + + return torch.empty_strided( + shape, + strides, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@out_wrapper() +def empty_permuted( + shape, + physical_layout, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + return prims.empty_permuted( + shape, + physical_layout, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_empty) +@out_wrapper() +def new_empty( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.empty( + size, + dtype=dtype, + device=device, + pin_memory=pin_memory, + layout=layout, + ) + + +@register_decomposition(aten.new_empty_strided) +@out_wrapper() +def new_empty_strided( + a: TensorLikeType, + size: ShapeType, + stride: StrideType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + """ + Reference implementation of 
torch.Tensor.new_empty_strided + """ + + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.empty_strided( + size, + stride, + dtype=dtype, + device=device, + pin_memory=pin_memory, + layout=layout, + ) + + +@register_decomposition(aten.zeros.default) +@out_wrapper() +def zeros( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + size = utils.extract_shape_from_varargs(size) + + if dtype is None: + dtype = torch.get_default_dtype() + + return torch.full( + size, + False if dtype == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_zeros) +@out_wrapper() +def new_zeros( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + False if (dtype or a.dtype) == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.ones.default) +@out_wrapper() +def ones( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + size = utils.extract_shape_from_varargs(size) + + if dtype is None: + dtype = torch.get_default_dtype() + + return torch.full( + size, + True if dtype == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_ones) +@out_wrapper() +def new_ones( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + True if (dtype or a.dtype) == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_full) +@out_wrapper() +def new_full( + a: TensorLikeType, + size: ShapeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + fill_value, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + ) + + +@register_decomposition(aten.empty_like) +@out_wrapper() +def empty_like( + a: TensorLikeType, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = 
None, + layout: Optional[torch.layout] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + if memory_format != torch.preserve_format: + return torch.empty( + a.shape, + dtype=dtype, + layout=layout, + device=device, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + ) + + # memory_format == torch.preserve_format + logical_to_physical_perm = ( + utils.compute_elementwise_output_logical_to_physical_perm(a) + ) + # identity perm is [2, 1, 0] + return torch.empty_permuted( + a.shape, + logical_to_physical_perm, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition([aten.arange.start_step, aten.arange.start_out]) +@out_wrapper() +def arange( + start: NumberType = 0, + end: Optional[NumberType] = None, + step: NumberType = 1, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + device = torch.device(utils.device_or_default(device)) + + assert not isinstance(start, complex) + assert not isinstance(end, complex) + assert not isinstance(step, complex) + + # Case: torch.arange(5) + if end is None: + end = start + start = 0 + torch._check(step != 0, lambda: "step must be nonzero") + if step > 0: + torch._check( + end >= start, + lambda: "upper bound and lower bound inconsistent with step sign", + ) + elif step < 0: + torch._check( + end <= start, + lambda: "upper bound and lower bound inconsistent with step sign", + ) + + def is_finite(x): + return not isinstance(x, FloatWithoutSymFloat) or math.isfinite(x) + + torch._check( + is_finite(start) and is_finite(end), + lambda: f"unsupported range: {start} -> {end}", + ) + torch._check( + is_finite(step), + lambda: f"step must be finite but got {step}", + ) + + if dtype is None: + args = (start, end, step) + integer_args = builtins.all(isinstance(arg, IntLike) for arg in args) + dtype = torch.int64 if integer_args else torch.get_default_dtype() + + is_integer = utils.is_integer_dtype(dtype) + if is_integer: + xstart = sym_int(start) + xend = sym_int(end) + xstep = sym_int(step) + + # For int64 we truncate arguments to int before calculating length, but + # other integral dtypes we don't. Weird... but needed to match ATen shapes. + if dtype == torch.int64: + # Uses floordiv to avoid ceil in inductor. 
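+            # For example (hand-checked): start=0, end=10, step=3 gives sgn=1
+            # and length = (10 - 0 + 3 - 1) // 3 = 4, i.e. the values
+            # 0, 3, 6, 9, matching math.ceil((10 - 0) / 3).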
+ sgn = bool(xstep > 0) - bool(xstep < 0) + length = (xend - xstart + xstep - sgn) // xstep + else: + length = math.ceil((end - start) / step) + + if is_integer: + return prims.iota( + length, + start=xstart, + step=xstep, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + computation_dtype = utils.get_acc_type(dtype, device) + index = prims.iota( + length, + start=0, + step=1, + dtype=torch.int64, + device=device, + requires_grad=False, + ) + index = _maybe_convert_to_dtype(index, computation_dtype) + result = start + step * index + result = _maybe_convert_to_dtype(result, dtype) + + if requires_grad: + result.requires_grad_(True) + return result + + +@register_decomposition(aten.lerp) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("start", "end", "weight"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def lerp(start: Tensor, end: Tensor, weight: Union[Tensor, NumberType]): + inputs = [start, end] + if isinstance(weight, Number): + weight = start.new_full((), weight) # type: ignore[arg-type] + else: + inputs.append(weight) + assert isinstance(weight, Tensor) # mypy + # We implement it this way for numerical stability. We assume (in the stability optimisation) + # that 0 <= weight <= 1. We take the abs to deal with complex numbers + # We want to perform operations near zero, which is where floating points are most precise + # thus, we perform the following optimisation: + # If weight.abs() >= 0.5: + # return (1 - weight) * (start - end) + end + mask = weight.abs() >= 0.5 + coeff = torch.where(mask, weight - 1, weight) + base = torch.where(mask, end, start) + output = coeff * (end - start) + base + # make sure the decomposition output's stride is same as non-decomposition path. + stride = utils.compute_elementwise_output_strides(*_maybe_broadcast(*inputs)) + if output.stride() != stride: + output = prims.copy_strided(output, stride) + + return handle_noncontiguous_outputs(inputs, output) + + +@register_decomposition(aten.linspace) +@out_wrapper() +def linspace( + start: Union[NumberType, TensorLikeType], + end: Union[NumberType, TensorLikeType], + steps: NumberType, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + if isinstance(start, TensorLikeType): + torch._check( + start.dim() == 0, + lambda: "linspace only supports 0-dimensional start and end tensors", + ) + start = _maybe_convert_to_dtype(start, torch.float64) + if isinstance(end, TensorLikeType): + torch._check( + end.dim() == 0, + lambda: "linspace only supports 0-dimensional start and end tensors", + ) + end = _maybe_convert_to_dtype(end, torch.float64) + + if py_any(isinstance(arg, complex) for arg in (start, end, steps)): + default_complex_dtype = utils.corresponding_complex_dtype( + torch.get_default_dtype() + ) + if dtype is None: + dtype = default_complex_dtype + else: + torch._check( + utils.is_complex_dtype(dtype), + lambda: f"linspace(): inferred dtype {default_complex_dtype} can't be safely cast to passed dtype {dtype}", + ) + else: + dtype = dtype or torch.get_default_dtype() + assert isinstance(dtype, torch.dtype) + + # steps does not participate in the computation of the dtype + torch._check_type( + isinstance(steps, IntLike), + lambda: f"received an invalid combination of arguments - got \ +({type(start).__name__}, {type(end).__name__}, {type(steps).__name__})", + ) + assert 
isinstance(steps, IntLike) # for mypy + torch._check(steps >= 0, lambda: "number of steps must be non-negative") + + factory_kwargs = { + "layout": layout, + "device": device, + "pin_memory": pin_memory, + "requires_grad": requires_grad, + } + if steps == 0: + return torch.full((0,), 0, dtype=dtype, **factory_kwargs) # type: ignore[arg-type] + if steps == 1: + if isinstance(start, TensorLikeType): + return torch.empty((steps,), dtype=dtype, **factory_kwargs).copy_(start) # type: ignore[arg-type] + else: + return torch.full((steps,), start, dtype=dtype, **factory_kwargs) # type: ignore[arg-type] + + # Perform in arange in int because some backends like ATen or Triton do not support all the dtypes + rg = torch.arange(0, steps, **factory_kwargs) # type: ignore[arg-type] + + # Small types need to be computed in higher precision as this is, at heart, an associative scan + dtype_red = ( + torch.int64 + if (utils.is_boolean_dtype(dtype) or utils.is_integer_dtype(dtype)) + else dtype + ) + computation_dtype, _ = utils.reduction_dtypes( + rg, REDUCTION_OUTPUT_TYPE_KIND.SAME, dtype_red + ) + cast_rg = partial(_maybe_convert_to_dtype, dtype=computation_dtype) + + # We implement torch.lerp without performing rg / (steps - 1) explicitly + # With this we get out[0] == start, out[-1] == end + step = (end - start) / (steps - 1) + out = torch.where( + rg < steps / 2, + start + step * cast_rg(rg), # type: ignore[arg-type,operator] + end - step * cast_rg((steps - 1) - rg), # type: ignore[arg-type,operator] + ) + return _maybe_convert_to_dtype(out, dtype) # type: ignore[return-value] + + +@register_decomposition(aten.logspace) +@out_wrapper() +def logspace( + start: Union[NumberType, TensorLikeType], + end: Union[NumberType, TensorLikeType], + steps: NumberType, + base: NumberType = 10, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + if dtype is None: + dtype = torch.get_default_dtype() + + # NB: NumPy doesn't have this cast + if prims.utils.is_integer_dtype(dtype): + if isinstance(start, FloatLike): + start = sym_int(start) + elif isinstance(start, TensorLikeType): + torch._check( + start.dim() == 0, + lambda: "logspace only supports 0-dimensional start and end tensors", + ) + start = _maybe_convert_to_dtype(start, dtype) + if isinstance(end, FloatLike): + end = sym_int(end) + elif isinstance(end, TensorLikeType): + torch._check( + end.dim() == 0, + lambda: "logspace only supports 0-dimensional start and end tensors", + ) + end = _maybe_convert_to_dtype(end, dtype) + + if py_any(isinstance(arg, complex) for arg in (start, end, steps)): + default_complex_dtype = utils.corresponding_complex_dtype( + torch.get_default_dtype() + ) + dtype = default_complex_dtype + _dtype = None # torch.linspace will update the correct dtype + else: + _dtype = torch.float64 + + assert not isinstance(base, complex) # for mypy + if base < 0: + raise NotImplementedError + ret = torch.linspace( # type: ignore[misc] + start, # type: ignore[arg-type] + end, # type: ignore[arg-type] + steps, # type: ignore[arg-type] + dtype=_dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + return _maybe_convert_to_dtype(torch.pow(base, ret), dtype) # type: ignore[arg-type,return-value] + + +@overload +def meshgrid(tensors: Sequence[TensorLikeType], indexing: str): + pass + + +@overload +def meshgrid(*tensors: TensorLikeType, indexing: str): 
+ pass + + +@register_decomposition(aten.meshgrid) +def meshgrid( + *tensors: Union[TensorLikeType, List[TensorLikeType], Tuple[TensorLikeType]], + indexing: str, +) -> List[TensorLikeType]: + # This ref simultaneously handles two overloads (see stubs above) + # The `indexing` argument is currently optional for torch.meshgrid, but we + # plan to make the argument required: https://github.com/pytorch/pytorch/issues/50276 + if isinstance(tensors[0], (list, tuple)): + assert len(tensors) == 1 + tensors = tuple(tensors[0]) + + torch._check( + py_all(isinstance(a, TensorLike) for a in tensors), + lambda: "meshgrid expects its inputs to be tensors", + ) + + torch._check(len(tensors) > 0, lambda: "meshgrid expects a non-empty TensorList") + + for i in range(len(tensors) - 1): + torch._check( + tensors[i].dtype == tensors[i + 1].dtype, # type: ignore[union-attr] + lambda: "meshgrid expects all tensors to have the same dtype", + ) + torch._check( + tensors[i].device == tensors[i + 1].device, # type: ignore[union-attr] + lambda: "meshgrid expects all tensors to have the same device", + ) + + swap_first_and_second_tensors = False + if indexing == "xy": + swap_first_and_second_tensors = len(tensors) >= 2 + if swap_first_and_second_tensors: + tensors = (tensors[1], tensors[0], *tensors[2:]) + else: + torch._check( + indexing == "ij", + lambda: ( + 'torch.meshgrid: indexing must be one of "xy" or "ij", ' + f"but received: {indexing}" + ), + ) + + result_shape: List[int] = [] + for t in tensors: + assert isinstance(t, TensorLike) # mypy + torch._check( + t.ndim == 0 or t.ndim == 1, + lambda: f"torch.meshgrid: Expected 0D or 1D tensor in the tensor list but got: {t}", + ) + result_shape.append(t.numel()) + + grids: List[TensorLikeType] = [] + for i, t in enumerate(tensors): + assert isinstance(t, TensorLike) # mypy + if t.ndim == 0: + t = t.view((1,)) + grids.append(prims.broadcast_in_dim(t, result_shape, (i,))) + + if swap_first_and_second_tensors: + # Swap outputs if we originally swapped at the beginning + grids[0], grids[1] = grids[1], grids[0] + + return grids + + +# CompositeImplicitAutograd - don't register decomp +def movedim( + input: TensorLikeType, + source: Union[int, DimsSequenceType], + destination: Union[int, DimsSequenceType], +) -> TensorLikeType: + """ + Reference implementation of torch.movedim + """ + if type(source) is int: + source = (source,) + if type(destination) is int: + destination = (destination,) + + # Converts to list to produce a compatible error message with core PyTorch, + # which prints sequences in square brackets. + torch._check( + len(source) == len(destination), # type: ignore[arg-type] + lambda: ( + "movedim: Invalid source or destination dims: source " # type: ignore[arg-type] + f"({list(source)} dims) should contain the same number " # type: ignore[arg-type] + f"of dims as destination ({list(destination)} dims)" # type: ignore[arg-type] + ), + ) + + rank = input.ndim + ss = tuple(utils.canonicalize_dims(rank=rank, indices=source)) # type: ignore[arg-type] + ds = tuple(utils.canonicalize_dims(rank=rank, indices=destination)) # type: ignore[arg-type] + + sss = set(ss) + dss = set(ds) + + # See above on why this converts to list in error messages. 
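+    # Worked example for the permutation constructed below (hand-computed):
+    # with rank=4, source=(0,) and destination=(2,), the mapping is {2: 0} and
+    # the loop yields dims=[1, 2, 0, 3], so an input of shape (a, b, c, d) is
+    # permuted to (b, c, a, d), as expected for movedim(x, 0, 2).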
+ torch._check( + len(ss) == len(sss), + lambda: f"movedim: repeated dim in `source` ({list(source)})", # type: ignore[arg-type] + ) + torch._check( + len(ds) == len(dss), + lambda: f"movedim: repeated dim in `destination` ({list(destination)})", # type: ignore[arg-type] + ) + + m = dict(zip(ds, ss)) + dims = [] + si = 0 # source index + for di in range(rank): + # check if the destination index is in the mapping + s = m.get(di) + if s is not None: + # insert source index if found + dims.append(s) + else: + # insert source index sequentially, skipping indices from the mapping + while si in sss: + si += 1 + dims.append(si) + si += 1 + + result = torch.permute(input, tuple(dims)) + + return result + + +# NOTE: for convenience, shape can be a tuple of ints or a tuple containing a tuple of ints +@register_decomposition(aten.empty_strided) +@out_wrapper() +def empty_strided( + shape: Union[ShapeType, Tuple[ShapeType]], + strides: StrideType, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + # Layout == strided, pin_memory is False + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + + shape = utils.extract_shape_from_varargs(shape) + dtype = torch.get_default_dtype() if dtype is None else dtype + device = torch.device("cpu") if device is None else device + + return prims.empty_strided( + shape, + strides, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.eye) +@out_wrapper() +def eye( + n: int, + m: Optional[int] = None, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, # TODO: unused +) -> TensorLikeType: + """ + Reference implementation of torch.eye + """ + if m is None: + m = n + + torch._check(n >= 0, lambda: f"n must be greater or equal to 0, got {n}") + torch._check(m >= 0, lambda: f"m must be greater or equal to 0, got {m}") + + range_n = torch.arange(n, dtype=torch.int64, device=device, requires_grad=False) + range_m = torch.arange(m, dtype=torch.int64, device=device, requires_grad=False) + + cond = range_n.unsqueeze(-1) == range_m + if dtype is torch.bool: + return cond + else: + one = torch.ones( + (1,), + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=False, + ) + return torch.where(cond, one, 0) + # TODO: Use requires_grad. All refs taking the requires_grad kwarg must + # return a leaf tensor. 
+ # result.requires_grad_(requires_grad) + + +@register_decomposition([aten.full.default, aten.full.out]) +@out_wrapper() +def full( + shape: ShapeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + + dtype = dtype if dtype is not None else utils.type_to_dtype(type(fill_value)) + device = device if device is not None else torch.device("cpu") + + e = empty( + shape, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + return torch.fill(e, fill_value) # type: ignore[arg-type] + + +def full_like( + a: TensorLikeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + e = torch.empty_like( + a, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + return fill(e, fill_value) + + +@register_decomposition(aten.zeros_like) +@out_wrapper() +def zeros_like( + a: TensorLikeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + return torch.full_like( + a, + False if (dtype or a.dtype) == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + +@register_decomposition(aten.ones_like) +@out_wrapper() +def ones_like( + a: TensorLikeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + return torch.full_like( + a, + True if (dtype or a.dtype) == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + +@register_decomposition(aten.randn.default) +@out_wrapper() +def randn( + *shape, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: Optional[torch.layout] = None, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + utils.check_pin_memory(pin_memory) + + shape_ = utils.extract_shape_from_varargs(shape) + + dtype = utils.dtype_or_default(dtype) + device = utils.device_or_default(device) + + return prims.normal( + shape_, + mean=0.0, + std=1.0, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +def scalar_tensor( + a: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + dtype = dtype if dtype is not None else utils.type_to_dtype(type(a)) + device = device if device is not None else torch.device("cpu") + return 
prims.scalar_tensor(a, dtype=dtype, device=device) + + +# +# Randomness References +# + + +def _uniform_helper( + shape: ShapeType, + low: Union[bool, int, float] = 0.0, + high: Union[bool, int, float] = 1.0, + *, + dtype: torch.dtype, + device: DeviceLikeType, +) -> TensorLikeType: + utils.validate_shape(shape) + + assert isinstance(low, Number) + assert isinstance(high, Number) + low = sym_float(low) + high = sym_float(high) + + assert isinstance(dtype, torch.dtype) + device = utils.canonicalize_device(device) + + return prims._uniform_helper(shape, low=low, high=high, dtype=dtype, device=device) + + +@register_decomposition(aten.masked_fill) +@out_wrapper() +def masked_fill(a: TensorLikeType, mask: TensorLikeType, value: TensorOrNumberLikeType): + python_type = utils.dtype_to_type(a.dtype) + if isinstance(value, Number): + value_type = type(value) + else: + # NOTE: Could not use value = item(value) as it resulted in + # RuntimeError: Cannot cast FakeTensor(cpu) to number + value_ndim = value.ndim + torch._check( + value_ndim == 0, + lambda: f"only supports a 0-dimensional value tensor, but got tensor with {value_ndim} dimension", + ) + # `masked_fill` allows cpu scalar to be moved to cuda and xpu but not otherwise. + is_cpu_scalar = a.device.type in ["cuda", "xpu"] and value.device.type == "cpu" + torch._check( + is_cpu_scalar or value.device == a.device, + lambda: "Expected `value` to be on same device as `a`", + ) + value_type = utils.dtype_to_type(value.dtype) + + if value_type is complex: + # only downcasting from complex to lower type is not allowed. + # We allow casting `value` to lower type for other case + # Eg. float -> int. + # Ref: https://github.com/pytorch/pytorch/issues/79195 + torch._check( + utils.is_weakly_lesser_type(value_type, python_type), + lambda: f"could not convert to type {python_type} without overflow", + ) + + # Since `where` allows type-promotion, + # cast value to correct type before passing to `where` + value = _maybe_convert_to_dtype(value, a.dtype) + r = torch.where(mask, value, a) # type: ignore[arg-type] + + # aten.mask_fill always return a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return r.contiguous() + + +@register_decomposition(aten.masked_fill_) +def masked_fill_( + a: TensorLikeType, mask: TensorLikeType, value: TensorOrNumberLikeType +) -> TensorLikeType: + b = torch.masked_fill(a, mask, value) # type: ignore[arg-type] + a.copy_(b) + return a + + +# CompositeImplicitAutograd - don't register decomp +def allclose( + a: TensorLikeType, + b: TensorLikeType, + rtol: float = 1e-05, + atol: float = 1e-08, + equal_nan: bool = False, +) -> bool: + """ + Reference implementation of torch.allclose + """ + _check_close_args(name="torch.allclose", a=a, b=b, rtol=rtol, atol=atol) + + return bool( + torch.all(torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)).item() + ) + + +def equal(a: TensorLikeType, b: TensorLikeType) -> bool: + utils.check_same_device(a, b, allow_cpu_scalar_tensors=False) + utils.check_same_dtype(a, b) + + # Shape check + if a.ndim != b.ndim: + return False + + for x, y in zip(a.shape, b.shape): + if x != y: + return False + + # Short-circuits if there are no elements to validate + if a.numel() == 0: + return True + + return item(all(eq(a, b))) # type: ignore[return-value] + + +@register_decomposition(aten.norm) +@out_wrapper(exact_dtype=True) +def norm( + input: TensorLikeType, + p: Optional[Union[float, str]] = "fro", + dim: Optional[DimsType] = None, + keepdim: bool = False, + 
*, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # In these cases we compute the "Frobenius norm" + if ( + p == "fro" and (dim is None or isinstance(dim, Dim) or len(dim) <= 2) + ) or p is None: + p = 2 + if isinstance(dim, Dim): + dim = [dim] + if isinstance(p, str): + # Here we either call the nuclear norm, or we call matrix_norm with some arguments + # that will throw an error + if dim is None: + dim = tuple(range(input.ndim)) + return torch.linalg.matrix_norm(input, p, dim, keepdim, dtype=dtype) + else: + return torch.linalg.vector_norm(input, p, dim, keepdim, dtype=dtype) + + +@register_decomposition(aten.trace) +@out_wrapper() +def trace(self: TensorLikeType) -> TensorLikeType: + torch._check( + self.ndim == 2, lambda: "expected a matrix, but got tensor with dim {self.ndim}" + ) + return torch.sum(torch.diag(self, 0)) + + +def _make_r_binary_op(base_op): + def rop( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + ) -> TensorLikeType: + return base_op(b, a) + + return rop + + +rtruediv = _make_r_binary_op(true_divide) +rfloordiv = _make_r_binary_op(floor_divide) +rpow = _make_r_binary_op(pow) + + +@register_decomposition(aten.triu) +@out_wrapper() +def triu(a: TensorLikeType, diagonal: int = 0) -> TensorLikeType: + torch._check( + a.ndim >= 2, lambda: "triu: input tensor must have at least 2 dimensions" + ) + h, w = a.shape[-2:] + mask = ( + torch.arange(w, device=a.device).unsqueeze(-2) + - torch.arange(h, device=a.device).unsqueeze(-1) + ) >= diagonal + + # aten.triu always returns a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return utils.mask_tensor(mask, a).contiguous() + + +@register_decomposition(aten.tril) +@out_wrapper() +def tril(a: TensorLikeType, diagonal: int = 0) -> TensorLikeType: + torch._check( + a.ndim >= 2, lambda: "tril: input tensor must have at least 2 dimensions" + ) + h, w = a.shape[-2:] + mask = ( + torch.arange(w, device=a.device).unsqueeze(-2) + - torch.arange(h, device=a.device).unsqueeze(-1) + ) <= diagonal + + # aten.tril always returns a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return utils.mask_tensor(mask, a).contiguous() + + +# This is based on get_tril_size in aten/src/ATen/native/TensorFactories.h +# The components of the matrix that belong to the lower triangle with offset +# form a pentagon that can be broken down into a top trapezoid and a bottom +# rectangle. For the implementation of tril_indices, we need the sizes of +# both of these, as well as the length of the top side of the trapezoid. 
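+# For example (hand-computed): with row=4, col=3, offset=0 the lower triangle
+# has 1 + 2 + 3 + 3 = 9 elements; the helper below splits this into a top
+# trapezoid of 6 elements (rows of length 1, 2, 3), a bottom rectangle of
+# 3 elements (one full row), and a top-row length m_first_row of 1.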
+def _get_tril_sizes(row: int, col: int, offset: int) -> Tuple[int, int, int]: + if row == 0 or col == 0: + return 0, 0, 0 + + m_first_row = min(col, 1 + offset) if offset > 0 else int(row + offset > 0) + m_last_row = max(0, min(col, row + offset)) + n_row_all = max(0, min(row, row + offset)) + n_row_trapezoid = m_last_row - m_first_row + 1 + + # Number of elements in top trapezoid + trapezoid_size = (m_first_row + m_last_row) * n_row_trapezoid // 2 + # Number of elements in bottom rectangle + diff_row = n_row_all - n_row_trapezoid + rectangle_size = max(0, diff_row * col) + + return trapezoid_size, rectangle_size, m_first_row + + +def _trilu_checks( + name: str, + row: int, + col: int, + dtype: torch.dtype, + layout: torch.layout, + pin_memory: bool, +): + torch._check(row >= 0, lambda: f"row must be non-negative, got {row}") + torch._check(col >= 0, lambda: f"col must be non-negative, got {col}") + torch._check( + dtype in (torch.int32, torch.int64), + lambda: f"\"{name}\" not implemented for '{dtype}'", + ) + + +# This is based on tril_indices_cuda in aten/src/ATen/native/cuda/TensorFactories.cu +@register_decomposition(aten.tril_indices) +@out_wrapper() +def tril_indices( + row: int, + col: int, + offset: int = 0, + *, + dtype: torch.dtype = torch.long, + layout: torch.layout = torch.strided, + device: DeviceLikeType = "cpu", + pin_memory: bool = False, +) -> TensorLikeType: + _trilu_checks("tril_indices", row, col, dtype, layout, pin_memory) + + trapezoid_size, rectangle_size, m_first_row = _get_tril_sizes(row, col, offset) + row_offset = max(0, -offset) + + arange_kw = partial( + torch.arange, layout=layout, device=device, pin_memory=pin_memory + ) + + # first we do the indices for top trapezoid + xs1 = arange_kw(0, trapezoid_size, dtype=torch.float64) + b = m_first_row - 0.5 + row_inds1 = torch.floor(-b + torch.sqrt(b * b + 2 * xs1)) + col_inds1 = torch.floor(xs1 - (2 * m_first_row - 1 + row_inds1) * row_inds1 * 0.5) + row_inds1 = _maybe_convert_to_dtype(row_inds1 + row_offset, dtype) + col_inds1 = _maybe_convert_to_dtype(col_inds1, dtype) + + # then bottom rectangle + xs2 = arange_kw(0, rectangle_size, dtype=dtype) + row_inds2 = xs2 // col + (col - m_first_row + 1 + row_offset) + col_inds2 = xs2 % col + + return torch.stack( + (torch.cat((row_inds1, row_inds2)), torch.cat((col_inds1, col_inds2))) + ) + + +# Similar to _get_tril_sizes above, but here there is a top trapezoid and +# a bottom rectangle instead. Note that you can't reduce this to +# _get_tril_sizes(col, row, -offset) because that would correspond to +# decomposing into a left trapezoid and right rectangle. 
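+# For example (hand-computed): with row=4, col=3, offset=0 the upper triangle
+# has 3 + 2 + 1 + 0 = 6 elements; the helper below reports a top rectangle of
+# 0 elements, a trapezoid of 6 elements, and a first-row length of 3.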
+def _get_triu_sizes(row: int, col: int, offset: int) -> Tuple[int, int, int]: + if row == 0 or col == 0: + return 0, 0, 0 + + m_first_row = max(0, col - offset) if offset > 0 else col + + # Number of elements in top rectangle + rectangle_size = max(0, min(row, -offset) * col) + + # Number of elements in bottom trapezoid + trapezoid_size_tril, rectangle_size_tril, _ = _get_tril_sizes(row, col, offset - 1) + triu_size = row * col - (trapezoid_size_tril + rectangle_size_tril) + trapezoid_size = triu_size - rectangle_size + + return trapezoid_size, rectangle_size, m_first_row + + +@register_decomposition(aten.triu_indices) +@out_wrapper() +def triu_indices( + row: int, + col: int, + offset: int = 0, + *, + dtype: torch.dtype = torch.long, + layout: torch.layout = torch.strided, + device: DeviceLikeType = "cpu", + pin_memory: bool = False, +) -> TensorLikeType: + _trilu_checks("triu_indices", row, col, dtype, layout, pin_memory) + + trapezoid_size, rectangle_size, m_first_row = _get_triu_sizes(row, col, offset) + col_offset = max(0, offset) + + arange_kw = partial( + torch.arange, layout=layout, device=device, pin_memory=pin_memory + ) + + # indices for top rectangle + xs2 = arange_kw(0, rectangle_size, dtype=dtype) + row_inds2 = xs2 // col + col_inds2 = xs2 % col + + # bottom trapezoid + xs1 = arange_kw(0, trapezoid_size, dtype=torch.float64) + b = -0.5 - m_first_row + row_inds1 = torch.floor(-b - torch.sqrt(b * b - 2 * xs1)) + col_inds1 = torch.floor(xs1 - ((2 * m_first_row - 1 - row_inds1) * row_inds1) * 0.5) + row_inds1 = _maybe_convert_to_dtype(row_inds1, dtype) + col_inds1 = _maybe_convert_to_dtype(col_inds1, dtype) + + if col: + row_inds1 = row_inds1 + (rectangle_size // col) + col_inds1 = col_inds1 + col_offset + + return torch.stack( + (torch.cat((row_inds2, row_inds1)), torch.cat((col_inds2, col_inds1))) + ) + + +@register_decomposition(aten.bucketize) +@out_wrapper(exact_dtype=True) +def bucketize( + a: TensorLikeType, + boundaries: TensorLikeType, + *, + out_int32: bool = False, + right: bool = False, +): + torch._check( + boundaries.dim() == 1, + lambda: f"boundaries tensor must be 1 dimension but got dim({boundaries.dim()})", + ) + + out_dtype = torch.int32 if out_int32 else torch.int64 + n_boundaries = boundaries.shape[-1] + if n_boundaries == 0: + return torch.zeros_like(a) + # We are trying to find the bucket (defined by pairs of consecutive elements of `boundaries`) + # each element of `a` belongs to. 
We use binary search to achieve logarithimic complexity, + # but each step of the search is done "in parallel" over all elements of `a` + # can't use int32 as indexes, so we have to do all computations with int64 and convert at the end + start = torch.zeros(a.shape, device=a.device, dtype=torch.int64) + end = start + n_boundaries + # Max depth of the binary search + # Since we can't break out of the loop at different points for different elements of a, + # we just do the max amount of iterations that binary search requires and add condition + # tensor (cond_update below) to stop updating once the search terminates + + # For first iteration through loop we can skip some checks, we have separate implementation + mid = start + (end - start) // 2 + mid_val = boundaries[mid] + if right: + cond_mid = mid_val > a + else: + cond_mid = mid_val >= a + start = torch.where(cond_mid, start, mid + 1) + + if n_boundaries > 1: + cond_update = torch.ones_like(a, dtype=torch.bool) + niters = int(math.log2(n_boundaries)) + for _ in range(niters): + end = torch.where(cond_mid & cond_update, mid, end) + cond_update = start < end + # start might end up pointing to 1 past the end, we guard against that + mid = torch.where(cond_update, start + (end - start) // 2, 0) + mid_val = boundaries[mid] + # If right is true, the buckets are closed on the *left* + # (i.e., we are doing the equivalent of std::upper_bound in C++) + # Otherwise they are closed on the right (std::lower_bound) + if right: + cond_mid = mid_val > a + else: + cond_mid = mid_val >= a + start = torch.where((~cond_mid) & cond_update, mid + 1, start) + + return start.to(dtype=out_dtype) + + +@register_decomposition(aten.cauchy) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def cauchy(self, median=0, sigma=1, generator=None): + assert generator is None + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_integer_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"Cauchy distribution is a continuous probability distribution. \ + dtype must be a floating point but you specified {self.dtype}", + ) + torch._check( + sigma > 0.0, + lambda: f"cauchy_ expects sigma > 0.0, but found sigma={sigma}", + ) + return median + sigma * torch.tan(math.pi * (torch.rand_like(self) - 0.5)) + + +@register_decomposition(aten.exponential) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def exponential(self, rate=1, generator=None): + assert generator is None + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_integer_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"Exponential distribution is a continuous probability distribution. 
\ + dtype must be a floating point but you specified {self.dtype}", + ) + torch._check( + rate > 0.0, + lambda: f"exponential_ expects lambda > 0.0, but found lambda={rate}", + ) + return -1 / rate * torch.log1p(-torch.rand_like(self)) + + +@register_decomposition(aten.geometric) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def geometric(self, p, generator=None): + assert generator is None + # TODO: fix inductor rand_like for integer, bool dtypes + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"geometric not implemented for {self.dtype}", + ) + torch._check( + 0 < p and p < 1, + lambda: f"geometric_ expects p to be in (0, 1), but got p={p}", + ) + return torch.floor(torch.log1p(-torch.rand_like(self)) / math.log1p(-p)) + 1 + + +@register_decomposition(aten.log_normal) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def log_normal(self, mean=1, std=2, generator=None): + assert generator is None + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_integer_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"log_normal not implemented for {self.dtype}", + ) + torch._check( + 0 < std, + lambda: f"log_normal_ expects std > 0.0, but found std={std}", + ) + return torch.exp(std * torch.randn_like(self) + mean) + + +# TODO: add support for functionalization aten.normal_functional +# NOTE: the device and dtype will be ignored when shape is None +@register_decomposition(aten.normal) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=( + "mean", + "std", + ), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def normal( + mean=0, + std=1, + size=None, + *, + generator=None, + dtype=None, + layout=None, + device=None, + pin_memory=None, +): + assert layout is None or layout == torch.strided + + if not isinstance(std, TensorLike): + torch._check( + std >= 0, lambda: f"normal expects std >= 0.0, but found std {std}" + ) + + if size is None: + tensors = tuple(t for t in (mean, std) if isinstance(t, TensorLike)) + torch._check( + len(tensors) > 0, + lambda: "normal expects that either mean or std is a tensor, or size is defined", + ) + torch._check( + layout is None and pin_memory is None, + lambda: "Cannot pass layout, or pin_memory without size", + ) + + size = _broadcast_shapes(*(t.shape for t in tensors)) + dtype = tensors[0].dtype + device = tensors[0].device + else: + torch._check( + not isinstance(mean, TensorLike) and not isinstance(std, TensorLike), + lambda: "normal expects mean and std to be scalars when size is defined", + ) + dtype = torch.get_default_dtype() if dtype is None else dtype + device = torch.device("cpu") if device is None else device + + normal_samples = prims.normal( + size, + mean=0.0, + std=1.0, + dtype=dtype, + device=device, + requires_grad=False, + generator=generator, + ) + return std * normal_samples + mean + + +@register_decomposition(aten.normal_) +def normal_(self, mean=0, std=1, *, generator=None): + return normal(mean, std, self.shape, out=self, generator=generator) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def rad2deg(self: TensorLikeType): + torch._check( + not utils.is_complex_dtype(self.dtype), + lambda: "rad2deg is not supported for complex tensors.", + ) 
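+    # Added illustrative note (not part of the upstream source): the constant
+    # below is 180/pi, so the conversion is degrees = radians * 180/pi.
+    # For example, rad2deg(pi) == 180.0 and rad2deg(pi / 2) == 90.0.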
+ M_180_PI = 57.295779513082320876798154814105170332405472466564 + return self * M_180_PI + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def deg2rad(self: TensorLikeType): + torch._check( + not utils.is_complex_dtype(self.dtype), + lambda: "deg2rad is not supported for complex tensors.", + ) + M_PI_180 = 0.017453292519943295769236907684886127134428718885417 + return self * M_PI_180 + + +@register_decomposition(aten.count_nonzero) +@out_wrapper() +def count_nonzero(self, dim: Optional[DimsType] = None): + return (self != 0).sum(dim) + + +def _dot_check(self, other): + torch._check( + self.dim() == 1 and other.dim() == 1, + lambda: f"1D tensors expected, but got {self.dim()}D and {other.dim()}D tensors", + ) + + def numel_error(): + return ( + f"inconsistent tensor size, expected tensor [{self.numel()}] and src [{other.numel()}] to have the" + f"same number of elements, but got {self.numel()} and {other.numel()} elements respectively" + ) + + torch._check(self.numel() == other.numel(), numel_error) + + +@register_decomposition(aten.dot) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "other"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def dot(self, other): + if self.is_complex(): + if self.is_conj(): + if other.is_conj(): + return torch.dot(self.conj(), other.conj()).conj() + else: + return torch.vdot(self.conj(), other) + elif other.is_conj(): + return torch.vdot(other.conj(), self) + + _dot_check(self, other) + return (self * other).sum() + + +@register_decomposition(aten.vdot) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "other"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def vdot(self, other): + if not self.is_complex(): + return torch.dot(self, other) + + if self.is_conj(): + if other.is_conj(): + return torch.vdot(other.conj(), self.conj()) + else: + return torch.dot(self.conj(), other) + elif other.is_conj(): + return torch.dot(self, other.conj()).conj() + + _dot_check(self, other) + # The decomposition fails if you do self.conj()... 
not sure why + return (self.conj_physical() * other).sum() + + +# inplace +abs_ = _make_inplace(abs) +acos_ = _make_inplace(acos) +acosh_ = _make_inplace(acosh) +add_ = _make_inplace(add) +addcmul_ = _make_inplace(addcmul) +addcdiv_ = _make_inplace(addcdiv) +asin_ = _make_inplace(asin) +asinh_ = _make_inplace(asinh) +atan_ = _make_inplace(atan) +atanh_ = _make_inplace(atanh) +atan2_ = _make_inplace(atan2) +bitwise_and_ = _make_inplace(bitwise_and) +bitwise_left_shift_ = _make_inplace(bitwise_left_shift) +bitwise_not_ = _make_inplace(bitwise_not) +bitwise_or_ = _make_inplace(bitwise_or) +bitwise_right_shift_ = _make_inplace(bitwise_right_shift) +bitwise_xor_ = _make_inplace(bitwise_xor) +ceil_ = _make_inplace(ceil) +clamp_ = _make_inplace(clamp) +clamp_min_ = _make_inplace(clamp_min) +clamp_max_ = _make_inplace(clamp_max) +conj_physical_ = _make_inplace(conj_physical) +copysign_ = _make_inplace(copysign) +cos_ = _make_inplace(cos) +cosh_ = _make_inplace(cosh) +cumsum_ = _make_inplace(cumsum) +cumprod_ = _make_inplace(cumprod) +deg2rad_ = _make_inplace(deg2rad) +digamma_ = _make_inplace(digamma) +div_ = _make_inplace(div) +eq_ = _make_inplace(eq) +erf_ = _make_inplace(erf) +erfc_ = _make_inplace(erfc) +erfinv_ = _make_inplace(erfinv) +exp_ = _make_inplace(exp) +exp2_ = _make_inplace(exp2) +expm1_ = _make_inplace(expm1) +float_power_ = _make_inplace(float_power) +floor_ = _make_inplace(floor) +floor_divide_ = _make_inplace(floor_divide) +fmod_ = _make_inplace(fmod) +frac_ = _make_inplace(frac) +gcd_ = _make_inplace(gcd) +ge_ = _make_inplace(ge) +gt_ = _make_inplace(gt) +heaviside_ = _make_inplace(heaviside) +hypot_ = _make_inplace(hypot) +igamma_ = _make_inplace(igamma) +igammac_ = _make_inplace(igammac) +i0_ = _make_inplace(i0) +lcm_ = _make_inplace(lcm) +le_ = _make_inplace(le) +lerp_ = _make_inplace(lerp) +lgamma_ = _make_inplace(lgamma) +log10_ = _make_inplace(log10) +log1p_ = _make_inplace(log1p) +log2_ = _make_inplace(log2) +log_ = _make_inplace(log) +logical_and_ = _make_inplace(logical_and) +logical_not_ = _make_inplace(logical_not) +logical_or_ = _make_inplace(logical_or) +logical_xor_ = _make_inplace(logical_xor) +lt_ = _make_inplace(lt) +mul_ = _make_inplace(mul) +mvlgamma_ = _make_inplace(mvlgamma) +nan_to_num_ = _make_inplace(nan_to_num) +ne_ = _make_inplace(ne) +neg_ = _make_inplace(neg) +nextafter_ = _make_inplace(nextafter) +pow_ = _make_inplace(pow) +rad2deg_ = _make_inplace(rad2deg) +reciprocal_ = _make_inplace(reciprocal) +remainder_ = _make_inplace(remainder) +rsqrt_ = _make_inplace(rsqrt) +sgn_ = _make_inplace(sgn) +sigmoid_ = _make_inplace(sigmoid) +sign_ = _make_inplace(sign) +sin_ = _make_inplace(sin) +sinc_ = _make_inplace(sinc) +sinh_ = _make_inplace(sinh) +sqrt_ = _make_inplace(sqrt) +square_ = _make_inplace(square) +sub_ = _make_inplace(sub) +tan_ = _make_inplace(tan) +tanh_ = _make_inplace(tanh) +tril_ = _make_inplace(tril) +triu_ = _make_inplace(triu) +true_divide_ = _make_inplace(true_divide) +trunc_ = _make_inplace(trunc) +xlogy_ = _make_inplace(xlogy) +cauchy_ = _make_inplace(cauchy) +exponential_ = _make_inplace(exponential) +geometric_ = _make_inplace(geometric) +log_normal_ = _make_inplace(log_normal) +zero_ = _make_inplace(zero) + + +# xref: isStorage in torch/csrc/DynamicTypes.cpp +def _isStorage(obj): + return isinstance(obj, (torch.TypedStorage, torch.UntypedStorage)) + + +# xref: compute_sizes in torch/csrc/utils/tensor_new.cpp +def _compute_sizes(seq, scalar_type): + MAX_DIMS = 128 + is_storage = _isStorage(seq) + sizes = [] + # TODO: this is 
inaccurate, we actually test PySequence_Check + while isinstance(seq, (list, tuple)): + length = len(seq) + if is_storage: + length //= scalar_type.itemsize + sizes.append(length) + if len(sizes) > MAX_DIMS: + raise ValueError(f"too many dimensions '{type(seq).__name__}'") + if length == 0: + break + try: + handle = seq[0] + except Exception: + raise ValueError( # noqa: TRY200 + f"could not determine the shape of object type '{type(seq).__name__}'" + ) + seq = handle + + return sizes + + +# xref: infer_scalar_type in torch/csrc/utils/tensor_new.cpp +def _infer_scalar_type(obj): + if isinstance(obj, FloatLike): + return torch.get_default_dtype() + if isinstance(obj, IntLike) and not isinstance(obj, bool): # careful! + return torch.int64 + if isinstance(obj, bool): + return torch.bool + if isinstance(obj, complex): + default_dtype = torch.get_default_dtype() + if default_dtype is torch.float: + return torch.cfloat + elif default_dtype is torch.double: + return torch.cdouble + else: + raise RuntimeError("invalid default scalar type for complex") + if isinstance(obj, torch.Tensor): + return obj.dtype + if isinstance(obj, str): + raise TypeError(f"new(): invalid data type '{type(obj).__name__}'") + # TODO: this is inaccurate, we actually test PySequence_Check + if isinstance(obj, (list, tuple)): + scalarType = None + length = len(obj) + # match NumPy semantics, except use default tensor type instead of + # double. + if length == 0: + return torch.get_default_dtype() + for i in range(length): + cur_item = obj[i] + # TODO: test this + """ + if cur_item is obj: + raise TypeError("new(): self-referential lists are incompatible") + """ + item_scalarType = _infer_scalar_type(cur_item) # recurse! + if scalarType is not None: + scalarType = torch.promote_types(scalarType, item_scalarType) + else: + scalarType = item_scalarType + if scalarType is torch.cdouble: + # this won't change (unless we hit undefined, but that will + # fail later) + return scalarType + return scalarType + raise RuntimeError(f"Could not infer dtype of {type(obj).__name__}") + + +# Analogous to recursive_store +# xref: recursive_store in torch/csrc/utils/tensor_new.cpp +def _recursive_build(sizes, dim, scalarType, obj): + ndim = len(sizes) + assert dim <= ndim + if dim == ndim: + return torch.scalar_tensor(obj, dtype=scalarType) + n = sizes[dim] + seq = obj + seq_size = len(seq) + if seq_size != n: + raise ValueError( + f"expected sequence of length {n} at dim {dim} (got {seq_size})" + ) + return torch.stack( + [_recursive_build(sizes, dim + 1, scalarType, item) for item in seq] + ) + + +# xref: internal_new_from_data in torch/csrc/utils/tensor_new.cpp +def _internal_new_from_data( + options, + scalar_type, + device_opt, + data, + copy_variables, + copy_numpy, + type_inference, + pin_memory=False, +): + if isinstance(data, torch.Tensor): + torch._check( + not pin_memory, lambda: "Can't pin tensor constructed from a variable" + ) + var = data + if copy_variables: + var = var.detach() + inferred_scalar_type = var.dtype if type_inference else scalar_type + device = device_opt if device_opt is not None else var.device + return var.to( + device=device, + dtype=inferred_scalar_type, + non_blocking=False, + copy=copy_variables, + ) + + # TODO + if hasattr(data, "__cuda_array_interface__"): + return NotImplemented + + # TODO: test for numpy input with PyArray_Check + + device = device_opt if device_opt is not None else options["device"] + sizes = _compute_sizes(data, scalar_type) + inferred_scalar_type = _infer_scalar_type(data) if 
type_inference else scalar_type + + # NB: Don't need to avoid tracing, as we aren't going to do any manual + # pointer filling tricks + if _isStorage(data): + return NotImplemented + else: + if torch.device(device).type == "meta": + return NotImplemented + + # In the C implementation, we would directly start poking the memory + # of a freshly allocated CPU tensor. Here, we're going to do an + # alternate, heinously slow implementation: turn each individual + # scalar into a tensor, and then repeatedly cat them together + tensor = _recursive_build(sizes, 0, inferred_scalar_type, data) + + tensor = tensor.to(device, inferred_scalar_type, non_blocking=False, copy=False) + + # NB: lift_fresh is not needed, because we built the tensor from scalars + # guaranteeing a fresh tensor in this case + return tensor + + +# xref: tensor_ctor in torch/csrc/utils/tensor_new.cpp +def tensor(data, *, dtype=None, device=None, pin_memory=False, requires_grad=False): + # TODO (or not): support names kwarg + if isinstance(data, torch.Tensor): + warnings.warn( + "To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() " + "or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor)" + ) + type_inference = dtype is None + new_tensor = _internal_new_from_data( + # device="cpu" because that's what you get with torch.tensor(2) no + # device by default + {"device": "cpu"}, # TODO: use torch.get_default_tensor_type + dtype if dtype is not None else torch.get_default_dtype(), + device, + data, + copy_variables=True, + copy_numpy=True, + type_inference=type_inference, + pin_memory=pin_memory, + ) + new_tensor.detach_() + new_tensor.requires_grad_(requires_grad) + return new_tensor + + +# Views +# We can't model these as above, as the pattern of doing `op(a, out=a)` does not work for a view function +# given that it does not reshape the input (it just copies the result into it) + +# squeeze_ = _make_inplace(squeeze) +# t_ = _make_inplace(t) +# transpose_ = _make_inplace(transpose) +# unsqueeze_ = _make_inplace(unsqueeze) + + +import torch._refs._conversions +import torch._refs.fft +import torch._refs.linalg +import torch._refs.nn.functional +import torch._refs.special diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48cccf84b93d9b7e130f68268931809efd2e9aef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b21461eeed791b606c420ad749cc92c29f8e436 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63a41dcbda4dd252d2c38b79107e3fb63bce67c7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/_refs/_conversions.py b/env-llmeval/lib/python3.10/site-packages/torch/_refs/_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..fa1ca2428255aa9fe3892328f6ab95cc5f5b7568 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_refs/_conversions.py @@ -0,0 +1,118 @@ +import torch +import torch._prims_common as utils + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition + +from torch._prims_common import TensorLikeType +from torch._prims_common.wrappers import out_wrapper +from torch._refs import _broadcast_shapes + +# Data conversion references. +# +# Note: this module breaks the usual _refs to torch naming scheme where +# _refs.foo.bar is a ref for torch.foo.bar. The following definitions are not +# part of _refs/__init__.py to avoid name clashes with Python builtin types +# (like int). + +__all__ = [ + # dtypes + "bfloat16", + "bool", + "byte", + "cdouble", + "cfloat", + "chalf", + "char", + "double", + "float", + "half", + "int", + "long", + "short", + # misc + "complex", + "polar", +] + + +def _make_conversion_method(name: str, dtype: torch.dtype): + def fn( + self: TensorLikeType, memory_format: torch.memory_format = torch.preserve_format + ) -> TensorLikeType: + return self.to(dtype, memory_format=memory_format) # type: ignore[call-overload] + + fn.__name__ = name + return fn + + +bfloat16 = _make_conversion_method("bfloat16", torch.bfloat16) + +bool = _make_conversion_method("bool", torch.bool) + +byte = _make_conversion_method("byte", torch.uint8) + +cdouble = _make_conversion_method("cdouble", torch.cdouble) + +cfloat = _make_conversion_method("cfloat", torch.cfloat) + +chalf = _make_conversion_method("chalf", torch.complex32) + +char = _make_conversion_method("char", torch.int8) + +double = _make_conversion_method("double", torch.double) + +float = _make_conversion_method("float", torch.float) + +half = _make_conversion_method("half", torch.half) + +int = _make_conversion_method("int", torch.int) + +long = _make_conversion_method("long", torch.long) + +short = _make_conversion_method("short", torch.short) + + +@register_decomposition(torch._ops.ops.aten.complex) +# Note: complex has type promotion tests disabled due to different semantics. +# exact_dtype is for compat with complex_check_dtype from core. +@out_wrapper(exact_dtype=True) +def complex(real: TensorLikeType, imag: TensorLikeType) -> TensorLikeType: + allowed_dtypes = (torch.float32, torch.float64, torch.float16) + torch._check( + real.dtype in allowed_dtypes and imag.dtype in allowed_dtypes, + lambda: ( + f"Expected both inputs to be Half, Float or Double tensors but got " + f"{real.dtype} and {imag.dtype}" + ), + ) + torch._check( + real.dtype == imag.dtype, + lambda: ( + f"Expected object of scalar type {real.dtype} but got " + f"scalar type {imag.dtype} for second argument" + ), + ) + result_dtype = utils.corresponding_complex_dtype(real.dtype) # type: ignore[arg-type] + common_shape = _broadcast_shapes(real.shape, imag.shape) + result = real.new_empty( + common_shape, + dtype=result_dtype, + layout=real.layout, + device=real.device, + # pin_memory=real.is_pinned(), # NYI + ) + result.real = real + result.imag = imag + return result + + +@register_decomposition(torch._ops.ops.aten.polar) +# Note: polar has type promotion tests disabled due to different semantics. +# exact_dtype is for compat with complex_check_dtype from core. 
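+# Added illustrative note (not part of the upstream comments): polar(abs, angle)
+# constructs abs * exp(i * angle), i.e. real = abs * cos(angle) and
+# imag = abs * sin(angle). For example, polar(1.0, pi / 2) is approximately 1j
+# (the real part is on the order of 1e-8 in float32, from rounding of cos(pi/2)).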
+@out_wrapper(exact_dtype=True) +def polar(abs: TensorLikeType, angle: TensorLikeType) -> TensorLikeType: + result = torch.complex(abs, angle) + result.real = abs * torch.cos(angle) + result.imag = abs * torch.sin(angle) + return result diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/fft.py b/env-llmeval/lib/python3.10/site-packages/torch/_refs/fft.py new file mode 100644 index 0000000000000000000000000000000000000000..cc2cae10fb0d8594a2d609168f89a81a032a5f34 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_refs/fft.py @@ -0,0 +1,590 @@ +import math + +from typing import Iterable, List, Literal, NamedTuple, Optional, Sequence, Tuple, Union + +import torch +import torch._prims as prims +import torch._prims_common as utils +from torch._decomp import register_decomposition +from torch._prims_common import DimsType, ShapeType, TensorLikeType +from torch._prims_common.wrappers import _maybe_convert_to_dtype, out_wrapper + +__all__ = [ + # Transforms + "fft", + "fft2", + "fftn", + "hfft", + "hfft2", + "hfftn", + "rfft", + "rfft2", + "rfftn", + "ifft", + "ifft2", + "ifftn", + "ihfft", + "ihfft2", + "ihfftn", + "irfft", + "irfft2", + "irfftn", + # Helpers + "fftshift", + "ifftshift", +] + +NormType = Union[None, Literal["forward", "backward", "ortho"]] +_NORM_VALUES = {None, "forward", "backward", "ortho"} +aten = torch._ops.ops.aten + + +def _apply_norm( + x: TensorLikeType, norm: NormType, signal_numel: int, forward: bool +) -> TensorLikeType: + """Apply normalization to the un-normalized FFT result""" + torch._check(norm in _NORM_VALUES, lambda: f"Invalid normalization mode: {norm}") + + if norm == "ortho": + return x * (1 / math.sqrt(signal_numel)) + + normalize = (not forward and (norm is None or norm == "backward")) or ( + forward and norm == "forward" + ) + return x * (1 / signal_numel) if normalize else x + + +def _promote_type_fft( + dtype: torch.dtype, require_complex: bool, device: torch.device +) -> torch.dtype: + """Helper to promote a dtype to one supported by the FFT primitives""" + if dtype.is_complex: + return dtype + + # Promote integral to default float type + if not dtype.is_floating_point: + dtype = torch.get_default_dtype() + + allowed_types = [torch.float32, torch.float64] + maybe_support_half = device.type in ["cuda", "meta"] and not torch.version.hip + + if maybe_support_half: + allowed_types.append(torch.float16) + torch._check(dtype in allowed_types, lambda: f"Unsupported dtype {dtype}") + + if require_complex: + dtype = utils.corresponding_complex_dtype(dtype) + + return dtype + + +def _maybe_promote_tensor_fft( + t: TensorLikeType, require_complex: bool = False +) -> TensorLikeType: + """Helper to promote a tensor to a dtype supported by the FFT primitives""" + cur_type = t.dtype + new_type = _promote_type_fft(cur_type, require_complex, t.device) + return _maybe_convert_to_dtype(t, new_type) # type: ignore[return-value] + + +def _resize_fft_input( + x: TensorLikeType, dims: Tuple[int, ...], sizes: Tuple[int, ...] +) -> TensorLikeType: + """ + Fixes the shape of x such that x.size(dims[i]) == sizes[i], + either by zero-padding, or by slicing x starting from 0. 
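+
+    Added illustrative example (not part of the upstream docstring): for x of
+    shape (4, 5) with dims=(1,) and sizes=(8,), the last dimension is
+    zero-padded from 5 to 8; with sizes=(3,) it is instead sliced to x[:, :3].
+    A size of -1 leaves the corresponding dimension unchanged.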
+ """ + assert len(dims) == len(sizes) + must_copy = False + x_sizes = x.shape + pad_amount = [0] * len(x_sizes) * 2 + for i in range(len(dims)): + if sizes[i] == -1: + continue + + if x_sizes[dims[i]] < sizes[i]: + must_copy = True + pad_idx = len(pad_amount) - 2 * dims[i] - 1 + pad_amount[pad_idx] = sizes[i] - x_sizes[dims[i]] + + if x_sizes[dims[i]] > sizes[i]: + x = x.narrow(dims[i], 0, sizes[i]) + + return torch.constant_pad_nd(x, pad_amount) if must_copy else x + + +def _fft_c2r( + func_name: str, + input: TensorLikeType, + n: Optional[int], + dim: int, + norm: NormType, + forward: bool, +) -> TensorLikeType: + """Common code for performing any complex to real FFT (irfft or hfft)""" + input = _maybe_promote_tensor_fft(input, require_complex=True) + dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),) + last_dim_size = n if n is not None else 2 * (input.shape[dim] - 1) + torch._check( + last_dim_size >= 1, + lambda: f"Invalid number of data points ({last_dim_size}) specified", + ) + + if n is not None: + input = _resize_fft_input(input, dims=dims, sizes=(last_dim_size // 2 + 1,)) + + if forward: + input = torch.conj(input) + + output = prims.fft_c2r(input, dim=dims, last_dim_size=last_dim_size) + return _apply_norm(output, norm=norm, signal_numel=last_dim_size, forward=forward) + + +def _fft_r2c( + func_name: str, + input: TensorLikeType, + n: Optional[int], + dim: int, + norm: NormType, + forward: bool, + onesided: bool, +) -> TensorLikeType: + """Common code for performing any real to complex FFT (rfft or ihfft)""" + torch._check( + not input.dtype.is_complex, + lambda: f"{func_name} expects a floating point input tensor, but got {input.dtype}", + ) + input = _maybe_promote_tensor_fft(input) + dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),) + dim_size = n if n is not None else input.shape[dim] + torch._check( + dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified" + ) + + if n is not None: + input = _resize_fft_input(input, dims, (n,)) + + ret = prims.fft_r2c(input, dim=dims, onesided=onesided) + ret = _apply_norm(ret, norm, dim_size, forward) + return ret if forward else torch.conj(ret) + + +def _fft_c2c( + func_name: str, + input: TensorLikeType, + n: Optional[int], + dim: int, + norm: NormType, + forward: bool, +) -> TensorLikeType: + """Common code for performing any complex to complex FFT (fft or ifft)""" + torch._check( + input.dtype.is_complex, + lambda: f"{func_name} expects a complex input tensor, but got {input.dtype}", + ) + dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),) + dim_size = n if n is not None else input.shape[dim] + torch._check( + dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified" + ) + + if n is not None: + input = _resize_fft_input(input, dims, (n,)) + + ret = prims.fft_c2c(input, dim=dims, forward=forward) + return _apply_norm(ret, norm, dim_size, forward) + + +@register_decomposition(aten.fft_fft) +@out_wrapper() +def fft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + if input.dtype.is_complex: + return _fft_c2c("fft", input, n, dim, norm, forward=True) + else: + return _fft_r2c("fft", input, n, dim, norm, forward=True, onesided=False) + + +@register_decomposition(aten.fft_ifft) +@out_wrapper() +def ifft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + if input.dtype.is_complex: + return _fft_c2c("ifft", 
input, n, dim, norm, forward=False) + else: + return _fft_r2c("ifft", input, n, dim, norm, forward=False, onesided=False) + + +@register_decomposition(aten.fft_rfft) +@out_wrapper() +def rfft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + return _fft_r2c("rfft", input, n, dim, norm, forward=True, onesided=True) + + +@register_decomposition(aten.fft_irfft) +@out_wrapper() +def irfft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + return _fft_c2r("irfft", input, n, dim, norm, forward=False) + + +@register_decomposition(aten.fft_hfft) +@out_wrapper() +def hfft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + return _fft_c2r("hfft", input, n, dim, norm, forward=True) + + +@register_decomposition(aten.fft_ihfft) +@out_wrapper() +def ihfft( + input: TensorLikeType, + n: Optional[int] = None, + dim: int = -1, + norm: NormType = None, +) -> TensorLikeType: + return _fft_r2c("ihfft", input, n, dim, norm, forward=False, onesided=True) + + +class _ShapeAndDims(NamedTuple): + shape: Tuple[int, ...] + dims: Tuple[int, ...] + + +def _canonicalize_fft_shape_and_dim_args( + input: TensorLikeType, shape: Optional[ShapeType], dim: Optional[DimsType] +) -> _ShapeAndDims: + """Convert the shape and dim arguments into a canonical form where neither are optional""" + input_dim = input.ndim + input_sizes = input.shape + + if dim is not None: + if not isinstance(dim, Sequence): + dim = (dim,) + ret_dims = utils.canonicalize_dims(input_dim, dim, wrap_scalar=False) + + # Check dims are unique + torch._check( + len(set(ret_dims)) == len(ret_dims), lambda: "FFT dims must be unique" + ) + + if shape is not None: + if not isinstance(shape, Sequence): + shape = (shape,) + + # Has shape, might have dim + torch._check( + dim is None or len(dim) == len(shape), + lambda: "When given, dim and shape arguments must have the same length", + ) + transform_ndim = len(shape) + + torch._check( + transform_ndim <= input_dim, + lambda: f"Got shape with {transform_ndim} values but input tensor " + f"only has {input_dim} dimensions.", + ) + + # If shape is given, dims defaults to the last len(shape) dimensions + if dim is None: + ret_dims = tuple(range(input_dim - transform_ndim, input_dim)) + + # Translate any -1 values in shape to the default length + ret_shape = tuple( + s if s != -1 else input_sizes[d] for (s, d) in zip(shape, ret_dims) + ) + elif dim is None: + # No shape, no dim + ret_dims = tuple(range(input_dim)) + ret_shape = tuple(input_sizes) + else: + # No shape, has dim + ret_shape = tuple(input_sizes[d] for d in ret_dims) + + for n in ret_shape: + torch._check(n > 0, lambda: f"Invalid number of data points ({n}) specified") + + return _ShapeAndDims(shape=ret_shape, dims=ret_dims) + + +def _prod(xs: Iterable[int]) -> int: + """Compute product of a list""" + prod = 1 + for x in xs: + prod *= x + return prod + + +def _fftn_c2c( + function_name: str, + input: TensorLikeType, + shape: Tuple[int, ...], + dim: Tuple[int, ...], + norm: NormType, + forward: bool, +) -> TensorLikeType: + """Common code for n-dimensional complex to complex FFTs (fftn or ifftn)""" + torch._check( + input.dtype.is_complex, + lambda: f"{function_name} expects a complex input tensor, " + f"but got {input.dtype}", + ) + x = _resize_fft_input(input, dim, shape) + output = prims.fft_c2c(x, dim=dim, forward=forward) + return _apply_norm(output, 
norm=norm, signal_numel=_prod(shape), forward=forward) + + +@register_decomposition(aten.fft_fftn) +@out_wrapper() +def fftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim) + x = _maybe_promote_tensor_fft(input, require_complex=True) + return _fftn_c2c("fftn", x, shape, dim, norm, forward=True) + + +@register_decomposition(aten.fft_ifftn) +@out_wrapper() +def ifftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim) + x = _maybe_promote_tensor_fft(input, require_complex=True) + return _fftn_c2c("ifftn", x, shape, dim, norm, forward=False) + + +@register_decomposition(aten.fft_rfftn) +@out_wrapper() +def rfftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + torch._check( + not input.dtype.is_complex, + lambda: f"rfftn expects a real-valued input tensor, but got {input.dtype}", + ) + shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim) + input = _maybe_promote_tensor_fft(input, require_complex=False) + input = _resize_fft_input(input, dim, shape) + out = prims.fft_r2c(input, dim=dim, onesided=True) + return _apply_norm(out, norm=norm, signal_numel=_prod(shape), forward=True) + + +@register_decomposition(aten.fft_ihfftn) +@out_wrapper() +def ihfftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + torch._check( + not input.dtype.is_complex, + lambda: f"ihfftn expects a real-valued input tensor, but got {input.dtype}", + ) + shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim) + torch._check(len(shape) > 0, lambda: "ihfftn must transform at least one axis") + input = _maybe_promote_tensor_fft(input, require_complex=False) + input = _resize_fft_input(input, dim, shape) + + tmp = prims.fft_r2c(input, dim=dim[-1:], onesided=True) + + if len(dim) == 1: + tmp = _apply_norm(tmp, norm=norm, signal_numel=shape[0], forward=False) + return prims.conj(tmp) + + tmp = prims.conj_physical(tmp) + tmp = prims.fft_c2c(tmp, dim=dim[:-1], forward=False) + return _apply_norm(tmp, norm=norm, signal_numel=_prod(shape), forward=False) + + +class _CanonicalizeC2rReturn(NamedTuple): + shape: Tuple[int, ...] + dim: Tuple[int, ...] 
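+    # Added note: last_dim_size is the length of the real-valued output along
+    # dim[-1]; `shape` stores the Hermitian-compressed input length
+    # (last_dim_size // 2 + 1) for that dimension (see the helper below).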
+ last_dim_size: int + + +def _canonicalize_fft_c2r_shape_and_dim_args( + fname: str, + input: TensorLikeType, + s: Optional[ShapeType], + dim: Optional[DimsType], +) -> _CanonicalizeC2rReturn: + """Canonicalize shape and dim arguments for n-dimensional c2r transforms, + as well as calculating the last_dim_size which is shape[dim[-1]] for the output""" + (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim) + torch._check(len(shape) > 0, lambda: f"{fname} must transform at least one axis") + + if s is None or s[-1] == -1: + last_dim_size = 2 * (input.shape[dim[-1]] - 1) + else: + last_dim_size = shape[-1] + + torch._check( + last_dim_size >= 1, + lambda: f"Invalid number of data points ({last_dim_size}) specified", + ) + + shape_list = list(shape) + shape_list[-1] = last_dim_size // 2 + 1 + return _CanonicalizeC2rReturn( + shape=tuple(shape_list), dim=dim, last_dim_size=last_dim_size + ) + + +@register_decomposition(aten.fft_irfftn) +@out_wrapper() +def irfftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args( + "irfftn", input, s, dim + ) + input = _maybe_promote_tensor_fft(input, require_complex=True) + input = _resize_fft_input(input, dim, shape) + out = prims.fft_c2r(input, dim=dim, last_dim_size=last_dim_size) + return _apply_norm(out, norm, _prod(out.shape[d] for d in dim), forward=False) + + +@register_decomposition(aten.fft_hfftn) +@out_wrapper() +def hfftn( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = None, + norm: NormType = None, +) -> TensorLikeType: + shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args( + "hfftn", input, s, dim + ) + input = _maybe_promote_tensor_fft(input, require_complex=True) + input = _resize_fft_input(input, dim, shape) + + tmp = prims.fft_c2c(input, dim=dim[:-1], forward=True) if len(dim) > 1 else input + tmp = _apply_norm(tmp, norm, _prod(shape[:-1]), forward=True) + tmp = prims.conj_physical(tmp) + out = prims.fft_c2r(tmp, dim=dim[-1:], last_dim_size=last_dim_size) + return _apply_norm(out, norm, last_dim_size, forward=True) + + +@register_decomposition(aten.fft_fft2) +@out_wrapper() +def fft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return torch.fft.fftn(input, s=s, dim=dim, norm=norm) + + +@register_decomposition(aten.fft_ifft2) +@out_wrapper() +def ifft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return torch.fft.ifftn(input, s=s, dim=dim, norm=norm) + + +@register_decomposition(aten.fft_rfft2) +@out_wrapper() +def rfft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return torch.fft.rfftn(input, s=s, dim=dim, norm=norm) + + +@register_decomposition(aten.fft_irfft2) +@out_wrapper() +def irfft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return torch.fft.irfftn(input, s=s, dim=dim, norm=norm) + + +@register_decomposition(aten.fft_hfft2) +@out_wrapper() +def hfft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return 
torch.fft.hfftn(input, s=s, dim=dim, norm=norm) + + +@register_decomposition(aten.fft_ihfft2) +@out_wrapper() +def ihfft2( + input: TensorLikeType, + s: Optional[ShapeType] = None, + dim: Optional[DimsType] = (-2, -1), + norm: NormType = None, +) -> TensorLikeType: + return torch.fft.ihfftn(input, s=s, dim=dim, norm=norm) + + +def _default_alldims(dim: Optional[DimsType], x: TensorLikeType) -> List[int]: + """Convert Optional[DimsType] to a simple list, defaulting to all dimensions""" + if dim is None: + return list(range(x.ndim)) + elif not isinstance(dim, Sequence): + return [dim] + else: + return list(dim) + + +@register_decomposition(aten.fft_fftshift) +def fftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType: + dims = _default_alldims(dim, input) + shift = [input.shape[d] // 2 for d in dims] + return torch.roll(input, shift, dims) + + +@register_decomposition(aten.fft_ifftshift) +def ifftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType: + dims = _default_alldims(dim, input) + shift = [(input.shape[d] + 1) // 2 for d in dims] + return torch.roll(input, shift, dims) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..824cd56438fec288b6ac97ddc84d92c97bfb0891 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py @@ -0,0 +1,276 @@ +from functools import partial + +from typing import List, Optional, Tuple, Union + +import torch + +import torch._prims as prims + +import torch._prims_common as utils +import torch._refs as refs +import torch._refs.linalg as linalg +from torch import Tensor +from torch._prims_common import ( + check_fp_or_complex, + check_is_matrix, + Dim, + DimsType, + ELEMENTWISE_TYPE_PROMOTION_KIND, + NumberType, + TensorLikeType, +) +from torch._prims_common.wrappers import ( + _maybe_convert_to_dtype, + elementwise_type_promotion_wrapper, + out_wrapper, +) + + +__all__ = ["diagonal", "matrix_norm", "norm", "svd", "svdvals", "vector_norm", "vecdot"] + + +def _check_norm_dtype(dtype: Optional[torch.dtype], x_dtype: torch.dtype, fn_name: str): + """ + Checks related to the dtype kwarg in `linalg.*norm` functions + """ + if dtype is not None: + torch._check( + utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype), + lambda: f"{fn_name}: dtype should be floating point or complex. Got {dtype}", + ) + torch._check( + utils.is_complex_dtype(dtype) == utils.is_complex_dtype(x_dtype), + lambda: "{fn_name}: dtype should be {d} for {d} inputs. 
Got {dtype}".format( + fn_name=fn_name, + d="complex" if utils.is_complex_dtype(x_dtype) else "real", + dtype=dtype, + ), + ) + torch._check( + utils.get_higher_dtype(dtype, x_dtype) == dtype, + lambda: f"{fn_name}: the dtype of the input ({x_dtype}) should be convertible " + "without narrowing to the specified dtype ({dtype})", + ) + + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition + + +def diagonal( + input: TensorLikeType, + *, + offset: int = 0, + dim1: int = -2, + dim2: int = -1, +) -> TensorLikeType: + return torch.diagonal(input, offset=offset, dim1=dim1, dim2=dim2) + + +@register_decomposition(torch._ops.ops.aten.linalg_vector_norm) +@out_wrapper(exact_dtype=True) +def vector_norm( + x: TensorLikeType, + ord: float = 2.0, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> Tensor: + # Checks + check_fp_or_complex(x.dtype, "linalg.vector_norm") + + if isinstance(dim, Dim): + dim = [dim] # type: ignore[assignment] + + if x.numel() == 0 and (ord < 0.0 or ord == float("inf")): + torch._check( + dim is not None and len(dim) != 0, + lambda: f"linalg.vector_norm cannot compute the {ord} norm on an empty tensor " + "because the operation does not have an identity", + ) + shape = x.shape + assert dim is not None # mypy does not seem to be able to see through check? + for d in dim: + torch._check( + shape[d] != 0, + lambda: f"linalg.vector_norm cannot compute the {ord} norm on the " + f"dimension {d} because this dimension is empty and the " + "operation does not have an identity", + ) + _check_norm_dtype(dtype, x.dtype, "linalg.vector_norm") + + computation_dtype, result_dtype = utils.reduction_dtypes( + x, utils.REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, dtype + ) + + to_result_dtype = partial(_maybe_convert_to_dtype, dtype=result_dtype) + + # Implementation + if ord == 0.0: + return torch.sum(torch.ne(x, 0.0), dim=dim, keepdim=keepdim, dtype=result_dtype) + elif ord == float("inf"): + return to_result_dtype(torch.amax(torch.abs(x), dim=dim, keepdim=keepdim)) # type: ignore[return-value,arg-type] + elif ord == float("-inf"): + return to_result_dtype(torch.amin(torch.abs(x), dim=dim, keepdim=keepdim)) # type: ignore[return-value,arg-type] + else: + # From here on the computation dtype is important as the reduction is non-trivial + x = _maybe_convert_to_dtype(x, computation_dtype) # type: ignore[assignment] + reduce_sum = partial(torch.sum, dim=dim, keepdim=keepdim) + + if not (ord % 2.0 == 0.0 and utils.is_float_dtype(x.dtype)): + x = torch.abs(x) + return to_result_dtype(torch.pow(reduce_sum(torch.pow(x, ord)), 1.0 / ord)) # type: ignore[return-value] + + +def _backshift_permutation(dim0, dim1, ndim): + # Auxiliary function for matrix_norm + # Computes the permutation that moves the two given dimensions to the back + ret = [i for i in range(ndim) if i != dim0 and i != dim1] + ret.extend((dim0, dim1)) + return ret + + +def _inverse_permutation(perm): + # Given a permutation, returns its inverse. 
It's equivalent to argsort on an array + return [i for i, j in sorted(enumerate(perm), key=lambda i_j: i_j[1])] + + +# CompositeImplicitAutograd +@out_wrapper(exact_dtype=True) +def matrix_norm( + A: TensorLikeType, + ord: Union[float, str] = "fro", + dim: DimsType = (-2, -1), + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # shape + check_is_matrix(A, "linalg.matrix_norm") + # dim + dim = utils.canonicalize_dims(A.ndim, dim) + if isinstance(dim, Dim): + dim = (dim,) # type: ignore[assignment] + torch._check( + len(dim) == 2, lambda: "linalg.matrix_norm: dim must be a 2-tuple. Got {dim}" + ) + torch._check( + dim[0] != dim[1], + lambda: "linalg.matrix_norm: dims must be different. Got ({dim[0]}, {dim[1]})", + ) + # dtype arg + _check_norm_dtype(dtype, A.dtype, "linalg.matrix_norm") + + if isinstance(ord, str): + # ord + torch._check( + ord in ("fro", "nuc"), + lambda: "linalg.matrix_norm: Order {ord} not supported.", + ) + # dtype + check_fp_or_complex( + A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != "nuc" + ) + + if ord == "fro": + return vector_norm(A, 2, dim, keepdim, dtype=dtype) + else: # ord == "nuc" + if dtype is not None: + A = _maybe_convert_to_dtype(A, dtype) # type: ignore[assignment] + perm = _backshift_permutation(dim[0], dim[1], A.ndim) + result = torch.sum(svdvals(prims.transpose(A, perm)), -1, keepdim) + if keepdim: + inv_perm = _inverse_permutation(perm) + result = prims.transpose(torch.unsqueeze(result, -1), inv_perm) + return result + else: + # ord + abs_ord = abs(ord) + torch._check( + abs_ord in (2, 1, float("inf")), + lambda: "linalg.matrix_norm: Order {ord} not supported.", + ) + # dtype + check_fp_or_complex( + A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != 2 + ) + + max_min = partial(torch.amax if ord > 0.0 else torch.amin, keepdim=keepdim) + + if abs_ord == 2.0: + if dtype is not None: + A = _maybe_convert_to_dtype(A, dtype) # type: ignore[assignment] + perm = _backshift_permutation(dim[0], dim[1], A.ndim) + result = max_min(svdvals(prims.transpose(A, perm)), dim=-1) + if keepdim: + inv_perm = _inverse_permutation(perm) + result = prims.transpose(torch.unsqueeze(result, -1), inv_perm) + return result + else: # 1, -1, inf, -inf + dim0, dim1 = dim + if abs_ord == float("inf"): + dim0, dim1 = dim1, dim0 + if not keepdim and (dim0 < dim1): + dim1 -= 1 + return max_min( + vector_norm(A, 1.0, dim=dim0, keepdim=keepdim, dtype=dtype), dim1 + ) + + +# CompositeImplicitAutograd +@out_wrapper(exact_dtype=True) +def norm( + A: TensorLikeType, + ord: Optional[Union[float, str]] = None, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + if dim is not None: + if isinstance(dim, Dim): + dim = (dim,) # type: ignore[assignment] + torch._check( + len(dim) in (1, 2), + lambda: "linalg.norm: If dim is specified, it must be of length 1 or 2. Got {dim}", + ) + elif ord is not None: + torch._check( + A.ndim in (1, 2), + lambda: "linalg.norm: If dim is not specified but ord is, the input must be 1D or 2D. 
Got {A.ndim}D", + ) + + if ord is not None and ( + (dim is not None and len(dim) == 2) or (dim is None and A.ndim == 2) + ): + if dim is None: + dim = (0, 1) + return matrix_norm(A, ord, dim, keepdim, dtype=dtype) + else: + if ord is None: + ord = 2.0 + return vector_norm(A, ord, dim, keepdim, dtype=dtype) + + +# CompositeImplicitAutograd +@out_wrapper("U", "S", "Vh", exact_dtype=True) +def svd(A: TensorLikeType, full_matrices: bool = True) -> Tuple[Tensor, Tensor, Tensor]: + return prims.svd(A, full_matrices=full_matrices) + + +# CompositeImplicitAutograd +@out_wrapper(exact_dtype=True) +def svdvals(A: TensorLikeType) -> Tensor: + return svd(A, full_matrices=False)[1] + + +# CompositeImplicitAutograd +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("x", "y"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def vecdot(x: Tensor, y: Tensor, dim: int = -1) -> Tensor: + check_fp_or_complex(x.dtype, "linalg.vecdot") + return (x.conj() * y).sum(dim=dim) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eab6b16d990632dbb0ec58d5434aac6858969ab3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3646b96be90be51e86070360b23212ed450186a6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/__init__.py @@ -0,0 +1,3 @@ +from typing import List + +__all__: List[str] = [] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a83c5f3566f5e1a87655ab4847dbc6391ccd70e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9fb1b373f56ead061e5c87e894c9055d012649be --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py @@ -0,0 +1,1174 @@ +import math +from functools import wraps +from typing import Callable, Optional, Union + +import torch +import torch._prims as prims +import torch._prims_common as utils +import torch._refs as refs +from torch._decomp import register_decomposition +from torch._prims_common import ( + ELEMENTWISE_TYPE_PROMOTION_KIND, + NumberType, + ShapeType, + TensorLike, + TensorLikeType, +) +from torch._prims_common.wrappers import ( + elementwise_type_promotion_wrapper, + elementwise_unary_scalar_wrapper, + out_wrapper, +) +from torch._refs import _make_inplace + +__all__ = [ + "alpha_dropout", + "celu", + "celu_", + "dropout", + "elu", + "elu_", + "gelu", + "glu", + "group_norm", + "hardshrink", + "hardtanh", + "hinge_embedding_loss", + "huber_loss", + "l1_loss", + "layer_norm", + "leaky_relu", + "log_softmax", + 
"margin_ranking_loss", + "mish", + "mish_", + "mse_loss", + "nll_loss", + "pairwise_distance", + "pdist", + "poisson_nll_loss", + "prelu", + "relu", + "relu6", + "selu", + "selu_", + "smooth_l1_loss", + "softmax", + "softmin", + "softplus", + "softshrink", + "tanhshrink", + "threshold", + "threshold_", + "triplet_margin_loss", +] + +Tensor = torch.Tensor +aten = torch._ops.ops.aten +DispatchKey = torch._C.DispatchKey # type: ignore[attr-defined] + + +def _dropout_helper( + self: TensorLikeType, + val: float, +) -> TensorLikeType: + """ + Helper function for all dropout-type operators. During training, + some of the elements of the input tensor are randomly masked. + + Returns the masked tensor of the boolean values. + + """ + + return ( + refs._uniform_helper( + self.shape, low=0.0, high=1.0, dtype=torch.float32, device=self.device + ) + < val + ) + + +@register_decomposition(aten.alpha_dropout) +def alpha_dropout( + self: TensorLikeType, p: float = 0.5, training: bool = False, inplace: bool = False +) -> TensorLikeType: + if inplace: + raise NotImplementedError + + if not training: + return self + + torch._check( + p <= 1 and p >= 0, + lambda: f"dropout probability has to be between 0 and 1, but got, {p}", + ) + + if p == 1: + return torch.zeros_like(self) + + if p == 0: + return self + + dropout_mask = _dropout_helper(self, 1 - p) + + # From paper: Self-Normalizing Neural Networks (https://arxiv.org/pdf/1706.02515.pdf) + # alpha = - SELU.alpha * SELU.scale, here + # SELU.alpha = 1.6732632423543772848170429916717 and + # SELU.scale = 1.0507009873554804934193349852946 + alpha = -1.7580993408473766 + + a = 1.0 / math.sqrt((alpha * alpha * p + 1) * (1 - p)) + b = torch.logical_not(dropout_mask) + b = b * (alpha * a) + alpha * a * p + dropout_mask = a * dropout_mask + + return self * dropout_mask + b + + +def _inplace_wrapper(fn): + """ + Given a nn.functional non-linearity, implements its `inplace: bool` argument + """ + + # nb. We use the name of the first argument used in the unary references + @wraps(fn) + def _fn(a, *args, inplace=False, **kwargs): + if inplace: + torch._check( + "out" not in kwargs, + lambda: "Cannot set inplace=True and pass out= at the same time", + ) + return fn(a, *args, inplace=False, out=a, **kwargs) + else: + return fn(a, *args, inplace=False, **kwargs) + + return _fn + + +# celu is implemented specially because it has an alpha argument +# celu is very similar to elu +@register_decomposition(aten.celu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def celu( + a: TensorLikeType, alpha: Optional[NumberType] = None, inplace: bool = False +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.celu + """ + + if inplace: + raise NotImplementedError + + rhs: TensorLikeType + if alpha is not None: + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(alpha), python_type): + msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!" 
+ raise ValueError(msg) + rhs = alpha * torch.expm1(torch.true_divide(a, alpha)) # type: ignore[arg-type] + else: + rhs = torch.expm1(a) + + return torch.where(a > 0, a, rhs) + + +@_inplace_wrapper +@out_wrapper() +def dropout( + a: TensorLikeType, p: float = 0.5, training: bool = True, inplace: bool = False +) -> TensorLikeType: + if inplace: + raise NotImplementedError + + if not training: + return a + + torch._check( + p <= 1 and p >= 0, + lambda: f"dropout probability has to be between 0 and 1, but got, {p}", + ) + + if p == 1: + return torch.zeros_like(a) + + if p == 0: + return a + + scale = 1 / (1 - p) + dropout_mask = _dropout_helper(a, 1 - p) + + return a * dropout_mask * scale + + +@register_decomposition(aten.elu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def elu( + a: TensorLikeType, + alpha: NumberType = 1.0, + scale: NumberType = 1.0, + input_scale: NumberType = 1.0, + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.elu + """ + if inplace: + raise NotImplementedError + + # nb. This should be factored out into a can_cast aux function + python_type = utils.dtype_to_type(a.dtype) + torch._check( + utils.is_weakly_lesser_type(type(input_scale), python_type), + lambda: f"input_scale argument of type {type(input_scale)} cannot be safely cast to type {python_type}!", + ) + torch._check( + utils.is_weakly_lesser_type(type(scale), python_type), + lambda: f"scale argument of type {type(scale)} cannot be safely cast to type {python_type}!", + ) + torch._check( + utils.is_weakly_lesser_type(type(alpha), python_type), + lambda: f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!", + ) + + return torch.where(a > 0, scale * a, (alpha * scale) * torch.expm1(a * input_scale)) + + +@register_decomposition(aten.relu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def relu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.relu + """ + + if inplace: + raise NotImplementedError + + return torch.where(torch.le(a, 0), 0, a) + + +def group_norm( + input: Tensor, + num_groups: int, + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + eps: float = 1e-5, +) -> Tensor: + """ + Reference implementation of :func:`torch.nn.functional.group_norm`. 
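+
+    Added illustrative note (not part of the upstream docstring): for an input
+    of shape (N, C, *), the C channels are split into num_groups groups of
+    C // num_groups channels each, and the normalization statistics are
+    computed per sample over each group together with all trailing dimensions.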
+ """ + torch._check( + input.ndim >= 2, + lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}", + ) + + batch_size = input.shape[0] + num_channels = input.shape[1] + torch._check( + num_channels % num_groups == 0, + lambda: "Expected number of channels in input to be divisible by num_groups, " + + f"but got input of shape {input.shape} and num_groups = {num_groups}", + ) + + # input shape is (N, C, *), so we flatten all inner dimensions except (N, C) + flattened_inner_size = 1 + for dim_length in input.shape[2:]: + flattened_inner_size *= dim_length + + return torch.native_group_norm( + input, + weight, + bias, + batch_size, + num_channels, + flattened_inner_size, + num_groups, + eps, + )[0] + + +def layer_norm( + input: Tensor, + normalized_shape: ShapeType, + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + eps: float = 1e-5, +) -> Tensor: + """ + Reference implementation of :func:`torch.nn.functional.layer_norm`. + """ + return torch.native_layer_norm(input, normalized_shape, weight, bias, eps)[0] + + +@register_decomposition(aten.leaky_relu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def leaky_relu( + a: TensorLikeType, negative_slope: float = 0.01, inplace: bool = False +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.leaky_relu + """ + + if inplace: + raise NotImplementedError + + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(negative_slope), python_type): + msg = f"negative_slope argument of type {type(negative_slope)} cannot be safely cast to type {python_type}!" + raise ValueError(msg) + return torch.where(torch.gt(a, 0), a, torch.mul(a, negative_slope)) + + +@register_decomposition(aten.mish) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def mish(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.mish + """ + + if inplace: + raise NotImplementedError + return a * torch.tanh(torch.nn.functional.softplus(a)) + + +@register_decomposition(aten.selu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def selu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.selu + """ + if inplace: + raise NotImplementedError + + alpha = 1.6732632423543772848170429916717 + scale = 1.0507009873554804934193349852946 + + rhs = alpha * torch.expm1(a) + + return scale * torch.where(a > 0, a, rhs) + + +# Forwarding alias: the functional variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def softmax( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. 
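+    # Added illustrative example (not part of the upstream comments):
+    #   softmax(torch.tensor([1.0, 2.0, 3.0]), dim=0)
+    #   -> tensor([0.0900, 0.2447, 0.6652])  (values rounded to 4 decimals)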
+ torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +# CompositeImplicitAutograd - don't register decomp +def softmin( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. + torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.softmax(a=-a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +# softplus is implemented specially because it has beta and threshold arguments +@register_decomposition(aten.softplus) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def softplus( + a: TensorLikeType, + beta: Optional[NumberType] = None, + threshold: NumberType = 20, + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.softplus + """ + + if inplace: + raise NotImplementedError + + rhs: TensorLikeType + if beta is not None: + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(beta), python_type): + msg = f"beta argument of type {type(beta)} cannot be safely cast to type {python_type}!" + raise ValueError(msg) + scaled_input = a * beta + rhs = torch.true_divide(torch.log1p(torch.exp(scaled_input)), beta) # type: ignore[arg-type] + + else: + scaled_input = a + rhs = torch.log1p(torch.exp(scaled_input)) + + return torch.where(scaled_input > threshold, a, rhs) + + +@aten.hardshrink.default.py_impl(DispatchKey.Autograd) +@register_decomposition(aten.hardshrink) +@out_wrapper() +def hardshrink(a: TensorLikeType, lambd: float = 0.5): + # Formula for reference, + # hardshrink(x) = x if x > lambd + # = x if x < -lambd + # = 0 otherwise + return torch.where(torch.abs(a) <= lambd, 0, a) + + +@aten.softshrink.default.py_impl(DispatchKey.Autograd) +@register_decomposition(aten.softshrink) +@out_wrapper() +def softshrink(a: TensorLikeType, lambd: float = 0.5): + # Formula for reference, + # softshrink(x) = x - lambd if x > lambd + # = x + lambd if x < -lambd + # = 0 otherwise + torch._check( + lambd >= 0, + lambda: f"lambda must be greater or equal to 0, but found to be {lambd}", + ) + # We implement this in one torch.where to generate better code in the backward + # see https://github.com/pytorch/pytorch/pull/107052#discussion_r1293748211 + return torch.where(torch.abs(a) > lambd, a - torch.sign(a) * lambd, 0) + + +# Losses +def _reduction_int_to_str(reduction: int) -> str: + from torch._decomp.decompositions import Reduction + + if reduction == Reduction.NONE.value: + return "none" + elif reduction == Reduction.MEAN.value: + return "mean" + elif reduction == Reduction.SUM.value: + return "sum" + else: + raise ValueError(f"{reduction} is not a valid value for reduction") + + +def _apply_loss_reduction(loss: TensorLikeType, reduction: str) -> TensorLikeType: + if reduction == "sum": + return torch.sum(loss) + elif reduction == "mean": + return torch.mean(loss) + else: # reduction == "none" + return loss + + +def _check_reduction_value(reduction: str): + if reduction not in 
("mean", "sum", "none"): + raise ValueError(f"{reduction} is not a valid value for reduction") + + +# This helper function maps depreciated arguments, "size_average" and "reduce" +# to their corresponding "reduction" string argument +def _get_string_reduction_arg( + *, size_average: Optional[bool], reduce: Optional[bool] +) -> str: + if size_average is None: + size_average = True + if reduce is None: + reduce = True + if size_average and reduce: + ret = "mean" + elif reduce: + ret = "sum" + else: + ret = "none" + return ret + + +# CompositeImplicitAutograd - don't register decomp +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def l1_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.l1_loss + """ + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + loss = torch.abs(input - target) + return _apply_loss_reduction(loss, reduction) + + +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def smooth_l1_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", + beta: float = 1.0, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.smooth_l1_loss + """ + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + + if beta == 0.0: + return torch.nn.functional.l1_loss( + input, target, size_average=size_average, reduce=reduce, reduction=reduction + ) + else: + loss = torch.abs(input - target) + loss = torch.where(loss < beta, 0.5 * loss**2 / beta, loss - 0.5 * beta) + return _apply_loss_reduction(loss, reduction) + + +# Forwarding alias: the functional variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def log_softmax( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. 
+ torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +@register_decomposition(aten.margin_ranking_loss) +def margin_ranking_loss( + input1: TensorLikeType, + input2: TensorLikeType, + target: TensorLikeType, + margin: float = 0.0, + reduction: str = "mean", +) -> TensorLikeType: + # loss_without_reduction = max(0, −target * (input1 − input2) + margin) + if input1.ndim != input2.ndim or input1.ndim != target.ndim: + raise RuntimeError( + "margin_ranking_loss : All input tensors should have same dimension but got sizes: " + f"input1: {input1.shape}, input2: {input2.shape}, target: {target.shape} " + ) + _check_reduction_value(reduction) + loss = torch.clamp_min(-target * (input1 - input2) + margin, 0) + return _apply_loss_reduction(loss, reduction) + + +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def mse_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + loss = torch.pow(input - target, 2) + return _apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.hinge_embedding_loss) +def hinge_embedding_loss( + input: TensorLikeType, + target: TensorLikeType, + margin: float = 1.0, + reduction: str = "mean", +) -> TensorLikeType: + # loss_without_reduction = input if y == 1 + # = max(0, margin - input) if y == -1 + _check_reduction_value(reduction) + margin_clamp = torch.clamp_min(margin - input, 0) + output_margin = torch.where(target != 1, margin_clamp, 0) + output_self = torch.where(target != -1, input, 0) + loss = output_margin + output_self + return _apply_loss_reduction(loss, reduction) + + +def _nll_loss_nd( + input: TensorLikeType, + target: TensorLikeType, + weight: Optional[TensorLikeType], + reduction: str, + ignore_index: int, +) -> TensorLikeType: + torch._check( + input.ndim > 0 and input.ndim <= 3, + lambda: f"Expected input dimension to be either [1, 2, 3] but received {input.ndim}.", + ) + + torch._check( + (input.ndim == 1) or (input.shape[0] == target.shape[0]), + lambda: f"Expected input batch size {input.shape[0]} to match target batch size {target.shape[0]}.", + ) + + _check_reduction_value(reduction) + + flat_target = torch.flatten(target) + ignore_classes_mask = torch.eq(flat_target, ignore_index) + + # TODO: Enable data-dependent checks with debug mode + # TODO: This check does not work with FakeTensor inputs; See Issue #85834 + # Explicit cast for class_check to bool; See Issue #78071 + """ + from torch._subclasses.fake_tensor import FakeTensor + num_classes = input.shape[1] if input.ndim > 1 else input.shape[0] + valid_classes_mask = torch.logical_and( + (flat_target >= 0), (flat_target < num_classes) + ) + class_check = torch.all(torch.logical_or(ignore_classes_mask, valid_classes_mask)) + torch._check( + isinstance(target, FakeTensor) or bool(class_check.item()), + lambda: "A target class is out-of-bounds and not 
the ignore index.", + ) + """ + + ignore_class_weight = torch.scalar_tensor(0, dtype=input.dtype, device=input.device) + class_weight = ( + torch.scalar_tensor(1, dtype=input.dtype, device=input.device) + if weight is None + else weight[flat_target] + ) + current_weight = torch.where( + ignore_classes_mask, + ignore_class_weight, + class_weight, + ) + + if input.ndim == 1: + # implicit batch size = 1 + # input (1 batch size, C classes) + loss = -input[target] * current_weight + elif input.ndim == 2: + # input (N batch size, C classes) + batch_size = input.shape[0] + loss = -input[torch.arange(batch_size), target] * current_weight + else: + # 3D case (N batch size, C classe, K dimensions) + # input (N batch size, C classes, K) + batch_size = input.shape[0] + extent = input.shape[2] + numel = batch_size * extent + indices = torch.arange(numel) + bdx = indices // extent + kdx = indices % extent + loss = -input[bdx, flat_target, kdx] * current_weight + loss = torch.reshape(loss, target.shape) + + if reduction == "none": + return loss + elif reduction == "sum": + return torch.sum(loss) + else: + # calculate weighted mean of the loss function + return torch.sum(loss) / torch.sum(current_weight) + + +@register_decomposition(aten.nll_loss) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("input",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def nll_loss( + input: TensorLikeType, + target: TensorLikeType, + weight: Optional[TensorLikeType] = None, + size_average: Optional[bool] = None, + ignore_index: int = -100, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.nll_loss + """ + torch._check( + input.ndim > 0, + lambda: f"Expected input tensor to have 1 or more dimensions (got {input.ndim})", + ) + + # TODO: raise exception instead of converting value + # msg = "size_average and reduce args are deprecated, please use reduction argument." + # Convert these options for consistency with the eager mode + if size_average is not None or reduce is not None: + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + + # The expected behavior when the target and input have zero elements: + # reduction = 'none' --- tensor([]) + # reduction = 'sum' --- tensor(0.) + # reduction = 'mean' --- tensor(nan) + # Mean reduction on empty tensors produces NaN. See the discussion in + # https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162 + if input.numel() == 0 and target.numel() == 0: + if reduction == "none": + return torch.zeros_like(target) + elif reduction == "sum": + return torch.empty_like(target) + else: + return torch.full_like(target, float("nan")) + + # The _nll_loss_nd helper function handles the most common cases. + # ndim == 1 (Single Example) + # => Batch Size: 1, Input: (C), Target: () + # ndim == 2 (k = 1) + # => Batch Size: N, Input: (N, C), Target: (N) + # ndim == 3 (k > 1) + # => Batch Size: N, Input: (N, C, K), Target: (N, K) + if input.ndim <= 3: + return _nll_loss_nd(input, target, weight, reduction, ignore_index) + + # For ndim > 3, we reshape the input and target to 3-D case. 
+ # Input (N batch-size, C classes, k-dimensions) + # Target (N batch-size, k-dimensions) + torch._check( + input.ndim > 0 and target.ndim > 0 and target.shape[1:] == input.shape[2:], + lambda: ( + "Expected input and target to both have ndim > 0 and " + "target.shape[1:] == input.shape[2:], but got " + f"target.shape {target.shape} and input.shape {input.shape}" + ), + ) + + batch_size = input.shape[0] + num_classes = input.shape[1] + out_size = [batch_size] + list(target.shape[1:]) + + input = torch.reshape(input, [batch_size, num_classes, -1]) + target = torch.reshape(target, [batch_size, -1]) + if reduction != "none": + return _nll_loss_nd(input, target, weight, reduction, ignore_index) + else: + result = _nll_loss_nd(input, target, weight, reduction, ignore_index) + # reshape flattened inner-dim to original k-dimensions + return torch.reshape(result, out_size) + + +# TODO: This ref supports int reduction and out kwarg to be compatible with ATen: +# https://github.com/pytorch/pytorch/issues/83931 +# TODO: Could be rewritten to support complex: +# https://github.com/pytorch/pytorch/pull/85041 +@register_decomposition(aten.huber_loss) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def huber_loss( + input: TensorLikeType, + target: TensorLikeType, + reduction: Union[str, int] = "mean", + delta: float = 1.0, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.huber_loss + """ + if type(reduction) is int: + reduction = _reduction_int_to_str(reduction) + _check_reduction_value(reduction) # type: ignore[arg-type] + torch._check( + delta > 0, + lambda: "huber_loss does not support non-positive values for delta.", + ) + z = (input - target).abs() + loss = torch.where(z < delta, 0.5 * z * z, delta * (z - 0.5 * delta)) + return _apply_loss_reduction(loss, reduction) # type: ignore[arg-type] + + +# tanhshrink does not use _make_elementwise_unary_reference because it does not support out +@elementwise_unary_scalar_wrapper +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def tanhshrink(a: TensorLikeType) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.tanhshrink + """ + if not isinstance(a, TensorLike): + raise RuntimeError( + "Expected a tensor input for an elementwise unary operation!" 
+ ) + return a - torch.tanh(a) + + +@register_decomposition(aten.threshold) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def threshold( + a: TensorLikeType, + threshold: NumberType, + value: Union[bool, int, float], + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.threshold + """ + + if inplace: + raise NotImplementedError + + return torch.where(a <= threshold, value, a) + + +# CompositeImplicitAutograd - don't register decomp +# No elementwise type promotion - core op doesn't explicitly type promote +def triplet_margin_loss( + anchor: TensorLikeType, + positive: TensorLikeType, + negative: TensorLikeType, + margin: float = 1.0, + p: float = 2, + eps: float = 1e-6, + swap: bool = False, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + + # torch.nn.functional.triplet_margin_with_distance_loss has no ref defined + # since it's a pure Python implementation. Use this helper instead. + return _triplet_margin_with_distance_loss( + anchor=anchor, + positive=positive, + negative=negative, + distance_function=lambda x, y: torch.pairwise_distance(x, y, p, eps), + margin=margin, + swap=swap, + reduction=reduction, + ) + + +# Pure Python impl - don't register decomp and don't add a ref. Defined as a +# helper here since triplet_margin_loss can be nicely implemented with it. +def _triplet_margin_with_distance_loss( + anchor: TensorLikeType, + positive: TensorLikeType, + negative: TensorLikeType, + *, + distance_function: Optional[ + Callable[[TensorLikeType, TensorLikeType], TensorLikeType] + ] = None, + margin: float = 1.0, + swap: bool = False, + reduction: str = "mean", +) -> TensorLikeType: + _check_reduction_value(reduction) + + a_dim = anchor.ndim + p_dim = positive.ndim + n_dim = negative.ndim + torch._check( + a_dim == p_dim and p_dim == n_dim, + lambda: ( + f"The anchor, positive, and negative tensors are expected to have " + f"the same number of dimensions, but got: anchor {a_dim}D, " + f"positive {p_dim}D, and negative {n_dim}D inputs" + ), + ) + + if distance_function is None: + distance_function = torch.pairwise_distance + + dist_pos = distance_function(anchor, positive) + dist_neg = distance_function(anchor, negative) + # The distance swap is described in the paper "Learning shallow + # convolutional feature descriptors with triplet losses" by V. Balntas, E. + # Riba et al. If True, and if the positive example is closer to the + # negative example than the anchor is, swaps the positive example and the + # anchor in the loss computation. 
+ if swap: + dist_swap = distance_function(positive, negative) + dist_neg = torch.minimum(dist_neg, dist_swap) + loss = torch.clamp_min(margin + dist_pos - dist_neg, 0) + return _apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.hardtanh) +@_inplace_wrapper +@out_wrapper() +@elementwise_unary_scalar_wrapper +@elementwise_type_promotion_wrapper( + type_promoting_args=("a"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def hardtanh( + a: TensorLikeType, + min_val: NumberType = -1, + max_val: NumberType = 1, + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.hardtanh + """ + if inplace: + raise NotImplementedError + if utils.is_boolean_dtype(a.dtype): + raise RuntimeError("Bool inputs not supported for hardtanh") + + # preserve legacy behavior of boundaries not causing type promotion + if utils.is_integer_dtype(a.dtype): + min_val = int(min_val) # type: ignore[arg-type] + max_val = int(max_val) # type: ignore[arg-type] + if not (a.dtype != torch.uint8 or (min_val >= 0 and max_val >= 0)): + raise RuntimeError( + "Cannot do hardtanh on an unsigned type with negative limits" + ) + return torch.clamp(a, min_val, max_val) # type: ignore[arg-type] + + +@register_decomposition(aten.gelu) +@out_wrapper() +@elementwise_unary_scalar_wrapper +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def gelu(a: TensorLikeType, approximate: str = "none") -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.gelu + """ + if not isinstance(a, TensorLike): + raise RuntimeError( + "Expected a tensor input for an elementwise unary operation!" + ) + M_SQRT2 = 1.41421356237309504880 + M_SQRT1_2 = 0.70710678118654752440 + M_2_SQRTPI = 1.12837916709551257390 + if approximate == "tanh": + kBeta = M_SQRT2 * M_2_SQRTPI * 0.5 + kKappa = 0.044715 + a_cube = a * a * a + inner = kBeta * (a + kKappa * a_cube) + return 0.5 * a * (1 + torch.tanh(inner)) + elif approximate == "none": + kAlpha = M_SQRT1_2 + return a * 0.5 * (1 + torch.erf(a * kAlpha)) + else: + raise RuntimeError("approximate argument must be either none or tanh.") + + +# CompositeImplicitAutograd - don't register decomp +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def poisson_nll_loss( + input: TensorLikeType, + target: TensorLikeType, + log_input: bool = True, + full: bool = False, + size_average: Optional[bool] = None, + eps: float = 1e-8, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.poisson_nll_loss + """ + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." 
+ reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + if log_input: + loss = torch.exp(input) - target * input + else: + loss = input - target * torch.log(input + eps) + + if full: + stirling_term = ( + target * torch.log(target) - target + 0.5 * torch.log(2 * torch.pi * target) + ) + # avoid inplace add + loss = loss + stirling_term.masked_fill(target <= 1, 0) + return _apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.prelu) +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "weight"), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def prelu(a: TensorLikeType, weight: TensorLikeType) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.prelu + """ + torch._check( + isinstance(a, TensorLike), + lambda: f"prelu: Expected `a` to be tensor, but got: {type(a)}", + ) + torch._check( + isinstance(weight, TensorLike), + lambda: f"prelu: Expected `weight` to be tensor, but got: {type(weight)}", + ) + + if weight.numel() != 1: + torch._check(a.ndim > 0, lambda: "Not allow zero-dim input tensor.") + channel_size = a.shape[1] if a.ndim >= 2 else 1 + torch._check( + weight.numel() == channel_size, + lambda: f"Mismatch of parameter numbers and input channel size. Found parameter numbers =" + f" {weight.numel()} and channel size = {channel_size}.", + ) + + torch._check( + weight.ndim == 0 or weight.ndim == 1, + lambda: f"prelu: Expected `weight` to be a scalar or 1D tensor, but got: " + f"ndim = {weight.ndim}", + ) + if a.ndim == 0: + weight = weight[0] if weight.ndim == 1 else weight + else: + weight = prims.broadcast_in_dim( + weight, a.shape, tuple() if weight.ndim == 0 else (0 if a.ndim == 1 else 1,) + ) + + return torch.where(a > 0, a, a * weight) + + +@register_decomposition(aten.relu6) +@_inplace_wrapper +@out_wrapper() +def relu6(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.relu6 + """ + if inplace: + raise NotImplementedError + + # See https://github.com/pytorch/pytorch/pull/81142#discussion_r918220126 + # It may be better to use clamp here, but we use hardtanh to replicate + # the behavior of the existing implementation + return torch.nn.functional.hardtanh(a, 0, 6) + + +@register_decomposition(aten.glu) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def glu(a: TensorLikeType, dim: int = -1) -> TensorLikeType: + dim = utils.canonicalize_dims(a.ndim, dim) + torch._check( + a.shape[dim] % 2 == 0, + lambda: f"Halving dimension must be even, but dimension {dim} is size {a.shape[dim]}", + ) + b, c = torch.tensor_split(a, 2, dim) + + return b * torch.sigmoid(c) + + +@register_decomposition(aten.pairwise_distance) +@out_wrapper() +def pairwise_distance( + x1: TensorLikeType, + x2: TensorLikeType, + p: NumberType = 2.0, + eps: NumberType = 1e-6, + keepdim=False, +) -> TensorLikeType: + return torch.linalg.vector_norm(x1 - x2 + eps, ord=p, dim=-1, keepdim=keepdim) + + +@register_decomposition(aten.pdist) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def pdist(a: TensorLikeType, p: float = 2) -> TensorLikeType: + torch._check(a.ndim == 2, lambda: f"pdist only supports 2D tensors, got: {a.ndim}D") + torch._check(p >= 0, lambda: "pdist only supports non-negative p 
values") + # For p == 2 we can use an efficient implementation, but other values of p + # require creating a much bigger tensor for an intermediate step + if p == 2: + aTa = torch.mm(a, a.T) + aTa_diag = torch.diag(aTa) + t = torch.sqrt(torch.clamp(aTa_diag + aTa_diag.unsqueeze(-1) - 2 * aTa, min=0)) + else: + t = torch.linalg.vector_norm(a.unsqueeze(1) - a, ord=p, dim=2) + i = torch.triu_indices(t.shape[0], t.shape[1], offset=1, device=a.device) + return t.flatten().index_select(0, i[0] * t.shape[0] + i[1]) + + +# Needed as aten.{celu_,elu_...} exist (even if they don't have the in-place kwarg) +celu_ = _make_inplace(celu) +elu_ = _make_inplace(elu) +mish_ = _make_inplace(mish) +selu_ = _make_inplace(selu) +threshold_ = _make_inplace(threshold) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6c6354dee9a4ec5f4887c484d97ffe805a90500 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/special/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_refs/special/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..048de83506d2919fd858e871290871bb0f558289 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_refs/special/__init__.py @@ -0,0 +1,236 @@ +import math +from typing import Optional, Union + +import torch +import torch._prims as prims +import torch._prims_common as utils +import torch._refs as refs + +from torch import Tensor +from torch._decomp import register_decomposition +from torch._prims_common import ( + ELEMENTWISE_TYPE_PROMOTION_KIND, + Number, + NumberType, + TensorLike, + TensorLikeType, +) +from torch._prims_common.wrappers import elementwise_type_promotion_wrapper, out_wrapper +from torch._refs import ( + _make_alias, + _make_elementwise_binary_reference, + _make_elementwise_unary_reference, +) + + +__all__ = [ + "bessel_j0", + "bessel_j1", + "entr", + "erfcx", + "expit", + "i0e", + "i1", + "i1e", + "log_ndtr", + "logit", + "log_softmax", + "multigammaln", + "ndtr", + "ndtri", + "softmax", + "spherical_bessel_j0", + "xlog1py", + "zeta", +] +aten = torch._ops.ops.aten + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def bessel_j0(a: TensorLikeType) -> TensorLikeType: + return prims.bessel_j0(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def bessel_j1(a: TensorLikeType) -> TensorLikeType: + return prims.bessel_j1(a) + + +@register_decomposition(aten.special_entr) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def entr(a: TensorLikeType) -> TensorLikeType: + return torch.where( + torch.isnan(a), + a, + torch.where(a > 0, -a * torch.log(a), torch.where(a == 0, 0, -torch.inf)), + ) + + +@register_decomposition(aten.special_erfcx) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def erfcx(a: TensorLikeType) -> TensorLikeType: + return prims.erfcx(a) + + +# alias for sigmoid +expit = _make_alias(torch.sigmoid, "expit") + + 
+@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def i0e(a: TensorLikeType) -> TensorLikeType: + return prims.bessel_i0e(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def i1(a: TensorLikeType) -> TensorLikeType: + return prims.bessel_i1(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def i1e(a: TensorLikeType) -> TensorLikeType: + return prims.bessel_i1e(a) + + +@register_decomposition(aten.special_log_ndtr) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def log_ndtr(a: TensorLikeType) -> TensorLikeType: + # Note: M_SQRT1_2 is the value of 1 / √2 + M_SQRT1_2 = 0.707106781186547524400844362104849039 + t = a * M_SQRT1_2 + return torch.where( + a < 1.0, + torch.log(torch.special.erfcx(-t) / 2) - t * t, + torch.log1p(-torch.erfc(t) / 2), + ) + + +@register_decomposition(aten.logit) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def logit(self: TensorLikeType, eps: Optional[float] = None) -> TensorLikeType: + if eps is None: + eps = -1.0 + lo = eps + hi = 1 - eps + self = torch.clamp(self, lo, hi) + return torch.log(torch.true_divide(self, torch.sub(1, self))) + + +@register_decomposition(aten.special_xlog1py) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def xlog1py(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]): + torch._check( + isinstance(a, TensorLike) or isinstance(b, TensorLike), + lambda: 'Expected either argument a or b to be a Tensor"', + ) + + # Operations like eq and log do not handle scalar values, so we convert them to scalar_tensors. 
+ if isinstance(a, TensorLike) and isinstance(b, Number): + b = refs.scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(b, TensorLike) and isinstance(a, Number): + a = refs.scalar_tensor(a, dtype=b.dtype, device=b.device) + + # mypy: expected "Tensor" + assert isinstance(a, TensorLike) + assert isinstance(b, TensorLike) + rhs = torch.where(torch.eq(a, 0), 0, torch.mul(a, torch.log1p(b))) + return torch.where(torch.isnan(b), float("nan"), rhs) + + +@register_decomposition(aten.mvlgamma) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def multigammaln(a: TensorLikeType, p: int) -> TensorLikeType: + c = 0.25 * p * (p - 1) * math.log(math.pi) + b = 0.5 * torch.arange(start=(1 - p), end=1, step=1, dtype=a.dtype, device=a.device) + return torch.sum(torch.lgamma(a.unsqueeze(-1) + b), dim=-1) + c + + +@register_decomposition(aten.special_ndtr) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def ndtr(a: TensorLikeType) -> TensorLikeType: + # Note: M_SQRT1_2 is the value of 1 / √2 + M_SQRT1_2 = 0.707106781186547524400844362104849039 + a_sqrt_2 = a * M_SQRT1_2 + return (1 + torch.erf(a_sqrt_2)) * 0.5 + + +@register_decomposition(aten.special_ndtri) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def ndtri(a: TensorLikeType) -> TensorLikeType: + return prims.ndtri(a) + + +# Forwarding alias: the special variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def log_softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +# Forwarding alias: the special variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def spherical_bessel_j0(a: TensorLikeType) -> TensorLikeType: + return prims.spherical_bessel_j0(a) + + +# TODO: add docstring +@_make_elementwise_binary_reference( + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def zeta(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.zeta(a, b) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c066f950bf693a56eef05fb53ec7000d299237e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e78c0e57bc0b7eeda6178f1c8cb5f74051d3db29 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_tensor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4aa0f6d690893e5b9d37b235c171543e7c093372 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_tensor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0a4bfa653e3a641f9ced0a8b15787aeacf1da16 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/functional_tensor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/functional_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d5903afc6dca66e97193bff0bcf58a4153754ee Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/functional_tensor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/meta_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/meta_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a5838e58cdb4fbd5bc7046094b1b94324e2ab17 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/meta_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/schema_check_mode.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/schema_check_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82dcd9f422fdffef5352939b7d24f345c7fe1b07 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__pycache__/schema_check_mode.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/fake_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/fake_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1814a2a82374c857dff7cbd63fcc93586ac2ad66 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/fake_utils.py @@ -0,0 +1,188 @@ +import functools +import warnings +from typing import Callable, Union + +import torch +import torch.utils._pytree as pytree +from torch._ops import OpOverload +from torch._subclasses.fake_tensor import ( + FakeTensorMode, + tree_flatten_only, + UnsupportedFakeTensorException, +) +from torch.utils._python_dispatch import TorchDispatchMode + + +aten = torch._ops.ops.aten + + +def outputs_alias_inputs(outputs, inputs): + input_storages = { + inp._typed_storage()._cdata + for inp in tree_flatten_only(torch.Tensor, inputs) + if torch._C._has_storage(inp) + } + return any( + torch._C._has_storage(out) and out._typed_storage()._cdata in input_storages + for out in tree_flatten_only(torch.Tensor, outputs) + ) + + +def 
outputs_are_inputs(outputs, inputs): + input_ids = {id(inp) for inp in tree_flatten_only(torch.Tensor, inputs)} + return any(id(out) in input_ids for out in tree_flatten_only(torch.Tensor, outputs)) + + +def output_alias_each_other(outputs): + storages = set() + for out in tree_flatten_only(torch.Tensor, outputs): + if not torch._C._has_storage(out): + continue + stor = out._typed_storage()._cdata + if stor in storages: + return True + storages.add(stor) + return False + + +def is_sdpa_error(func, idx, e): + if ( + ( + func is aten._scaled_dot_product_flash_attention.default + or func is aten._flash_attention_forward.default + ) + and idx in (6, 7) + and "Devices" in repr(e) + ): + return True + if ( + ( + func is aten._scaled_dot_product_efficient_attention.default + or func is aten._efficient_attention_forward.default + ) + and idx in (2, 3) + and "Devices" in repr(e) + ): + return True + return False + + +class CrossRefFakeMode(TorchDispatchMode): + def __init__( + self, + ignore_op_fn: Union[Callable[[OpOverload], bool], None] = None, + *, + check_strides=True, + check_aliasing=True, + ): + self.ignore_op_fn = ( + ignore_op_fn if ignore_op_fn is not None else lambda fn: False + ) + self.check_strides = check_strides + self.check_aliasing = check_aliasing + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + + fake_r = None + + # empty_like excluded for now due to sparse complex + # aten._to_dense.default this one is getting called with csc + if ( + func + not in ( + aten.lift_fresh.default, + aten.lift_fresh_copy.default, + aten.set_.source_Storage_storage_offset, + ) + and not self.ignore_op_fn(func) + and torch.Tag.dynamic_output_shape not in func.tags + and torch.Tag.inplace_view not in func.tags + and torch.Tag.data_dependent_output not in func.tags + ): + # Do not import symbolic_shapes at the top of the module as it imports sympy and that's slow + from torch.fx.experimental.symbolic_shapes import ShapeEnv + + try: + # TODO: enable_python_dispatcher() here + with FakeTensorMode(shape_env=ShapeEnv()) as fake_mode: + fake_args, fake_kwargs = pytree.tree_map_only( + torch.Tensor, + functools.partial(fake_mode.from_tensor, static_shapes=True), + (args, kwargs), + ) + with warnings.catch_warnings(): + fake_r = func(*fake_args, **fake_kwargs) + except UnsupportedFakeTensorException: + pass + + context = ( + f"When comparing the output of {func} on FakeTensor and concrete Tensors, " + f"found" + ) + r = func(*args, **kwargs) + if fake_r is not None: + r_flat = pytree.tree_leaves(r) + f_flat = pytree.tree_leaves(fake_r) + assert len(f_flat) == len( + r_flat + ), f"{context} mismatch in number of returns {len(f_flat)} != {len(r_flat)}" + + if self.check_aliasing: + r_aliasing = outputs_alias_inputs(r, (args, kwargs)) + f_aliasing = outputs_alias_inputs(fake_r, (fake_args, fake_kwargs)) + assert ( + r_aliasing == f_aliasing + ), f"{context} mismatch in outputs_alias_inputs check {f_aliasing} != {r_aliasing}" + + r_identity_eq = outputs_are_inputs(r, (args, kwargs)) + f_identity_eq = outputs_are_inputs(fake_r, (fake_args, fake_kwargs)) + assert ( + r_identity_eq == f_identity_eq + ), f"{context} mismatch in outputs_are_inputs check {f_identity_eq} != {r_identity_eq}" + + r_output_alias_each_other = output_alias_each_other(r) + f_output_alias_each_other = output_alias_each_other(fake_r) + assert r_output_alias_each_other == f_output_alias_each_other, ( + f"{context} mismatch in outputs_alias_each_other check " + f"{f_output_alias_each_other} != 
{r_output_alias_each_other}" + ) + + for idx, (r_out, fake_out) in enumerate( + zip(pytree.tree_leaves(r), pytree.tree_leaves(fake_r)) + ): + r_is_ten = isinstance(r_out, torch.Tensor) + assert r_is_ten == isinstance( + fake_out, torch.Tensor + ), f"{context} mismatched number of tensor outputs" + if r_is_ten: + assert r_out.requires_grad == fake_out.requires_grad, ( + f"{context} mismatched requires_grad-ness of outputs. " + f"This usually means that you have added autograd support " + f"for your operator at a dispatch key other than Autograd, " + f"which will lead to problems" + ) + if torch._C._has_storage(r_out): + r_offset = r_out.storage_offset() + f_offset = fake_out.storage_offset() + assert ( + r_offset == f_offset + ), f"{context} mismatched storage offset" + + try: + torch._prims.utils.compare_tensor_meta( + r_out, + fake_out, + check_strides=self.check_strides, + allow_rhs_unbacked=True, + ) + except Exception as e: + if is_sdpa_error(func, idx, e): + continue + error_message = ( + f"{context} mismatched tensor metadata: {e}" + if len(r_flat) == 1 + else f"{context} mismatched tensor metadata for output[{idx}]: {e}" + ) + raise RuntimeError(error_message) from e + return r diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/functional_tensor.py b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/functional_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..375def672fb442306dd736de3a6163c8f4dad656 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/functional_tensor.py @@ -0,0 +1,552 @@ +import contextlib +from abc import ABC, abstractmethod +from typing import Any, Callable, ContextManager, Tuple + +import torch +import torch.utils._pytree as pytree +from torch._C import _functionalization_reapply_views_tls as _reapply_views +from torch.utils._python_dispatch import return_and_correct_aliasing, TorchDispatchMode + +not_implemented_log = torch._logging.getArtifactLogger(__name__, "not_implemented") + + +class FunctionalTensor(torch.Tensor): + """ + Functional tensors represent tensors that will remove mutations + from a program. If you perform a mutable operation on a functional tensor, + it will re-dispatch to the functional variant of that operation. + + Historically, functionalization is implemented in C++ in the dispatcher. + This class is a lightweight python shim around the C++ functionalization logic. + + FunctionalTensor is required to be used with a corresponding + FunctionalTensormode active, because it relies + on using the mode for dispatch (which can properly handle factory functions). + """ + + elem: torch.Tensor + # Indicates to our torch_dispatch dispatching infra that + # this is an "infra" mode with lower dispatching precedence. + _mode_key = torch._C._TorchDispatchModeKey.FUNCTIONAL + + # Note: The reason we add these extra keys to our FunctionalTensor subclass + # is to mirror the behavior of C++ functionalization (we can choose to change this + # later, as long as it doesn't break anything). + # FunctionalTensorWrapper copies **all** dispatch keys from the inner tensor + # to the wrapper, excluding functorch and python dispatch keys. + # Here I'm trying to re-use the keyset the functorch wrapper subclasses copy, + # except that they don't include ZeroTensor so I'm manually adding it in. + _extra_dispatch_keys = torch._C._additional_keys_to_prop_for_wrapper_tensors.add( + torch._C.DispatchKey.ZeroTensor + ) + + # These are all aten ops that correspond to metadata queries. 
+ # We want FunctionalTensor to be able to handle them directly. + metadata_fns = [ + torch.ops.aten.is_contiguous.default, # type: ignore[has-type] + torch.ops.aten.is_contiguous.memory_format, # type: ignore[has-type] + torch.ops.aten.is_strides_like_format.default, # type: ignore[has-type] + torch.ops.aten.is_non_overlapping_and_dense.default, # type: ignore[has-type] + torch.ops.aten.size.default, # type: ignore[has-type] + torch.ops.aten.sym_size.default, # type: ignore[has-type] + torch.ops.aten.stride.default, # type: ignore[has-type] + torch.ops.aten.sym_stride.default, # type: ignore[has-type] + torch.ops.aten.storage_offset.default, # type: ignore[has-type] + torch.ops.aten.sym_storage_offset.default, # type: ignore[has-type] + torch.ops.aten.numel.default, # type: ignore[has-type] + torch.ops.aten.sym_numel.default, # type: ignore[has-type] + torch.ops.aten.dim.default, # type: ignore[has-type] + ] + + def __new__(cls, elem): + assert torch._is_functional_tensor(elem) + + # In general, we'd like our functional tensor subclass to only be in charge of functionalization, + # and defer to the inner subclass for all other functionality. + # Example: If our inner tensor is a ZeroTensor, we would want to defer running the ZeroTensor fallback + # until after we redispatch to our inner ZeroTensor. + # However, there are a few keys that we need to mirror between the inner and outer tensors. + # Conjugate + # Negative + # Why? These keys are used to test metadata queries, like `.is_conj()` and `.is_neg()`. + # We **need** calls to is_conj() to return the same thing on the outer and inner tensors, + # Because user code / framework code that branches like so needs to do the same thing + # when it sees the outer FunctionalTensor: + # if (x.is_conj()) { + # return at::view_as_real(x.resolve_conj()); + # } else { + # return at::view_as_real(x); + # } + extra_dispatch_keys = ( + FunctionalTensor._extra_dispatch_keys & torch._C._dispatch_keys(elem) + ) + + out = torch.Tensor._make_wrapper_subclass( # type: ignore[arg-type, attr-defined] + # TODO: right now, _make_wrapper_subclass's dynamic shape interaction is not great. + # Calling the overload that has kwargs causes us to go down the first overload path, + # which will **always** specialize sizes. + # We should probably eventually fix this so that the first overload can just handle dynamic shapes. + cls, + elem.shape, # sizes + elem.stride(), # strides + elem.storage_offset(), # storage_offset + None, # memory_format + elem.dtype, # dtype + elem.layout, # layout + elem.device, # device + False, # pin_memory + elem.requires_grad, # requires_grad + "sizes", # dispatch_sizes_strides_policy + False, # dispatch_device + False, # dispatch_layout + extra_dispatch_keys, # _extra_dispatch_keys + ) + out.elem = elem + return out + + # Need to disable default torch_function. Why? + # Default torch_function will always wrap outputs into a subclass if they aren't already a subclass. + # We actually.. 
don't want to do this sometimes, see Note [FunctionalTensorMode inputs are sometimes plain tensors] + __torch_function__ = torch._C._disabled_torch_function_impl + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + unrecognized_types = [ + t + for t in types + if t not in [torch.Tensor, torch._subclasses.FakeTensor, FunctionalTensor] + ] + if unrecognized_types: + not_implemented_log.debug( + "FunctionalTensor unrecognized subclass(es): %s", unrecognized_types + ) + return NotImplemented + + if kwargs is None: + kwargs = {} + + # FunctionalTensor needs to plumb all metadata requests to the inner tensor. + # In theory we don't have to do this - but if we want to service metadata requests here, + # we need to carefully make sure all metadata is accurate (including metadata mutations) + if func in FunctionalTensor.metadata_fns: + + def unwrap(x): + return x.elem + + assert len(args) == 1 and isinstance(args[0], FunctionalTensor) + assert len(kwargs) == 0 + # All metadata accesses should be plumbed to the inner tensor, that way we don't have to worry + # about the problem of keeping metadata in sync between the wrapper and inner tensor. + # This also alleviates us from having to manually handle metadata mutations on the wrapper. + return func(args[0].elem) + # Originally I tried to implement my subclass without giving it a torch_dispatch, but I gave up: + # - _make_wrapper_subclass requires a __torch_dispatch__ + # - If we want to use _make_subclass(), we have a problem: the subclass will share a TensorImpl with the inner tensor, + # which is of type FunctionalTensorWrapper! We explicitly do not want our wrapper to be a FunctionalTensorWrapper. + # - If we use the default tensor.__new__(), we have another problem: it returns inner_tensor.alias(), + # which causes every subclass created above autograd to have autograd view metadata + # (in addition to also being a FunctionalTensorWrapper). + raise RuntimeError( + "Attempting to use FunctionalTensor on its own. Instead, please use it with a corresponding FunctionalTensorMode()" + ) + + def __repr__(self): + return f"FunctionalTensor({repr(self.elem)})" + + @staticmethod + def to_functional(x): + # We will do the wrapping for the user. + assert not torch._is_functional_tensor(x) + # The only autograd metadata we care about on the FunctionalTensor is: + # - requires_grad (so autograd runs) + # - is_leaf (so that mutations on graph inputs that are not leaves are allowed by the autograd engine) + # this is handled by FunctionalTensor.to_functional + x_functional = torch._to_functional_tensor(x) + # Technically the FunctionalTensormode here is unnecessary, + # but it avoids spurious NotImplemented logs during `ProxyTorchDispatchMode` tracing. 
+ # _mirror_autograd_meta_to queries tensor sizes, + # and otherwise the sym_size() call will go to the proxy mode before hitting + # FunctionalTensor.__torch_dispatch__ + with FunctionalTensorMode(): + torch._mirror_autograd_meta_to(x, x_functional) # type: ignore[attr-defined] + out = FunctionalTensor(x_functional) + torch._mirror_autograd_meta_to(x_functional, out) # type: ignore[attr-defined] + return out + + def from_functional(self): + torch._sync(self) + return torch._from_functional_tensor(self.elem) + + def replace_(self, output) -> None: + torch._functionalize_replace(self.elem, output) + + def commit_update(self) -> None: + torch._functionalize_commit_update(self.elem) + + def sync(self) -> None: + torch._functionalize_sync(self.elem) + + def mark_mutation_hidden_from_autograd(self) -> None: + torch._functionalize_mark_mutation_hidden_from_autograd(self.elem) + + +class FunctionalTensorMode(TorchDispatchMode): + def __init__(self): + self.is_on_stack = False + self.enter_stack = [] + # Indicates to our torch_dispatch dispatching infra that + # this is an "infra" mode with lower dispatching precedence. + self._mode_key = torch._C._TorchDispatchModeKey.FUNCTIONAL + # This will be turned off later for pre-dispatch functionalization + self.decompose_composite_implicit_ops = True + + # No-op if FunctionalTensorMode is already in use + def __enter__(self): + if ( + torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL) + is None + ): + self.enter_stack.append(True) + + return super().__enter__() + else: + self.enter_stack.append(False) + return self + + def __exit__(self, a, b, c): + is_on_stack = self.enter_stack.pop() + if is_on_stack: + super().__exit__(a, b, c) + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + + unrecognized_types = [ + t + for t in types + if not issubclass(t, torch._subclasses.FakeTensor) + and t not in [torch.Tensor, FunctionalTensor] + ] + if unrecognized_types: + not_implemented_log.debug( + "FunctionalTensor unrecognized subclass(es): %s", unrecognized_types + ) + return NotImplemented + + if ( + func not in FunctionalTensor.metadata_fns + and self.decompose_composite_implicit_ops + # Not all funcs from __torch_dispatch__ are actual dispatcher ops, + # e.g. prim.device + and torch._C._dispatch_has_kernel(func.name()) + ): + with self: + # Decomposes CompositeImplicitAutograd ops + r = func.decompose(*args, **kwargs) + if r is not NotImplemented: + return r + + def assert_is_functional(x): + assert torch._is_functional_tensor(x) + + def wrap(x): + # Only wrap our outputs in subclasses if the inner functionalization call + # also wrapped outputs into FunctionalTensorWrappers. + # When can this happen? e.g. `torch.div(2, 2)` + assert not isinstance(x, FunctionalTensor) + if isinstance(x, torch.Tensor) and torch._is_functional_tensor(x): + return FunctionalTensor(x) + return x + + any_functional_inputs = False + + def unwrap(x): + any_functional_inputs = True + return x.elem + + from torch._higher_order_ops.auto_functionalize import ( + can_auto_functionalize, + do_auto_functionalize, + ) + + if can_auto_functionalize( + func + ) and not torch._C._dispatch_has_kernel_for_dispatch_key( + func.name(), torch._C.DispatchKey.Functionalize + ): + return do_auto_functionalize(func, args, kwargs) + + args_unwrapped, kwargs_unwrapped = pytree.tree_map_only( + FunctionalTensor, unwrap, (args, kwargs) + ) + + # Expectation: functionalization should not **already** be enabled above our mode. 
+ # Why would that be bad? when we return a FunctionalTensor here, we don't want functionalization + # to run above this mode and further wrap that output in **another** C++ FunctionalTensorWrapper. + is_included = torch._C._dispatch_tls_is_dispatch_key_included( + torch._C.DispatchKey.Functionalize + ) + is_excluded = torch._C._dispatch_tls_is_dispatch_key_excluded( + torch._C.DispatchKey.Functionalize + ) + assert is_excluded or not is_included + include_to_set = ( + torch._C._dispatch_tls_local_include_set() + | torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) + ) + exclude_to_set = ( + torch._C._dispatch_tls_local_exclude_set().remove( + torch._C.DispatchKey.Functionalize + ) + - FunctionalTensor._extra_dispatch_keys + ) + # All we want to do here is re-use the existing C++ functionalization logic. + # This requires swizzling our TLS dispatch keys so that the Functionalize key is active. + with torch._C._ForceDispatchKeyGuard(include_to_set, exclude_to_set): + try: + # By default for python functionalization (for AOTAutograd), we reapply views. + old_apply_views = torch._functionalize_enable_reapply_views(True) # type: ignore[attr-defined] + outs_unwrapped = func(*args_unwrapped, **kwargs_unwrapped) + outs_wrapped = pytree.tree_map_only(torch.Tensor, wrap, outs_unwrapped) + finally: + torch._disable_functionalization() + torch._functionalize_enable_reapply_views(old_apply_views) # type: ignore[attr-defined] + + is_included = torch._C._dispatch_tls_is_dispatch_key_included( + torch._C.DispatchKey.Functionalize + ) + is_excluded = torch._C._dispatch_tls_is_dispatch_key_excluded( + torch._C.DispatchKey.Functionalize + ) + assert is_excluded or not is_included + + if ( + # If no outputs are our functional subclass, then don't try to fix up aliasing + not any( + isinstance(x, FunctionalTensor) + for x in pytree.tree_leaves(outs_wrapped) + ) + # Since lift_fresh lifts its argument into a functional tensor, we can skip the + # aliasing correction step. Otherwise, we would be setting the storage of a + # lifted tensor to that of an unlifted tensor. + # Ref: https://github.com/pytorch/pytorch/issues/111506 + or func == torch.ops.aten.lift_fresh.default + ): + return outs_wrapped + # Wrapper tensor subclasses do not have correct aliasing info! Use this util to manually correct the output aliasing. + # inplace ops like `aten.add_()` are expected to return inputs **directly**, instead of creating fresh tensor objects. + # Use this util to figure out the right thing to return. + # If none of our inputs were wrapped, then we have no FunctionalTensor outputs that we need to fix up storages for. + return return_and_correct_aliasing(func, args, kwargs, outs_wrapped) + + +@contextlib.contextmanager +def maybe_disable_functional_mode(): + maybe_func_mode = torch._C._unset_dispatch_mode( + torch._C._TorchDispatchModeKey.FUNCTIONAL + ) + try: + yield + finally: + if maybe_func_mode is not None: + torch._C._set_dispatch_mode(maybe_func_mode) + + +# TODO: clean up the redundancy here, +# unify on a single context manager for all mode keys. +@contextlib.contextmanager +def unset_functional_temporarily(): + old = torch._C._unset_dispatch_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL) + try: + yield old + finally: + if old is not None: + torch._C._set_dispatch_mode(old) + + +# This is similar to torch.func.functionalize, but: +# - It uses FunctionalTensorMode, and FunctionalTensor (a python subclass). 
+# One important advantage to using this mode is that it will let us +# run functionalization underneath __torch_dispatch__, +# which we need in AOTAutograd. +# - Doing so means that it does not automatically compose with other +# functorch transforms, since these transforms always run above __torch_dispatch__. +# That's why this util lives here, and not in functorch. +def dispatch_functionalize(func): + # TODO: pull these from aot autograd + def to_fun(t): + if isinstance(t, torch.Tensor): + return FunctionalTensor.to_functional(t) + return t + + def from_fun(t): + if not isinstance(t, FunctionalTensor): + # quick sanity assert + if isinstance(t, torch.Tensor): + assert not torch._is_functional_tensor(t) + return t + torch._sync(t) + return torch._from_functional_tensor(t.elem) + + def inner(*args, **kwargs): + func_args = pytree.tree_map_only(torch.Tensor, to_fun, args) + func_kwargs = pytree.tree_map_only(torch.Tensor, to_fun, kwargs) + + flattened_wrapped_args = pytree.arg_tree_leaves(*func_args) + flattened_wrapped_kwargs = pytree.arg_tree_leaves(**func_kwargs) + + disable_above = torch._C._ExcludeDispatchKeyGuard( + torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) + ) + with disable_above, FunctionalTensorMode(): + func_outputs = func(*func_args, **func_kwargs) + outputs = pytree.tree_map_only(FunctionalTensor, from_fun, func_outputs) + + return outputs + + return inner + + +class BaseFunctionalizeAPI(ABC): + @abstractmethod + def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + pass + + @abstractmethod + def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + pass + + @abstractmethod + def functionalize(self, inner_f: Callable) -> Callable: + pass + + @abstractmethod + def redispatch_to_next(self) -> ContextManager: + pass + + @abstractmethod + def replace(self, input_tensor, output_tensor) -> None: + pass + + @abstractmethod + def commit_update(self, tensor) -> None: + pass + + @abstractmethod + def sync(self, tensor) -> None: + pass + + @abstractmethod + def mark_mutation_hidden_from_autograd(self, tensor) -> None: + pass + + +class PythonFunctionalizeAPI(BaseFunctionalizeAPI): + def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + return torch.utils._pytree.tree_map_only( + FunctionalTensor, FunctionalTensor.to_functional, args + ) + + def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + return torch.utils._pytree.tree_map_only( + FunctionalTensor, FunctionalTensor.from_functional, args + ) + + def functionalize(self, inner_f: Callable) -> Callable: + return dispatch_functionalize(inner_f) + + def redispatch_to_next(self) -> ContextManager: + return unset_functional_temporarily() + + def replace(self, input_tensor, output_tensor) -> None: + assert isinstance(input_tensor, FunctionalTensor) + assert not isinstance(output_tensor, FunctionalTensor) + input_tensor.replace_(output_tensor) + + def commit_update(self, tensor) -> None: + assert isinstance(tensor, FunctionalTensor) + tensor.commit_update() + + def sync(self, tensor) -> None: + assert isinstance(tensor, FunctionalTensor) + tensor.sync() + + def mark_mutation_hidden_from_autograd(self, tensor) -> None: + assert isinstance(tensor, FunctionalTensor) + tensor.mark_mutation_hidden_from_autograd() + + +class CppFunctionalizeAPI(BaseFunctionalizeAPI): + def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + from torch._functorch.eager_transforms import _wrap_all_tensors_to_functional + + return _wrap_all_tensors_to_functional(args, level=0) + + def unwrap_tensors(self, args: Tuple[Any]) -> 
Tuple[Any]: + from torch._functorch.eager_transforms import ( + _unwrap_all_tensors_from_functional, + ) + + return _unwrap_all_tensors_from_functional(args, reapply_views=_reapply_views()) + + def functionalize(self, inner_f: Callable) -> Callable: + return torch.func.functionalize(inner_f) + + def redispatch_to_next(self) -> ContextManager: + return torch._C._ExcludeDispatchKeyGuard( + torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) + ) + + def replace(self, input_tensor, output_tensor) -> None: + torch._functionalize_replace(input_tensor, output_tensor) + + def commit_update(self, tensor) -> None: + torch._functionalize_commit_update(tensor) + + def sync(self, tensor) -> None: + torch._functionalize_sync(tensor) + + def mark_mutation_hidden_from_autograd(self, tensor) -> None: + torch._functionalize_mark_mutation_hidden_from_autograd(tensor) + + +class FunctorchFunctionalizeAPI(BaseFunctionalizeAPI): + def __init__(self, interpreter): + self.interpreter = interpreter + + def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + from torch._functorch.eager_transforms import _wrap_all_tensors_to_functional + + return _wrap_all_tensors_to_functional(args, level=self.interpreter.level()) + + def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]: + from torch._functorch.eager_transforms import ( + _unwrap_all_tensors_from_functional, + ) + + return _unwrap_all_tensors_from_functional( + args, reapply_views=self.interpreter.functionalize_add_back_views() + ) + + def functionalize(self, inner_f: Callable) -> Callable: + return torch.func.functionalize( + inner_f, + remove="mutations_and_views" + if self.interpreter.functionalize_add_back_views() + else "mutations", + ) + + def redispatch_to_next(self) -> ContextManager: + return self.interpreter.lower() + + def replace(self, input_tensor, output_tensor) -> None: + torch._functionalize_replace(input_tensor, output_tensor) + + def commit_update(self, tensor) -> None: + torch._functionalize_commit_update(tensor) + + def sync(self, tensor) -> None: + torch._functionalize_sync(tensor) + + def mark_mutation_hidden_from_autograd(self, tensor) -> None: + torch._functionalize_mark_mutation_hidden_from_autograd(tensor) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/meta_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/meta_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8db8f94b1b4195cd1be7f5a8a502bcbfe4d3994d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/meta_utils.py @@ -0,0 +1,730 @@ +import contextlib +import warnings +import weakref +from typing import ContextManager, List, Optional, Tuple, TYPE_CHECKING + +import torch +from torch._C._functorch import ( + _unwrap_functional_tensor, + _wrap_functional_tensor, + current_level, + peek_interpreter_stack, + TransformType, +) +from torch._guards import Source + +from torch.multiprocessing.reductions import StorageWeakRef +from torch.utils._python_dispatch import ( + is_traceable_wrapper_subclass, + transform_subclass, +) +from torch.utils.weak import WeakIdRef + +if TYPE_CHECKING: + # Import the following modules during type checking to enable code intelligence features, + # Do not import unconditionally, as they import sympy and importing sympy is very slow + from torch.fx.experimental.symbolic_shapes import SymbolicContext + +DimList = List + + +def safe_is_leaf(t): + try: + return t.is_leaf + except RuntimeError: + # inference mode can trigger this + return False + + +def 
safe_grad(t): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "The .grad attribute of a Tensor") + return t.grad + + +def assert_eq(a, b): + assert a == b, f"{a} != {b}" + + +def assert_metadata_eq(assert_eq, m1, m2, *, skip_symbolic=False): + def go(m1, m2): + assert_eq(m1.dtype, m2.dtype) + if not skip_symbolic: + assert_eq(m1.shape, m2.shape) + assert_eq(m1.requires_grad, m2.requires_grad) + assert_eq(m1.is_leaf, m2.is_leaf) + assert_eq(m1.grad_fn is None, m2.grad_fn is None) + assert_eq(m1.is_sparse, m2.is_sparse) + assert_eq(m1.is_inference(), m2.is_inference()) + assert_eq(m1.is_conj(), m2.is_conj()) + assert_eq(m1.is_neg(), m2.is_neg()) + assert_eq(safe_grad(m1) is not None, safe_grad(m2) is not None) + if safe_grad(m1) is not None: + go(safe_grad(m1), safe_grad(m2)) + if m1.is_sparse: + assert_eq(m1.dense_dim(), m2.dense_dim()) + assert_eq(m1.sparse_dim(), m2.sparse_dim()) + assert_eq(m1.is_coalesced(), m2.is_coalesced()) + else: + if not skip_symbolic: + assert_eq(m1.stride(), m2.stride()) + assert_eq(m1.storage_offset(), m2.storage_offset()) + assert_eq(m1._is_view(), m2._is_view()) + if m1._is_view(): + go(m1._base, m2._base) + # TODO: test if is resizable (no direct query for this atm) + # TODO: audit AutogradMeta to see if it matches + # TODO: test forward AD + + return go(m1, m2) + + +# This is a class for converting multiple tensors into meta tensors which +# share the same view/storage structure. The operation model is you allocate +# one of these, and then call it repeatedly on all the tensors you want to +# convert. It's important to use the same object for tensors you want to +# share storage because this is how we correlate shared storages to the same +# meta storages. This class will hold weak references to cached tenosrs +# and tensor storages. 
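+# A minimal usage sketch (illustrative only; the variable names below are
+# assumed, not part of this module):
+#
+#   converter = MetaConverter()
+#   base = torch.randn(4)
+#   view = base.view(2, 2)
+#   meta_base = converter(base)  # meta tensor mirroring base's metadata
+#   meta_view = converter(view)  # its meta base/storage is shared with
+#                                # meta_base, because the same converter
+#                                # (and hence the same memo) handled both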
+class MetaConverter: + def __init__(self): + self.storage_memo = {} + self.tensor_memo: weakref.WeakValueDictionary = weakref.WeakValueDictionary() + self.maybe_storages_to_delete = [] + self.check_expired_frequency = 128 + self.check_expired_count = 0 + self.hit = 0 + self.miss = 0 + self.del_hook = None + self.arg_cnt = 0 + + def successful(self): + return self.hit > 0 and self.miss == 0 + + def check_for_expired_weak_storages(self): + new_li = [] + stor_to_delete = [] + for obj in self.maybe_storages_to_delete: + if not obj.expired(): + new_li.append(obj) + else: + stor_to_delete.append(obj) + for obj in stor_to_delete: + self.storage_memo.pop(obj, None) + self.maybe_storages_to_delete = new_li + + # if for some reason we have aquired many storages which have not expired + # even though a tensor with their storage has expired (aliasing or otherwise) + # check for expired storages less often so as to bound the amount of work we + # do checking for expired storages + self.check_expired_frequency = max( + self.check_expired_frequency, len(self.maybe_storages_to_delete) + ) + + def get_tensor_memo(self, t): + return self.tensor_memo.get(WeakIdRef(t), None) + + def set_tensor_memo(self, t, v): + # hold a weak ref to self, otherwise it will be kept alive + # by the del_ten closure + self_weak_ref = weakref.ref(self) + if t.is_sparse or t.is_mkldnn: + weak_st = None + else: + weak_st = StorageWeakRef(t._typed_storage()) + tensor_ref_key = WeakIdRef(t) + + def del_ten(): + # tensor outlives the converter + self_ref = self_weak_ref() + if self_ref is None: + return + # on shutdown, tensor_ref_key may not be in memo + self_ref.tensor_memo.pop(tensor_ref_key, None) + if weak_st and weak_st.expired(): + self_ref.storage_memo.pop(weak_st, None) + elif weak_st is not None: + # [expired-storages] + # NB: even though the tensor has died, + # the deallocation of its storage can take longer, + # even when the storage has no other uses/views. + # In this case, the StorageWeakRef object will be kept alive + # longer than it needs to be, however the storage itself + # will be deallocated. We retain the possibly dead storages + # and periodically check if any of them are expired and + # can be freed. + self_ref.maybe_storages_to_delete.append(weak_st) + + weakref.finalize(t, del_ten) + self.tensor_memo[tensor_ref_key] = v + + # NB: doesn't actually return a storage, because meta storage is + # not supported + def meta_storage(self, s, callback): + # NB: TypedStorage is freshly allocated and cannot be used as hash + # key index. + + # Use a Weak Ref to s in order to not leak memory + swr = StorageWeakRef(s) + if swr not in self.storage_memo: + self.storage_memo[swr] = callback( + lambda: torch.empty(s.size(), dtype=torch.uint8, device="meta") + ).untyped_storage() + return self.storage_memo[swr] + + # This function assumes that it's possible to do the conversion + # NB: name here is used in a conventional way by Dynamo; it corresponds + # precisely to the Source.name() of the tensor we're fakeifying and + # corresponds to a valid Python expression. When we construct sub-names + # as part of this process, we will maintain this invariant! (Even though + # other users of this may not need it this property to be upheld.) 
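+    # For example (illustrative): if the tensor being fakeified has a source
+    # named "L['x']", the sub-source used below for its base is
+    # AttrSource(source, "_base"), whose name is "L['x']._base", and the one
+    # for its gradient is "L['x'].grad".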
+ def meta_tensor( + self, + t, + shape_env=None, + callback=lambda t: t(), + source: Optional[Source] = None, + symbolic_context: Optional["SymbolicContext"] = None, + ): + from torch._subclasses.fake_tensor import FakeTensor + + if source is None: + from torch._dynamo.source import ConstantSource + + # TODO: make a dedicated UnknownSource for this? + source = ConstantSource( + f"__meta_utils_unknown_tensor{len(self.tensor_memo)}" + ) + + # This indicates you set no_dispatch() before calling into this + # function. This is an error: we may be creating fake tensors and + # will perform operations on them which need fake tensor mode to + # be active. You will segfault if you are in a no_dispatch() block. + assert not torch._C._dispatch_tls_local_exclude_set().has( + torch._C.DispatchKey.Python + ) + arg_cnt = self.arg_cnt + self.arg_cnt += 1 + + # When we make as_strided calls, we end up generating a guard + # that the new as_strided tensor is in bounds for the old storage + # for the base (since as_strided calls can "bust" out of their + # bounding box.) This guard is unnecessary: if a user is able + # to provide us a tensor with the view base setup this way, we + # don't need to produce a guard, because the fact that they + # were able to produce the view base means its in bounds. + # + # Now, ordinarily, this guard would be harmless. However, the + # generated guard refers to variables bound on the base variable. + # At the moment, Dynamo doesn't actually guard on x._base, because + # according to Voz this results in a lot of spurious invalidations, + # and also if the user doesn't directly make use of _base, its + # pointless anyway (because programs should be parametric over + # whether or not the input tensor is a view or not--unless you're + # mutating the input, but that's a whole 'nother ballgame). So + # for expediency, we suppress these guards so we don't have to + # deal with this (yet, anyway.) + # + # NB: An old version of this code suppressed guards for ALL operations + # happening during meta conversion, not just as_strided calls. + # This is too aggressive: we do duck sizing and 0/1 simplification + # as we allocate variables, and we do need to register guards for + # these cases. + maybe_suppress = contextlib.nullcontext + if shape_env is not None: + maybe_suppress = shape_env.suppress_guards + + def sym_sizes_strides_storage_offset( + t, src + ) -> Tuple[Tuple[int, ...], Tuple[int, ...], int]: + if shape_env is not None: + if isinstance(t, FakeTensor) and t.fake_mode.shape_env is shape_env: + # Don't reallocate the sizes; the shape envs are the same, + # so reuse the old sizes/strides/etc + return (t.size(), t.stride(), t.storage_offset()) + else: + return shape_env.create_symbolic_sizes_strides_storage_offset( + t, + src, + # Assume that the set of dims that are dynamic are the same between + # the wrapper tensor and any inner tensors. + # We can revisit this if this assumption does not hold + # for any important subclasses later. 
+ symbolic_context=symbolic_context, + ) + else: + assert symbolic_context is None + return (t.size(), t.stride(), t.storage_offset()) + + # see expired-storages + self.check_expired_count += 1 + if self.check_expired_count >= self.check_expired_frequency: + self.check_for_expired_weak_storages() + self.check_expired_count = 0 + + if self.get_tensor_memo(t) is None: + with torch.inference_mode(t.is_inference()): + if t.is_sparse: + is_leaf = safe_is_leaf(t) + r = callback( + lambda: torch.ops.aten._sparse_coo_tensor_with_dims( + t.sparse_dim(), + t.dense_dim(), + t.shape, + dtype=t.dtype, + layout=torch.sparse_coo, + device="meta", + ) + ) + assert safe_is_leaf(r), "the callback you passed in doesn't detach" + # Note [is_coalesced is dispatched] + # Strangely enough, is_coalesced() is a dispatched operator, + # which means that it will get caught by fake tensor mode. + # Ordinarily this would error, but there's some logic in + # fake tensor ensure this doesn't happen. + r._coalesced_(t.is_coalesced()) + if t.requires_grad: + r.requires_grad = True + if t.requires_grad and not is_leaf: + with torch.enable_grad(): + r = r.clone() + r._coalesced_(t.is_coalesced()) + elif t.is_mkldnn: + is_leaf = safe_is_leaf(t) + sizes, strides, _storage_offset = sym_sizes_strides_storage_offset( + t, source + ) + r = callback( + lambda: torch.empty_strided( + sizes, strides, dtype=t.dtype, device="meta" + ) + ) + assert safe_is_leaf(r), "the callback you passed in doesn't detach" + if t.requires_grad: + r.requires_grad = True + if t.requires_grad and not is_leaf: + with torch.enable_grad(): + r = r.clone() + elif t._is_view(): + # Construct views in two steps: recursively meta-fy their + # base, and then create view(s) off that. NB: doing it + # directly from storage is WRONG because this won't cause + # version counters to get shared. + assert t._is_view() + + from torch._dynamo.source import AttrSource + from torch.fx.experimental.symbolic_shapes import ( + DimDynamic, + StatelessSymbolicContext, + ) + + if shape_env and not t.is_nested and not t._base.is_nested: + base_symbolic_context = StatelessSymbolicContext( + dynamic_sizes=[DimDynamic.STATIC] * t._base.dim(), + constraint_sizes=[None] * t._base.dim(), + ) + else: + base_symbolic_context = None + base = self.meta_tensor( + t._base, + shape_env, + callback, + source=AttrSource(source, "_base"), + symbolic_context=base_symbolic_context, + ) + + def is_c_of_r(complex_dtype, real_dtype): + return ( + utils.is_complex_dtype(complex_dtype) + and utils.corresponding_real_dtype(complex_dtype) + == real_dtype + ) + + # In some situations, MetaConverter may be called in a + # context where autograd is disabled. For the _is_view + # assert to pass, we have to setup the autograd view + # metadata anyway. Do this by reenabling the + # ADInplaceOrView key. This is kind of a hack. + old_exclude = torch._C._dispatch_tls_is_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView + ) + torch._C._dispatch_tls_set_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView, False + ) + try: + if base.dtype == t.dtype: + pass + elif is_c_of_r(base.dtype, t.dtype): + base = torch.view_as_real(base) + elif is_c_of_r(t.dtype, base.dtype): + base = torch.view_as_complex(base) + else: + # This is not guaranteed to succeed. If it fails, it + # means there is another dtype-converting view function + # that hasn't been handled here + base = base.view(t.dtype) + + # This is very tricky. 
Naively, you might expect this + # to hold: + # + # if t.requires_grad and not safe_is_leaf(t) + # assert t._base.requires_grad + # + # But it's not true! As you can see in the following + # program: + # + # x = torch.zeros(4) + # y = x.view(1, 4) + # y.requires_grad = True + # z = y.view(1, 1, 4) + # assert z._base is x + # + # So we may have to do *two* views out of the base to + # recreate this situation. + def _view_from_base(base, t): + if t.is_nested: + # Nested tensors do not support as_strided, and + # hence,always have _view_func available. + # + # The unsafe version of _view_func omits + # checking whether the base passed in has the same + # metadata as the original base the view_func + # was originally executed with. (1) It is OK here, + # because we're calling it on the meta-ified base, + # so the metadata is guaranteed to be the same. + # (2) It is necessary because we don't actually + # want to guard on the base's metadata here. + return t._view_func_unsafe(base) + else: + ( + sizes, + strides, + storage_offset, + ) = sym_sizes_strides_storage_offset(t, source) + return base.as_strided(sizes, strides, storage_offset) + + if safe_is_leaf(t): + # Leaf views that track view metadata are created by + # creating a view inside a no_grad block + with torch.no_grad(), maybe_suppress(): + r = _view_from_base(base, t) + # As it's a leaf, we can directly assign requires_grad + r.requires_grad = t.requires_grad + else: + if t._base.requires_grad == t.requires_grad: + # Easy case, just run the view op + with torch.enable_grad(), maybe_suppress(): + r = _view_from_base(base, t) + + # NB: We don't actaully faithfully replicate + # autograd connectivity, but that doesn't matter + # today. See following for more info: + # https://gist.github.com/soulitzer/e03f015b314c3f5fcf80888c69390913 + else: + # Obscure case. Create a leaf view and give it the + # correct requires_grad, then do the final view. + # NB: Can't have a non-leaf without requiring grad! + assert t.requires_grad + with torch.no_grad(): + mid = base.view(base.shape) + mid.requires_grad = t.requires_grad + with torch.enable_grad(), maybe_suppress(): + r = _view_from_base(mid, t) + # The CreationMeta influences whether or not inplace + # mutation is an error or not. So we need to make + # sure we properly propagate this as well. + torch._C._autograd._set_creation_meta( + r, torch._C._autograd._get_creation_meta(t) + ) + finally: + torch._C._dispatch_tls_set_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView, old_exclude + ) + + else: + is_leaf = safe_is_leaf(t) + if not t.is_nested: + # Nested tensor subclasses have special logic for + # creating symbolic size/strides/storage_offset + ( + sizes, + strides, + storage_offset, + ) = sym_sizes_strides_storage_offset(t, source) + + def empty_create(inner_t, inner_src): + ( + inner_sizes, + inner_strides, + inner_storage_offset, + ) = sym_sizes_strides_storage_offset(inner_t, inner_src) + return torch.empty_strided( + inner_sizes, + inner_strides, + dtype=inner_t.dtype, + device="meta", + ) + + # If we have a subclass that desugars into dense tensors, + # perform our callback on each inner tensor. + if is_traceable_wrapper_subclass(t): + # Note: transform_subclass will use __tensor_unflatten__ to generate + # a fresh subclass wrapper, which is why sizes/strides are not passed in + # to the creation function here. + # We assume that if the inner tensors of the subclass are given symbolic sizes, + # their sizes will be used to construct the (symbolic) sizes of the wrapper tensor. 
+ from torch._dynamo.source import AttrSource + + if t.is_nested: + # Avoid circular import + from torch._dynamo.source import ( + TensorProperty, + TensorPropertySource, + ) + + # For nested tensors, manually do transform_subclass + # so we can insert some special processing on ctx + attrs, ctx = t.__tensor_flatten__() + transformed_tensors_dict = {} + orig_shape_env = None + for attr in attrs: + inner_t = getattr(t, attr) + if orig_shape_env is None: + orig_shape_env = ( + inner_t.fake_mode.shape_env + if isinstance(inner_t, FakeTensor) + else None + ) + transformed_tensors_dict[attr] = callback( + lambda: empty_create( + inner_t, AttrSource(source, attr) + ) + ) + # We expect JaggedTensor to have a 'ragged_size' in + # its context + assert isinstance(ctx, dict) + assert "ragged_size" in ctx + assert isinstance(t._size[1], torch.SymInt) + if orig_shape_env is shape_env: + # It's already fake and the shape envs line up, reuse the old size + # Do not assert singleton_int; it may already + # be a variable + ctx["ragged_size"] = t._size[1] + else: + assert t._size[1].node.singleton_int() is not None + # Replace the eager ragged size with our freshly + # allocated jagged size that has a source + ctx["ragged_size"] = shape_env.create_symintnode( + shape_env.create_symbol( + t._size[1], + TensorPropertySource( + source, TensorProperty.SIZE, 1 + ), + ), + hint=t._size[1], + ) + r = type(t).__tensor_unflatten__( + transformed_tensors_dict, ctx + ) + else: + r = transform_subclass( + t, + lambda attr, inner_t: callback( + lambda: empty_create( + inner_t, + AttrSource(source, attr), + ) + ), + ) + else: + r = callback( + lambda: torch.empty_strided( + sizes, + strides, + dtype=t.dtype, + device="meta", + ) + ) + assert safe_is_leaf(r), "the callback you passed in doesn't detach" + if t.requires_grad: + r.requires_grad = t.requires_grad + if not is_leaf: + # Fake up some autograd history. + with torch.enable_grad(): + # preserve_format is the default, but we want to + # emphasize how important it is to preserve + # format here + r = r.clone(memory_format=torch.preserve_format) + + # Graph-Break for wrapped tensors + if torch._C._functorch.is_functorch_wrapped_tensor(t): + return NotImplemented + + s = t.untyped_storage() + swr = StorageWeakRef(s) + if swr not in self.storage_memo and ( + r.is_nested + or ( + r.stride() == strides + and r.storage_offset() == storage_offset + ) + ): + # You're normal and happy, install the fresh storage into the memo + self.storage_memo[swr] = r.untyped_storage() + else: + # You're in crazy town; somehow you gave us a tensor + # that wasn't a view, but had nonzero storage offset, + # nontrivial strides (such that clone() couldn't + # preserve them), or already aliases with another + # tensor's storage. The most typical way to end + # up here is with set_. So use set_ to bludgeon this + # in. + r_s = self.meta_storage(s, callback=callback) + # NB: In principle, this should always work, but there + # is some subtle difference in the autograd metadata + # that means we will backprop the set_ call, even if + # r is declared as an input to grad. + # See https://github.com/pytorch/pytorch/issues/87956 + # for the reproducer. + # NB: The in_kernel_invocation_manager here is necessary + # for fake tensor. If we run the set_ call with fake + # tensor on, r will improperly report that it is NOT a + # meta tensor but a cpu tensor, and then the set_ call + # will fail due to device mismatch. 
no_dispatch() is + # not enough, because the fake tensor will still claim + # to be a CPU tensor and you'll end up in the CPU + # kernel. Arguably this is a hack; a cleaner way to + # solve this is to have a FakeStorage concept which + # would report it's CPU device--no problem now! But + # this is difficult to do because we don't have storage + # subclasses. Relevant test is + # DynamicShapesFunctionTests::test_add_dynamic_shapes in + # test/dynamo/test_dynamic_shapes.py + maybe_fake_mgr: ContextManager[None] = contextlib.nullcontext() + from torch._subclasses.fake_tensor import ( + in_kernel_invocation_manager, + maybe_get_fake_mode, + ) + + mb_fake_mode = maybe_get_fake_mode(r) + if mb_fake_mode is not None: + maybe_fake_mgr = in_kernel_invocation_manager(mb_fake_mode) + with maybe_fake_mgr, torch.no_grad(): + r.set_(r_s, storage_offset, sizes, strides) + + if safe_grad(t) is not None: + from torch._dynamo.source import AttrSource + + r.grad = self.meta_tensor( + safe_grad(t), + shape_env, + callback, + source=AttrSource(source, "grad"), + symbolic_context=symbolic_context, + ) + torch._C._set_conj(r, t.is_conj()) + torch._C._set_neg(r, t.is_neg()) + # This can be skipped if necessary for performance reasons + assert_metadata_eq(assert_eq, t, r, skip_symbolic=True) + self.set_tensor_memo(t, r) + + return self.get_tensor_memo(t) + + def __call__( + self, + t, + shape_env=None, + *, + callback=lambda t: t(), + source=None, + symbolic_context=None, + ): + # TODO: zero tensors? We appear to have eliminated them by + # excluding complex for now + + if isinstance(t, torch.Tensor) or is_traceable_wrapper_subclass(t): + if t.device.type != "xla" and any( + [ + t.is_sparse_csr, + t.layout in [torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc], + t.is_quantized, + t._is_view() and t._base is not None and t._base.is_sparse, + torch._is_functional_tensor(t), + t.device.type in ("lazy"), + # We need a way to test if a tensor is batched but there + # is no official APi to do it + # torch._C._is_batched(t), + ] + ): + # TODO: sparse should support meta + # NB technically to('meta') does work but our logging + # instrumentation will see the meta conversions and the + # tests all break so we just exclude this. In any case + # the to conversion isn't really right anyhow. + + if torch._is_functional_tensor(t) and t.device.type != "lazy": + if t._is_view(): + raise RuntimeError( + "Cannot safely fakify a view because this process drops the view information right now." + ) + + st = peek_interpreter_stack() + assert ( + st is None or st.key() == TransformType.Functionalize + ), "Expect st to be either None or have Functionalize transform key." 
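+                    # Summary of the two branches below: with no functorch
+                    # interpreter stack (the AOTAutograd case) we sync and
+                    # unwrap the C++ FunctionalTensorWrapper, fakeify the
+                    # inner tensor, and wrap the result back into a functional
+                    # tensor; under torch.func.functionalize we unwrap via
+                    # functorch, temporarily pop the interpreter stack,
+                    # fakeify, and re-wrap at the current functorch level.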
+ if st is None: + # the case of AOTAutograd + torch._sync(t) + unwrap_t = torch._from_functional_tensor(t) + with torch._dispatch.python.suspend_functionalization(): + fake_t = self.meta_tensor( + unwrap_t, + shape_env=shape_env, + callback=callback, + source=source, + symbolic_context=symbolic_context, + ) + out = torch._to_functional_tensor(fake_t) + torch._mirror_autograd_meta_to(fake_t, out) + return out + else: + # torch.func.functionalize + reapply_views = torch._C._functionalization_reapply_views_tls() + unwrap_t = _unwrap_functional_tensor(t, reapply_views) + pop_st_ctx = ( + torch._functorch.pyfunctorch.temporarily_pop_interpreter_stack() + ) + with pop_st_ctx: + fake_t = self.meta_tensor( + unwrap_t, + shape_env=shape_env, + callback=callback, + source=source, + symbolic_context=symbolic_context, + ) + return _wrap_functional_tensor(fake_t, current_level()) + self.miss += 1 + return NotImplemented + else: + self.hit += 1 + r = self.meta_tensor( + t, + shape_env=shape_env, + callback=callback, + source=source, + symbolic_context=symbolic_context, + ) + if type(t) is torch.nn.Parameter: + # NB: Cannot directly use Parameter constructor + # because that would force a detach, not desirable + r._is_param = True + return r + elif torch.overrides.is_tensor_like(t): + self.miss += 1 + return NotImplemented + else: + # non-Tensor types don't count as hit or miss + return t + + +import torch._prims_common as utils diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/schema_check_mode.py b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/schema_check_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..411e81325aab450023855b454955bdc0f4ae0237 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/schema_check_mode.py @@ -0,0 +1,196 @@ +from collections import namedtuple +from copy import deepcopy +from itertools import combinations + +import torch +from torch.fx.operator_schemas import normalize_function +from torch.testing._internal.jit_utils import clone_inputs +from torch.utils import _pytree as pytree +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils._pytree import tree_map + +# Named Tuples used within SchemaCheckMode +Mutation = namedtuple("Mutation", ["op_name", "arg_name"]) +Aliasing = namedtuple("Aliasing", ["op_name", "arg_name", "output_number"]) + +# Simplified naming for C++ classes +SchemaArgument = torch._C._SchemaArgument +SchemaArgType = torch._C._SchemaArgType +SchemaInfo = torch._C._SchemaInfo + +# This TorchDispatchMode Subclass is used to verify op schemas +# This TorchDispatchMode Scubclass currently: +# - Records the called ops +# - Checks for mutations on all inputs +# - Checks for aliasing on all inputs + + +class SchemaCheckMode(TorchDispatchMode): + def __init__(self): + # Information recorded for testing purposes. 
For example: + # - incorrect schemas + # - overly conservative schemas + self.ops = [] + self.mutated = [] + self.aliasing = [] + + def reset_cache(self): + self.ops.clear() + self.mutated.clear() + self.aliasing.clear() + + def display_ops(self): + print(*self.ops, sep=",") + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + def bitwise_equal(lhs, rhs): + if lhs.is_quantized: + # TODO: This is only OK if can't have NaN quantized; idk if + # this is actually true + return torch.equal(lhs, rhs) + else: + return torch.allclose(lhs, rhs, equal_nan=True) + + def has_mutated(before, after, md): + are_tensors = type(before) == torch.Tensor and type(after) == torch.Tensor + if ( + are_tensors + and before.layout != torch.sparse_csr + and after.layout != torch.sparse_csr + ): + return not ( + before.size() == after.size() + and bitwise_equal(before, after) + and md[0] == after.stride() + and md[1] == after._typed_storage()._cdata + ) + return False + + def has_aliased(lhs, rhs): + try: + return torch._C._overlaps(lhs, rhs) + except Exception as exception: + if str(exception).startswith("Cannot inspect value of type "): + return False + else: + raise exception + + def standardize_name(name): + return name if name != "self" else "input" + + def unwrap(e): + if isinstance(e, torch.Tensor) and not type(e) == torch.Tensor: + try: + return e.elem + except AttributeError as t: + return e + return e + + def parse_metadata(e): + if isinstance(e, torch.Tensor): + if not type(e) == torch.Tensor: + try: + current = e.elem + return ( + deepcopy(current.stride()), + current._typed_storage()._cdata, + ) + except AttributeError as t: + return None + # Sparse CSR tensors do not have strides or storage + elif e.layout != torch.sparse_csr: + return (deepcopy(e.stride()), e._typed_storage()._cdata) + return None + + self.ops.append(func._schema.name) + + # Clone and process arguments and outputs + pre_arguments = normalize_function( + func, args, kwargs, normalize_to_only_use_kwargs=True + ).kwargs + + c_p_args = dict(zip(pre_arguments.keys(), clone_inputs(pre_arguments.values()))) + cloned_arguments = { + name: tree_map(unwrap, c_p_args.get(name)) for name in c_p_args + } + cloned_metadata = { + name: [ + parse_metadata(a) for a in pytree.tree_leaves(pre_arguments.get(name)) + ] + for name in pre_arguments + } + + out = func(*args, **kwargs) + arguments = { + name: tree_map(unwrap, pre_arguments.get(name)) for name in pre_arguments + } + tuple_out = out if isinstance(out, tuple) else (out,) + tuple_out = tree_map(unwrap, tuple_out) + + schema_info = SchemaInfo(func._schema) + schema_info.add_argument_values(pre_arguments) + + # Process arguments with outputs + for i in range(len(func._schema.arguments)): + arg = func._schema.arguments[i] + name = standardize_name(arg.name) + if arguments.get(name) is not None: + before = cloned_arguments.get(name) + md = cloned_metadata.get(name) + after = arguments.get(name) + for j in range(len(tuple_out)): + # aten::_unsafe_view is intended to have incorrect aliasing notation (hence unsafe) + unsafe_ops = ("aten::_unsafe_view", "aten::unsafe_split") + if ( + has_aliased(tuple_out[j], after) + and func._schema.name not in unsafe_ops + ): + if not schema_info.may_contain_alias( + SchemaArgument(SchemaArgType.output, j), + SchemaArgument(SchemaArgType.input, i), + ): + raise RuntimeError( + f"Argument {name} is not defined to alias output but was aliasing" + ) + else: + self.aliasing.append( + Aliasing(func._schema.name, name, f"output_{j}") + ) + if after is 
tuple_out[j] and isinstance(after, torch.Tensor): + # Only mutable ops e.g. (add_, add.out) are allowed to directly return inputs. + if not schema_info.is_mutable( + SchemaArgument(SchemaArgType.input, i) + ) and func not in [ + torch.ops.aten.lift.default, + torch.ops.aten.lift_fresh.default, + ]: + raise RuntimeError( + f"""\ +Dispatcher operators below autograd are not allowed to directly return inputs. +However, we found that `outputs[{str(j)}] is {name}""" + ) + if any( + has_mutated(a, b, c) + for a, b, c in zip( + pytree.tree_leaves(before), pytree.tree_leaves(after), md + ) + ): + if not schema_info.is_mutable( + SchemaArgument(SchemaArgType.input, i) + ): + raise RuntimeError( + f"Argument {name} is not defined as mutable but was mutated" + ) + else: + self.mutated.append(Mutation(func._schema.name, name)) + + # Aliasing between outputs + for i, j in combinations(range(len(func._schema.returns)), 2): + if has_aliased(tuple_out[i], tuple_out[j]): + if not schema_info.may_contain_alias( + SchemaArgument(SchemaArgType.output, i), + SchemaArgument(SchemaArgType.output, j), + ): + raise RuntimeError(f"Outputs {i} and {j} alias unexpectedly") + + return out diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db25fac23891bff47edf02284d7bf9be9dae9001 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3299ac8a6fd2168fa21aebd82fc9da037bd68e1a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/preprocess.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/preprocess.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e8808a8981d8ddf14a18e98652fff02980a3c38 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/preprocess.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/preprocess.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..f393929bb7c2bbca028d43d8bf1ef27af7ccb23f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/_coreml/preprocess.py @@ -0,0 +1,146 @@ +import hashlib +import json +from typing import Dict, Tuple + +import coremltools as ct # type: ignore[import] +from coremltools.converters.mil.input_types import TensorType # type: ignore[import] +from coremltools.converters.mil.mil import types # type: ignore[import] +from coremltools.models.neural_network import 
quantization_utils # type: ignore[import] + +import torch + +CT_METADATA_VERSION = "com.github.apple.coremltools.version" +CT_METADATA_SOURCE = "com.github.apple.coremltools.source" + + +class ScalarType: + Float = 0 + Double = 1 + Int = 2 + Long = 3 + Undefined = 4 + + +# Supported Tensor types in coremltools: +# https://github.com/apple/coremltools/blob/main/coremltools/converters/mil/frontend/torch/converter.py#L28 +torch_to_mil_types = { + ScalarType.Float: types.fp32, + ScalarType.Double: types.fp64, + ScalarType.Int: types.int32, + ScalarType.Long: types.int64, +} + + +class CoreMLComputeUnit: + CPU = "cpuOnly" + CPUAndGPU = "cpuAndGPU" + ALL = "all" + + +class CoreMLQuantizationMode: + LINEAR = "linear" + LINEAR_SYMMETRIC = "linear_symmetric" + NONE = "none" + + +def TensorSpec(shape, dtype=ScalarType.Float): + return (shape, dtype) + + +def CompileSpec( + inputs, + outputs, + backend=CoreMLComputeUnit.CPU, + allow_low_precision=True, + quantization_mode=CoreMLQuantizationMode.NONE, + mlmodel_export_path=None, +): + return ( + inputs, + outputs, + backend, + allow_low_precision, + quantization_mode, + mlmodel_export_path, + ) + + +def _check_enumerated_shape(shape): + for s in shape: + if not isinstance(s, (list, tuple)): + return False + return True + + +def _convert_to_mil_type(shape, dtype, name: str): + mil_shape = shape + if _check_enumerated_shape(shape): + mil_shape = ct.EnumeratedShapes(shape) + ml_type = TensorType(shape=mil_shape, dtype=torch_to_mil_types[dtype]) + ml_type.name = name + return ml_type + + +def preprocess(script_module: torch._C.ScriptObject, compile_spec: Dict[str, Tuple]): + spec = compile_spec["forward"] + ( + input_specs, + output_specs, + backend, + allow_low_precision, + quantization_mode, + mlmodel_export_path, + ) = spec + mil_inputs = [] + inputs = [] + for index, input in enumerate(input_specs): + shape, dtype = input + name = "input_" + str(index) + inputs.append([name, str(dtype), str(shape)]) + ml_type = _convert_to_mil_type(shape, dtype, name) + mil_inputs.append(ml_type) + model = torch.jit.RecursiveScriptModule._construct(script_module, lambda x: None) + mlmodel = ct.convert(model, inputs=mil_inputs) + + if quantization_mode != CoreMLQuantizationMode.NONE: + quant_model_spec = quantization_utils.quantize_weights( + mlmodel, nbits=8, quantization_mode=quantization_mode + ) + mlmodel = ct.models.MLModel(quant_model_spec) + + spec = mlmodel.get_spec() + assert len(spec.description.output) == len(output_specs) # type: ignore[attr-defined] + outputs = [] + for index, output in enumerate(output_specs): + shape, dtype = output + name = spec.description.output[index].name # type: ignore[attr-defined] + outputs.append([name, str(dtype), str(shape)]) + mlmodel = ct.models.model.MLModel(spec) + print(mlmodel) + + if mlmodel_export_path is not None: + print(f"Saving CoreML .mlmodel file to {mlmodel_export_path}") + mlmodel.save(mlmodel_export_path) + + config = { + "spec_ver": str(spec.specificationVersion), # type: ignore[attr-defined] + "backend": backend, + "allow_low_precision": str(allow_low_precision), + } + metadata = { + "coremltool_ver": mlmodel.user_defined_metadata[CT_METADATA_VERSION], + "torch_ver": mlmodel.user_defined_metadata[CT_METADATA_SOURCE], + } + coreml_compile_spec = { + "inputs": inputs, + "outputs": outputs, + "config": config, + "metadata": metadata, + } + mlmodel = spec.SerializeToString() # type: ignore[attr-defined] + + return { + "model": mlmodel, + "hash": str(hashlib.sha256(mlmodel).hexdigest()), + "extra": 
json.dumps(coreml_compile_spec), + } diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ac0c097503f6c95cced3f70f97dbac7eb98391c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/prepare.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/prepare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be6f4e6f054863961146e46f6bf7111c7d894140 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/prepare.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/serializer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/serializer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9b8c66f37a524df103750cdaed1f21016a45423 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/serializer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/prepare.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/prepare.py new file mode 100644 index 0000000000000000000000000000000000000000..d5d69b9d648b3280c96becb53e8d6af90c9a57e2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/prepare.py @@ -0,0 +1,198 @@ +from typing import List, Optional + +import torch +from torch.backends._nnapi.serializer import _NnapiSerializer + +ANEURALNETWORKS_PREFER_LOW_POWER = 0 +ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1 +ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2 + + +class NnapiModule(torch.nn.Module): + """Torch Module that wraps an NNAPI Compilation. + + This module handles preparing the weights, initializing the + NNAPI TorchBind object, and adjusting the memory formats + of all inputs and outputs. 
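+
+    Input and output memory formats are passed in as small integers matching
+    DimOrder in serializer.py: 0 keeps a tensor contiguous (NCHW), while 1
+    permutes it to channels-last (NHWC) on the way in and back to NCHW on the
+    way out (see forward below).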
+ """ + + # _nnapi.Compilation is defined + comp: Optional[torch.classes._nnapi.Compilation] # type: ignore[name-defined] + weights: List[torch.Tensor] + out_templates: List[torch.Tensor] + + def __init__( + self, + shape_compute_module: torch.nn.Module, + ser_model: torch.Tensor, + weights: List[torch.Tensor], + inp_mem_fmts: List[int], + out_mem_fmts: List[int], + compilation_preference: int, + relax_f32_to_f16: bool, + ): + super().__init__() + self.shape_compute_module = shape_compute_module + self.ser_model = ser_model + self.weights = weights + self.inp_mem_fmts = inp_mem_fmts + self.out_mem_fmts = out_mem_fmts + self.out_templates = [] + self.comp = None + self.compilation_preference = compilation_preference + self.relax_f32_to_f16 = relax_f32_to_f16 + + @torch.jit.export + def init(self, args: List[torch.Tensor]): + assert self.comp is None + self.out_templates = self.shape_compute_module.prepare(self.ser_model, args) # type: ignore[operator] + self.weights = [w.contiguous() for w in self.weights] + comp = torch.classes._nnapi.Compilation() + comp.init2( + self.ser_model, + self.weights, + self.compilation_preference, + self.relax_f32_to_f16, + ) + + self.comp = comp + + def forward(self, args: List[torch.Tensor]) -> List[torch.Tensor]: + if self.comp is None: + self.init(args) + comp = self.comp + assert comp is not None + outs = [torch.empty_like(out) for out in self.out_templates] + + assert len(args) == len(self.inp_mem_fmts) + fixed_args = [] + for idx in range(len(args)): + fmt = self.inp_mem_fmts[idx] + # These constants match the values in DimOrder in serializer.py + # TODO: See if it's possible to use those directly. + if fmt == 0: + fixed_args.append(args[idx].contiguous()) + elif fmt == 1: + fixed_args.append(args[idx].permute(0, 2, 3, 1).contiguous()) + else: + raise Exception("Invalid mem_fmt") + comp.run(fixed_args, outs) + assert len(outs) == len(self.out_mem_fmts) + for idx in range(len(self.out_templates)): + fmt = self.out_mem_fmts[idx] + # These constants match the values in DimOrder in serializer.py + # TODO: See if it's possible to use those directly. + if fmt in (0, 2): + pass + elif fmt == 1: + outs[idx] = outs[idx].permute(0, 3, 1, 2) + else: + raise Exception("Invalid mem_fmt") + return outs + + +def convert_model_to_nnapi( + model, + inputs, + serializer=None, + return_shapes=None, + use_int16_for_qint16=False, + compilation_preference=ANEURALNETWORKS_PREFER_SUSTAINED_SPEED, + relax_f32_to_f16=False, +): + ( + shape_compute_module, + ser_model_tensor, + used_weights, + inp_mem_fmts, + out_mem_fmts, + retval_count, + ) = process_for_nnapi( + model, inputs, serializer, return_shapes, use_int16_for_qint16 + ) + + nnapi_model = NnapiModule( + shape_compute_module, + ser_model_tensor, + used_weights, + inp_mem_fmts, + out_mem_fmts, + compilation_preference, + relax_f32_to_f16, + ) + + class NnapiInterfaceWrapper(torch.nn.Module): + """NNAPI list-ifying and de-list-ifying wrapper. + + NNAPI always expects a list of inputs and provides a list of outputs. + This module allows us to accept inputs as separate arguments. + It returns results as either a single tensor or tuple, + matching the original module. + """ + + def __init__(self, mod): + super().__init__() + self.mod = mod + + wrapper_model_py = NnapiInterfaceWrapper(nnapi_model) + wrapper_model = torch.jit.script(wrapper_model_py) + # TODO: Maybe make these names match the original. 
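+    # For example (illustrative), with two inputs and retval_count == 3 the
+    # generated TorchScript below is:
+    #
+    #   def forward(self, arg_0, arg_1):
+    #       retvals = self.mod([arg_0, arg_1])
+    #       return retvals[0], retvals[1], retvals[2],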
+ arg_list = ", ".join(f"arg_{idx}" for idx in range(len(inputs))) + if retval_count < 0: + ret_expr = "retvals[0]" + else: + ret_expr = "".join(f"retvals[{idx}], " for idx in range(retval_count)) + wrapper_model.define( + f"def forward(self, {arg_list}):\n" + f" retvals = self.mod([{arg_list}])\n" + f" return {ret_expr}\n" + ) + return wrapper_model + + +def process_for_nnapi( + model, inputs, serializer=None, return_shapes=None, use_int16_for_qint16=False +): + model = torch.jit.freeze(model) + + if isinstance(inputs, torch.Tensor): + inputs = [inputs] + + serializer = serializer or _NnapiSerializer( + config=None, use_int16_for_qint16=use_int16_for_qint16 + ) + ( + ser_model, + used_weights, + inp_mem_fmts, + out_mem_fmts, + shape_compute_lines, + retval_count, + ) = serializer.serialize_model(model, inputs, return_shapes) + ser_model_tensor = torch.tensor(ser_model, dtype=torch.int32) + + # We have to create a new class here every time this function is called + # because module.define adds a method to the *class*, not the instance. + class ShapeComputeModule(torch.nn.Module): + """Code-gen-ed module for tensor shape computation. + + module.prepare will mutate ser_model according to the computed operand + shapes, based on the shapes of args. Returns a list of output templates. + """ + + pass + + shape_compute_module = torch.jit.script(ShapeComputeModule()) + real_shape_compute_lines = [ + "def prepare(self, ser_model: torch.Tensor, args: List[torch.Tensor]) -> List[torch.Tensor]:\n", + ] + [f" {line}\n" for line in shape_compute_lines] + shape_compute_module.define("".join(real_shape_compute_lines)) + + return ( + shape_compute_module, + ser_model_tensor, + used_weights, + inp_mem_fmts, + out_mem_fmts, + retval_count, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/serializer.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/serializer.py new file mode 100644 index 0000000000000000000000000000000000000000..af6b046ec2133bac28712e98ffbdcce509351aab --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/_nnapi/serializer.py @@ -0,0 +1,2193 @@ +import array +import enum +import functools +import logging +import struct +import sys +from typing import List, NamedTuple, Optional, Tuple + +import torch + + +# TODO: Add type annotations +# TODO: Check tensor types for ops + + +LOG = logging.getLogger("nnapi_serialize") + + +class NNAPI_OperandCode: + FLOAT32 = 0 + INT32 = 1 + UINT32 = 2 + TENSOR_FLOAT32 = 3 + TENSOR_INT32 = 4 + TENSOR_QUANT8_ASYMM = 5 + BOOL = 6 + TENSOR_QUANT16_SYMM = 7 + TENSOR_FLOAT16 = 8 + TENSOR_BOOL8 = 9 + FLOAT16 = 10 + TENSOR_QUANT8_SYMM_PER_CHANNEL = 11 + TENSOR_QUANT16_ASYMM = 12 + + +class NNAPI_OperationCode: + ADD = 0 + AVERAGE_POOL_2D = 1 + CONCATENATION = 2 + CONV_2D = 3 + DEPTHWISE_CONV_2D = 4 + DEPTH_TO_SPACE = 5 + DEQUANTIZE = 6 + EMBEDDING_LOOKUP = 7 + FLOOR = 8 + FULLY_CONNECTED = 9 + HASHTABLE_LOOKUP = 10 + L2_NORMALIZATION = 11 + L2_POOL_2D = 12 + LOCAL_RESPONSE_NORMALIZATION = 13 + LOGISTIC = 14 + LSH_PROJECTION = 15 + LSTM = 16 + MAX_POOL_2D = 17 + MUL = 18 + RELU = 19 + RELU1 = 20 + RELU6 = 21 + RESHAPE = 22 + RESIZE_BILINEAR = 23 + RNN = 24 + SOFTMAX = 25 + SPACE_TO_DEPTH = 26 + SVDF = 27 + TANH = 28 + BATCH_TO_SPACE_ND = 29 + DIV = 30 + MEAN = 31 + PAD = 32 + SPACE_TO_BATCH_ND = 33 + SQUEEZE = 34 + STRIDED_SLICE = 35 + SUB = 36 + TRANSPOSE = 37 + ABS = 38 + ARGMAX = 39 + ARGMIN = 40 + AXIS_ALIGNED_BBOX_TRANSFORM = 41 + BIDIRECTIONAL_SEQUENCE_LSTM = 42 + BIDIRECTIONAL_SEQUENCE_RNN = 
43 + BOX_WITH_NMS_LIMIT = 44 + CAST = 45 + CHANNEL_SHUFFLE = 46 + DETECTION_POSTPROCESSING = 47 + EQUAL = 48 + EXP = 49 + EXPAND_DIMS = 50 + GATHER = 51 + GENERATE_PROPOSALS = 52 + GREATER = 53 + GREATER_EQUAL = 54 + GROUPED_CONV_2D = 55 + HEATMAP_MAX_KEYPOINT = 56 + INSTANCE_NORMALIZATION = 57 + LESS = 58 + LESS_EQUAL = 59 + LOG = 60 + LOGICAL_AND = 61 + LOGICAL_NOT = 62 + LOGICAL_OR = 63 + LOG_SOFTMAX = 64 + MAXIMUM = 65 + MINIMUM = 66 + NEG = 67 + NOT_EQUAL = 68 + PAD_V2 = 69 + POW = 70 + PRELU = 71 + QUANTIZE = 72 + QUANTIZED_16BIT_LSTM = 73 + RANDOM_MULTINOMIAL = 74 + REDUCE_ALL = 75 + REDUCE_ANY = 76 + REDUCE_MAX = 77 + REDUCE_MIN = 78 + REDUCE_PROD = 79 + REDUCE_SUM = 80 + ROI_ALIGN = 81 + ROI_POOLING = 82 + RSQRT = 83 + SELECT = 84 + SIN = 85 + SLICE = 86 + SPLIT = 87 + SQRT = 88 + TILE = 89 + TOPK_V2 = 90 + TRANSPOSE_CONV_2D = 91 + UNIDIRECTIONAL_SEQUENCE_LSTM = 92 + UNIDIRECTIONAL_SEQUENCE_RNN = 93 + RESIZE_NEAREST_NEIGHBOR = 94 + + +class NNAPI_FuseCode: + FUSED_NONE = 0 + FUSED_RELU = 1 + FUSED_RELU1 = 2 + FUSED_RELU6 = 3 + + +class OperandValueSourceType: + IMMEDIATE = 0 + NUMBERED_BUFFER = 2 + NUMBERED_MEMORY = 3 + + +# Scalar types that appear explicitly in models. +# These must be kept in sync with +# AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS. +# TODO: Expose these directly to Python to avoid maintaining this list. +class TorchScalarTypes(enum.Enum): + QUINT8 = 13 + + +def approx_equal(lhs, rhs, tolerance=1e-6): + return abs(lhs - rhs) <= tolerance * min(lhs, rhs) + + +def tensor_size(op_type, dims): + ITEM_SIZES = { + NNAPI_OperandCode.TENSOR_FLOAT32: 4, + NNAPI_OperandCode.TENSOR_INT32: 4, + NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: 1, + NNAPI_OperandCode.TENSOR_QUANT16_SYMM: 2, + NNAPI_OperandCode.TENSOR_QUANT16_ASYMM: 2, + } + size = ITEM_SIZES[op_type] + for d in dims: + size *= d + return size + + +def change_element(tup, index, value): + ls = list(tup) + ls[index] = value + return tuple(ls) + + +class ConvPoolArgs2d(NamedTuple): + """Configuration arguments for a convolution.""" + + kernel_h: int + kernel_w: int + stride_h: int + stride_w: int + pad_t: int + pad_b: int + pad_l: int + pad_r: int + dilation_h: int + dilation_w: int + group: int + + +class DimOrder(enum.Enum): + PRESUMED_CONTIGUOUS = 0 + CHANNELS_LAST = 1 + SCALAR_OR_VECTOR = 2 + UNKNOWN_CONSTANT = 999 + + +class Operand(NamedTuple): + """Represenation of an NNAPI operand.""" + + # NNAPI operand type. One of NNAPI_OperandCode. + # TODO: Make this an enum. + op_type: int + + # This is always the PyTorch shape, which is NCHW for feature maps. + # The actual NNAPI operand might have a transposed shape. + # we use 0 for load time dynamic shapes & -1 for runtime dynamic shapes + shape: Tuple[int, ...] + + # Specifies how the shape of the operand that we define in NNAPI + # relates to the shape we track above. + # - PRESUMED_CONTIGUOUS: physical NNAPI operand will exactly match + # the shape of the PyTorch tensor. + # - CHANNELS_LAST: The PyTorch tensor is expected to be NCHW, and + # the NNAPI operand will be represented explicitly as NHWC. + dim_order: DimOrder + + # Quantization params + scale: float + zero_point: int + + def use_nchw(self): + if self.dim_order is DimOrder.PRESUMED_CONTIGUOUS: + return True + if self.dim_order is DimOrder.CHANNELS_LAST: + return False + raise Exception("Unknown dim order") + + +def broadcast_shapes(shape1, shape2): + assert len(shape1) > 0 + assert len(shape2) > 0 + s1 = list(shape1) + s2 = list(shape2) + # TODO: Support non-equal-rank broadcast where semantics match. 
+ # This can be tricky for NHWC tensors because dimension orders + # don't match between PT and NNAPI, even though semantics match. + if len(s1) > len(s2): + # s2 = [1] * (len(s1) - len(s2)) + s2 + raise Exception("Non-equal-rank broadcast is not supported yet.") + if len(s2) > len(s1): + # s3 = [1] * (len(s2) - len(s1)) + s1 + raise Exception("Non-equal-rank broadcast is not supported yet.") + ret = [] + for d1, d2 in zip(s1, s2): + if d1 == 1: + ret.append(d2) + elif d2 == 1: + ret.append(d1) + elif d1 == d2: + ret.append(d1) + else: + raise Exception(f"Cannot broadcast shapes: {shape1} and {shape2}") + return tuple(ret) + + +def get_conv_pool_shape(image_shape, args, out_ch, transpose): + batch, in_c, in_h, in_w = image_shape + + # TODO: Handle dilation + if args.dilation_h != 1 or args.dilation_w != 1: + raise Exception("Dilation not supported yet.") + + if transpose: + out_h = (in_h - 1) * args.stride_h + args.kernel_h - args.pad_t - args.pad_b + out_w = (in_w - 1) * args.stride_w + args.kernel_w - args.pad_l - args.pad_l + else: + out_h = (in_h - args.kernel_h + args.pad_t + args.pad_b) // args.stride_h + 1 + out_w = (in_w - args.kernel_w + args.pad_l + args.pad_r) // args.stride_w + 1 + + # Handle variable-sized tensors. + if in_h == 0: + out_h = 0 + if in_w == 0: + out_w = 0 + + out_shape = (batch, out_ch, out_h, out_w) + return out_shape + + +def fix_shape(shape, dim_order): + # Return the actual shape that an operand should have in NNAPI, + # given a PyTorch shape and dimension order. This is where we + # convert from PyTorch's "always NCHW" shape to explicit NHWC. + if dim_order is DimOrder.PRESUMED_CONTIGUOUS: + return shape + if dim_order is DimOrder.CHANNELS_LAST: + return tuple([shape[0]] + list(shape[2:]) + [shape[1]]) + if dim_order is DimOrder.SCALAR_OR_VECTOR: + assert len(shape) == 0 or len(shape) == 1 + return shape + if dim_order is DimOrder.UNKNOWN_CONSTANT: + # XXX think this through + return shape + raise Exception(f"Bad dim_order: {dim_order!r}.") + + +def reverse_map_dim(dim_order, d): + # Return the original PyTorch dimension position for a given dimension. + # d should be the dimension that NNAPI will see. + # reverse_map_dim(PRESUMED_CONTIGUOUS, x) == x + # reverse_map_dim(CHANNELS_LAST, 3) == 1 + if dim_order in (DimOrder.PRESUMED_CONTIGUOUS, DimOrder.SCALAR_OR_VECTOR): + return d + assert dim_order is DimOrder.CHANNELS_LAST + return [0, 2, 3, 1][d] + + +def flex_name(op_id, dim): + # Return the local variable name for the computed flexible size + # for a given op and dimension. + return f"s_{op_id}_{dim}" + + +class _NnapiSerializer: + def __init__(self, config, use_int16_for_qint16=False): + self.operands = [] + self.values = [] + self.operations = [] + self.value_data = [] + self.operation_args = [] + self.inputs = [] + self.outputs = [] + self.flexible_shape_computation_lines = [] + + self.modules = {} + self.constants = {} + self.tensor_sequences = {} + self.jitval_operand_map = {} + self.cached_immediates = {} + self.used_weights = [] + self.weight_offset = 0 + self.use_int16_for_qint16 = use_int16_for_qint16 + + if config is None: + config = {} + + def get_next_operand_id(self): + return len(self.operands) + + # Add a tensor operand corresponding to a JIT Value. + # Returns the NNAPI operand ID. Can be looked up later with + # get_tensor_operand_by_jitval. 
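+    # For example (illustrative): the first operand registered gets id 0, the
+    # next id 1, and so on; a later get_tensor_operand_by_jitval(jitval) call
+    # returns that id together with its Operand record.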
+ def add_tensor_operand(self, jitval, oper): + assert isinstance(oper, Operand) + if jitval in self.jitval_operand_map: + raise Exception(f"Duplicate tensor: {jitval!r}") + + operand_id = self.get_next_operand_id() + self.operands.append(oper) + self.jitval_operand_map[jitval] = operand_id + return operand_id + + # Add a tensor operand that does not correspond to a JIT Value. + # Useful for cases where multiple NNAPI operands are required + # to implement one JIT IR node. Returns the NNAPI operand ID. + def add_anonymous_tensor_operand(self, oper): + assert isinstance(oper, Operand) + operand_id = self.get_next_operand_id() + self.operands.append(oper) + return operand_id + + def torch_tensor_to_operand(self, tensor, dim_order): + dtype = str(tensor.dtype).replace("torch.", "") + scale = 0.0 + zero_point = 0 + if dtype == "float32": + op_type = NNAPI_OperandCode.TENSOR_FLOAT32 + elif dtype == "int32": + op_type = NNAPI_OperandCode.TENSOR_INT32 + elif dtype == "quint8": + op_type = NNAPI_OperandCode.TENSOR_QUANT8_ASYMM + scale = tensor.q_scale() + zero_point = tensor.q_zero_point() + elif dtype == "qint32": + op_type = NNAPI_OperandCode.TENSOR_INT32 + scale = tensor.q_scale() + zero_point = tensor.q_zero_point() + assert zero_point == 0 + elif dtype == "int16": + if self.use_int16_for_qint16: + nnapi_dtype = getattr(tensor, "nnapi_dtype", None) + op_codes = ( + NNAPI_OperandCode.TENSOR_QUANT16_SYMM, + NNAPI_OperandCode.TENSOR_QUANT16_ASYMM, + ) + if nnapi_dtype in op_codes: + op_type = nnapi_dtype + scale = tensor.nnapi_scale + zero_point = tensor.nnapi_zero_point + else: + raise Exception( + f"`nnapi_type` needs to be one of {op_codes} for `int16`" + ) + else: + raise Exception( + "`int16` isn't supported. If you're trying to represent NNAPI" + " qint16 with Pytorch int16, set `use_int16_for_qint16 = True`" + ) + else: + raise Exception(f"Can't handle input with dtype '{tensor.dtype}'") + return Operand( + shape=tuple(tensor.shape), + op_type=op_type, + dim_order=dim_order, + scale=scale, + zero_point=zero_point, + ) + + def add_tensor_operand_for_input(self, arg_idx, jitval, tensor): + dim_order = ( + DimOrder.CHANNELS_LAST + if getattr(tensor, "nnapi_nhwc", False) + else DimOrder.PRESUMED_CONTIGUOUS + ) + toper = self.torch_tensor_to_operand(tensor, dim_order) + operand_id = self.add_tensor_operand(jitval, toper) + self.inputs.append(operand_id) + for dim, size in enumerate(tensor.shape): + if size == 0: + self.compute_operand_shape( + operand_id, dim, f"args[{arg_idx}].shape[{dim}]" + ) + return operand_id + + def add_tensor_operand_for_weight( + self, tensor, dim_order=DimOrder.UNKNOWN_CONSTANT + ): + toper = self.torch_tensor_to_operand(tensor, dim_order) + operand_id = len(self.operands) + self.operands.append(toper) + tsize = tensor_size(toper.op_type, toper.shape) + psize = ((tsize - 1) | 0x3) + 1 + self.values.append((operand_id, OperandValueSourceType.NUMBERED_BUFFER)) + buf_num = len(self.used_weights) + offset = 0 + self.value_data.append(struct.pack("iii", buf_num, offset, tsize)) + # For NHWC NNAPI op, lay out data in the same dim order by permuting torch tensor + if dim_order == DimOrder.CHANNELS_LAST: + tensor = tensor.permute(0, 2, 3, 1) + self.used_weights.append(tensor) + return operand_id + + def add_immediate_operand(self, code, value, dims): + assert isinstance(dims, tuple) + cache_key = (code, value) + if cache_key not in self.cached_immediates: + operand_id = len(self.operands) + self.operands.append(Operand(code, dims, DimOrder.SCALAR_OR_VECTOR, 0.0, 0)) + 
self.values.append((operand_id, OperandValueSourceType.IMMEDIATE)) + self.value_data.append(value) + self.cached_immediates[cache_key] = operand_id + return self.cached_immediates[cache_key] + + def add_immediate_int_scalar(self, value): + return self.add_immediate_operand( + NNAPI_OperandCode.INT32, struct.pack("i", value), () + ) + + def add_immediate_float_scalar(self, value): + return self.add_immediate_operand( + NNAPI_OperandCode.FLOAT32, struct.pack("f", value), () + ) + + def add_immediate_bool_scalar(self, value): + return self.add_immediate_operand( + NNAPI_OperandCode.BOOL, b"\x01" if value else b"\x00", () + ) + + def add_immediate_int_vector(self, value): + return self.add_immediate_operand( + NNAPI_OperandCode.TENSOR_INT32, + array.array("i", value).tobytes(), + (len(value),), + ) + + def has_operand_for_jitval(self, jitval): + return jitval in self.jitval_operand_map + + def get_tensor_operand_by_jitval(self, jitval): + operand_id = self.jitval_operand_map[jitval] + return (operand_id, self.operands[operand_id]) + + def get_tensor_operand_by_jitval_fixed_size(self, jitval): + op_id, oper = self.get_tensor_operand_by_jitval(jitval) + for s in oper.shape: + if s == 0: + # TODO: Improve this error message, possibly after converting + # many callsites to support flexible size. + raise Exception("Flexible size is not supported for this operand.") + if s < 0: + # runtime flex + LOG.warning("Operand %s has runtime flex shape", oper) + return op_id, oper + + def get_tensor_operand_or_constant( + self, jitval, dim_order=DimOrder.PRESUMED_CONTIGUOUS + ): + operand_id = self.jitval_operand_map.get(jitval) + if operand_id is None: + _, value = self.get_constant_value(jitval, "TensorType") + operand_id = self.add_tensor_operand_for_weight(value, dim_order) + return (operand_id, self.operands[operand_id]) + + def get_tensor_operand_for_weight(self, jitval): + _, value = self.get_constant_value(jitval, "TensorType") + operand_id = self.add_tensor_operand_for_weight(value) + return (operand_id, self.operands[operand_id]) + + def add_operation(self, opcode, inputs, outputs): + self.operations.append((opcode, len(inputs), len(outputs))) + self.operation_args.extend(inputs + outputs) + + def add_tensor_sequence(self, jitval, values): + assert jitval not in self.tensor_sequences + self.tensor_sequences[jitval] = values + + def add_constant_value(self, jitval, ctype, value): + assert jitval not in self.constants + self.constants[jitval] = (ctype, value) + + def get_constant_value(self, jitval, typekind=None): + record = self.constants.get(jitval) + if record is None: + raise Exception(f"Could not find constant value for '{jitval!r}'.") + ctype, _ = record + if typekind is not None and ctype.kind() != typekind: + raise Exception( + f"Expected constant value of type {typekind}, but got {ctype.kind()} for value '{jitval!r}'" + ) + return record + + def operand_to_template_torchscript(self, op_id, oper, shape=None): + """Return a TorchScript expression to build a template for a given operand.""" + if shape is None: + shape = oper.shape + else: + assert len(shape) == len(oper.shape) + + shape_parts = ["("] + for d, s in enumerate(shape): + if s > 0: + # Fixed shape dimension: just add the value. + shape_parts.append(str(s)) + elif s == 0: + # Load time flexible shape dimension: it should have been computed in a variable. 
+ shape_parts.append(flex_name(op_id, d)) + elif s == -1: + # Runtime flexible shape + shape_parts.append("0") + else: + raise Exception("Unknown dim value, dimensions should be >= -1") + shape_parts.append(",") + shape_parts.append(")") + shape_code = "".join(shape_parts) + if oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32: + return f"torch.zeros({shape_code}, dtype=torch.float32)" + elif oper.op_type == NNAPI_OperandCode.TENSOR_INT32: + return f"torch.zeros({shape_code}, dtype=torch.int32)" + elif oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: + return ( + f"torch.quantize_per_tensor(" + f"torch.zeros(1), scale={oper.scale}, zero_point={oper.zero_point}, dtype=torch.quint8)" + f".expand({shape_code}).contiguous()" + ) + elif oper.op_type in ( + NNAPI_OperandCode.TENSOR_QUANT16_ASYMM, + NNAPI_OperandCode.TENSOR_QUANT16_SYMM, + ): + if self.use_int16_for_qint16: + return f"torch.zeros({shape_code}, dtype=torch.int16)" + else: + raise Exception( + "`int16` isn't supported. If you're trying to represent NNAPI" + " qint16 with Pytorch int16, set `use_int16_for_qint16 = True`" + ) + + raise Exception(f"Unsupported output operand type: {oper.op_type}") + + def forward_operand_shape(self, out_op_id, out_dim, in_op_id, in_dim): + self.compute_operand_shape(out_op_id, out_dim, flex_name(in_op_id, in_dim)) + + def compute_operand_shape(self, op_id, dim, expr): + self.flexible_shape_computation_lines.append( + f"{flex_name(op_id, dim)} = {expr}" + ) + + def transpose_to_nhwc(self, in_id, oper): + if oper.shape[2:] != (1, 1): + raise Exception("Automatic transpose only supported for H,W == 1,1") + + out_oper = oper._replace(dim_order=DimOrder.CHANNELS_LAST) + + inputs = [None] * 2 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_vector([0, 2, 3, 1]) + + outputs = [None] * 1 + outputs[0] = self.add_anonymous_tensor_operand(out_oper) + + self.add_operation(NNAPI_OperationCode.TRANSPOSE, inputs, outputs) + + return outputs[0], out_oper + + # Transpose inputs as necessary to allow broadcasting. + def transpose_for_broadcast(self, in0_id, in0_oper, in1_id, in1_oper): + if in0_oper.dim_order == in1_oper.dim_order: + return in0_id, in0_oper, in1_id, in1_oper + + # Assume NHWC is preferred if there is a mismatch. 
+ orders = (in0_oper.dim_order, in1_oper.dim_order) + if orders == (DimOrder.PRESUMED_CONTIGUOUS, DimOrder.CHANNELS_LAST): + return self.transpose_to_nhwc(in0_id, in0_oper) + (in1_id, in1_oper) + if orders == (DimOrder.CHANNELS_LAST, DimOrder.PRESUMED_CONTIGUOUS): + return (in0_id, in0_oper) + self.transpose_to_nhwc(in1_id, in1_oper) + + raise Exception( + f"Automatic transpose not supported for dim_orders: {in0_oper.dim_order!r}, {in1_oper.dim_order!r}" + ) + + def get_size_arg(self, jitval): + ctype, value = self.get_constant_value(jitval) + if ctype.kind() == "ListType": + assert ctype.getElementType().kind() == "IntType" + return value + raise Exception(f"Can't handle size arg of type '{ctype!r}' for '{jitval!r}'") + + def get_conv_pool_args_2d_from_pack(self, kernel_size, packed_config): + pc = [i.item() for i in packed_config] + assert pc[0] == 2 + strides = [pc[1], pc[2]] + paddings = [pc[3], pc[4]] + dilations = [pc[5], pc[6]] + output_padding = [pc[7], pc[8]] + group_num = pc[9] + + assert len(pc) == 11 + assert output_padding == [0, 0] + + return self.get_conv_pool_args_2d_common( + kernel_size, strides, paddings, dilations, group_num + ) + + def get_conv_pool_args_2d_from_jit( + self, kernel_size, stride, padding, dilation=None, group=None + ): + strides = self.get_size_arg(stride) + paddings = self.get_size_arg(padding) + if dilation is None: + dilations = [1, 1] + else: + dilations = self.get_size_arg(dilation) + if group is not None: + _, group_num = self.get_constant_value(group, "IntType") + else: + group_num = None + return self.get_conv_pool_args_2d_common( + kernel_size, strides, paddings, dilations, group_num + ) + + def get_conv_pool_args_2d_common( + self, kernel_size, strides, paddings, dilations, group_num + ): + kernels = list(kernel_size) + + assert len(kernels) == 2 + assert len(strides) == 2 + assert len(paddings) == 2 + assert len(dilations) == 2 + + # NNAPI uses 4 values for padding. 
+ ph, pw = paddings + real_paddings = [ph, ph, pw, pw] + + return ConvPoolArgs2d( + *(kernels + strides + real_paddings + dilations + [group_num]) + ) + + def serialize_model(self, model, inputs, return_shapes=None): + self.add_immediate_bool_scalar(False) + self.add_immediate_bool_scalar(True) + + inp_dim_orders = [] + out_dim_orders = [] + + self_jitval = next(model.graph.inputs()) + self.add_constant_value(self_jitval, self_jitval.type(), model) + + for arg_idx, (input_value, input_tensor) in enumerate( + zip(list(model.graph.inputs())[1:], inputs) + ): + op_id = self.add_tensor_operand_for_input( + arg_idx, input_value, input_tensor + ) + inp_dim_orders.append(self.operands[op_id].dim_order.value) + + for idx, node in enumerate(model.graph.nodes()): + LOG.debug("Processing node #%d: %r", idx, node) + self.add_node(node) + + retn = model.graph.return_node() + assert retn.inputsSize() == 1 + assert retn.outputsSize() == 0 + retn_input = retn.inputsAt(0) + template_return_lines = ["return ["] + if retn_input.type().kind() == "TensorType": + return_values = [retn_input] + retval_count = -1 + elif retn_input.type().kind() == "TupleType": + return_values = self.tensor_sequences[retn_input] + retval_count = len(return_values) + else: + raise Exception(f"Unsupported return type: {retn_input.type()}") + + if return_shapes is not None: + assert len(return_shapes) == len(return_values) + for i, v in enumerate(return_values): + op_id = self.jitval_operand_map[v] + self.outputs.append(op_id) + out_dim_orders.append(self.operands[op_id].dim_order.value) + shape = return_shapes[i] if return_shapes else None + template_return_lines.append( + self.operand_to_template_torchscript(op_id, self.operands[op_id], shape) + + "," + ) + template_return_lines.append("]") + + model = [] + + version = 1 + header = struct.pack( + "iiiiii", + version, + len(self.operands), + len(self.values), + len(self.operations), + len(self.inputs), + len(self.outputs), + ) + model.append(header) + + serialized_values, serialized_value_data = self.serialize_values() + + model.extend( + struct.pack("iifi", t, len(d), s, z) for (t, d, _m, s, z) in self.operands + ) + model.extend(serialized_values) + model.extend(struct.pack("iii", *x) for x in self.operations) + + # Compact the model so we can get its length so far. + model = [b"".join(model)] + model_offset = len(model[0]) + # Model offset is the index into the model (in 32-bit words, not bytes) + # of the next dimension we're about to serialize. If it's 0, + # generate code to mutate it before passing to NNAPI. 
+ assert model_offset % 4 == 0 + model_offset = int(model_offset / 4) + + for op_id, (_, dims, dim_order, _, _) in enumerate(self.operands): + shape = fix_shape(dims, dim_order) + for d, s in enumerate(shape): + if s == 0: + pt_d = reverse_map_dim(dim_order, d) + self.flexible_shape_computation_lines.append( + f"ser_model[{model_offset}] = {flex_name(op_id, pt_d)}" + ) + model_offset += 1 + + # convert runtime flex shape from -1 to 0 + shape = tuple(d if d != -1 else 0 for d in shape) + model.append(self.serialize_ints(shape)) + + model.extend(serialized_value_data) + model.append(self.serialize_ints(self.operation_args)) + model.append(self.serialize_ints(self.inputs)) + model.append(self.serialize_ints(self.outputs)) + + self.flexible_shape_computation_lines.extend(template_return_lines) + + return ( + array.array("i", b"".join(model)), + self.used_weights, + inp_dim_orders, + out_dim_orders, + self.flexible_shape_computation_lines, + retval_count, + ) + + def serialize_values(self): + serialized_values = [] + serialized_value_data = [] + assert len(self.values) == len(self.value_data) + for (op_index, source_type), data in zip(self.values, self.value_data): + source_length = len(data) + + # Pad with 0 bytes out to a multiple of 4 for alignment. + physical_length = ((source_length - 1) | 0x3) + 1 + padded_data = data + (b"\0" * (physical_length - source_length)) + + serialized_values.append( + struct.pack("iii", op_index, source_type, source_length) + ) + serialized_value_data.append(padded_data) + + return serialized_values, serialized_value_data + + @staticmethod + def serialize_ints(ints): + return array.array("i", ints).tobytes() + + ADDER_MAP = { + "prim::GetAttr": lambda self, node: self.add_getattr(node), + "prim::Constant": lambda self, node: self.add_constant_node(node), + "prim::ListConstruct": lambda self, node: self.add_list_construct(node), + "prim::TupleConstruct": lambda self, node: self.add_tuple_construct(node), + "aten::unsqueeze": lambda self, node: self.add_unsqueeze(node), + "aten::to": lambda self, node: self.add_to(node), + "aten::detach": lambda self, node: self._identity(node), + "aten::reshape": lambda self, node: self.add_reshape(node), + "aten::flatten": lambda self, node: self.add_flatten(node), + "aten::slice": lambda self, node: self.add_slice(node), + "aten::size": lambda self, node: self.add_size(node), + "aten::cat": lambda self, node: self.add_cat(node), + "aten::mean": lambda self, node: self.add_mean(node), + "aten::quantize_per_tensor": lambda self, node: self.add_quantize(node), + "aten::dequantize": lambda self, node: self.add_dequantize(node), + "aten::add": lambda self, node: self.add_add_sub_op( + node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_NONE + ), + "aten::sub": lambda self, node: self.add_add_sub_op( + node, NNAPI_OperationCode.SUB, NNAPI_FuseCode.FUSED_NONE + ), + "aten::mul": lambda self, node: self.add_pointwise_simple_binary_broadcast_op( + node, NNAPI_OperationCode.MUL, NNAPI_FuseCode.FUSED_NONE + ), + "aten::div": lambda self, node: self.add_pointwise_simple_binary_broadcast_op( + node, NNAPI_OperationCode.DIV, NNAPI_FuseCode.FUSED_NONE + ), + "aten::relu": lambda self, node: self.add_pointwise_simple_unary_op( + node, NNAPI_OperationCode.RELU + ), + "aten::sigmoid": lambda self, node: self.add_pointwise_simple_unary_op( + node, NNAPI_OperationCode.LOGISTIC + ), + "aten::softmax": lambda self, node: self.add_softmax(node), + "aten::hardtanh": lambda self, node: self.add_hardtanh(node), + "aten::avg_pool2d": lambda self, node: 
self.add_avg_pool2d(node), + "aten::max_pool2d": lambda self, node: self.add_pool2d_node( + node, NNAPI_OperationCode.MAX_POOL_2D + ), + "aten::adaptive_avg_pool2d": lambda self, node: self.add_adaptive_avg_pool2d( + node + ), + "aten::upsample_nearest2d": lambda self, node: self.add_upsample_nearest2d( + node + ), + "aten::prelu": lambda self, node: self.add_prelu_op(node), + "aten::addmm": lambda self, node: self.add_addmm(node), + "aten::linear": lambda self, node: self.add_linear(node), + "aten::_convolution": lambda self, node: self.add_conv_underscore(node), + "aten::conv2d": lambda self, node: self.add_conv2d(node), + "aten::log_softmax": lambda self, node: self.add_log_softmax(node), + "quantized::linear": lambda self, node: self.add_qlinear(node), + "quantized::conv2d": lambda self, node: self.add_qconv2d( + node, NNAPI_FuseCode.FUSED_NONE + ), + "quantized::conv2d_relu": lambda self, node: self.add_qconv2d( + node, NNAPI_FuseCode.FUSED_RELU + ), + "quantized::conv_transpose2d": lambda self, node: self.add_qconv2d( + node, NNAPI_FuseCode.FUSED_NONE, transpose=True + ), + "quantized::add": lambda self, node: self.add_qadd( + node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_NONE + ), + "quantized::add_relu": lambda self, node: self.add_qadd( + node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_RELU + ), + "quantized::mul": lambda self, node: self.add_qadd( + node, NNAPI_OperationCode.MUL, NNAPI_FuseCode.FUSED_NONE + ), + } + + def add_node(self, node): + adder = self.ADDER_MAP.get(node.kind()) + if not adder: + raise Exception(f"Unsupported node kind ({node.kind()!r}) in node {node!r}") + adder(self, node) + + def _identity(self, node): + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + jitval = node.outputsAt(0) + self.jitval_operand_map[jitval] = in_id + + def add_getattr(self, node): + assert node.inputsSize() == 1 + assert node.outputsSize() == 1 + obj_ctype, obj = self.get_constant_value(node.inputsAt(0)) + assert str(obj_ctype).startswith("__torch__.") + name = node.s("name") + value = getattr(obj, name) + output = node.outputsAt(0) + ctype = output.type() + self.add_constant_value(output, ctype, value) + + def add_constant_node(self, node): + assert node.inputsSize() == 0 + assert node.outputsSize() == 1 + output = node.outputsAt(0) + ctype = output.type() + value = output.toIValue() + self.add_constant_value(output, ctype, value) + + def add_list_construct(self, node): + assert node.outputsSize() == 1 + output = node.outputsAt(0) + ctype = output.type() + const_vals: Optional[List] = [] + tensors: Optional[List] = [] + for inp in node.inputs(): + if const_vals is not None and inp in self.constants: + _, val = self.get_constant_value(inp) + const_vals.append(val) + else: + const_vals = None + if tensors is not None and inp.type().kind() == "TensorType": + tensors.append(inp) + else: + tensors = None + + if const_vals is not None: + # NOTE: Now that TorchScript supports list constants, + # this code path might not be used anymore. + self.add_constant_value(output, ctype, const_vals) + if tensors is not None: + self.add_tensor_sequence(output, tensors) + if const_vals is None and tensors is None: + raise Exception( + f"Unable to handle ListConstruct node. Neither all constants nor all tensors. 
{node!r}" + ) + + def add_tuple_construct(self, node): + assert node.outputsSize() == 1 + output = node.outputsAt(0) + values = [] + for inp in node.inputs(): + values.append(inp) + self.add_tensor_sequence(output, values) + + def add_unsqueeze(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + + _, dim = self.get_constant_value(node.inputsAt(1), "IntType") + assert in_oper.dim_order == DimOrder.PRESUMED_CONTIGUOUS + + real_dim = dim if dim >= 0 else dim + len(in_oper.shape) + 1 + out_shape_list = list(in_oper.shape) + out_shape_list.insert(real_dim, 1) + out_shape = tuple(out_shape_list) + out_oper = in_oper._replace(shape=out_shape) + + inputs = [None] * 2 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_scalar(dim) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.EXPAND_DIMS, inputs, outputs) + + def add_to(self, node): + # Handle to("cpu") / to("gpu") case + self._identity(node) + + def add_reshape(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + + shape_ctype, shape = self.get_constant_value(node.inputsAt(1)) + assert shape_ctype.kind() == "ListType" + assert shape_ctype.getElementType().kind() == "IntType" + is_trivial_reshape = len(shape) == 2 and shape[1] == -1 + + if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS and not is_trivial_reshape: + raise Exception( + "Currently, reshape is only supported on NHWC tensors if the target size is [X, -1]." + ) + + # Bit of a hack here. Use a real tensor to infer the output shape. + out_shape = torch.zeros(1).expand(in_oper.shape).reshape(shape).shape + out_oper = in_oper._replace( + shape=out_shape, dim_order=DimOrder.PRESUMED_CONTIGUOUS + ) + + inputs = [None] * 2 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_vector(shape) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.RESHAPE, inputs, outputs) + + def add_flatten(self, node): + assert node.inputsSize() == 3 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + + start_ctype, start_dim = self.get_constant_value(node.inputsAt(1), "IntType") + end_ctype, end_dim = self.get_constant_value(node.inputsAt(2), "IntType") + + # channels last with channels == 1 or (height & width both 1) + is_trivial_flatten = len(in_oper.shape) == 4 and ( + in_oper.shape[1] == 1 or (in_oper.shape[2] == 1 and in_oper.shape[3] == 1) + ) + if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS and not is_trivial_flatten: + raise Exception( + "Currently, flatten is not supported on NHWC tensors unless C=1 or H=W=1" + ) + + if start_dim < 0: + start_dim += len(in_oper.shape) + if end_dim < 0: + end_dim += len(in_oper.shape) + + out_shape = ( + in_oper.shape[:start_dim] + + ( + functools.reduce( + lambda x, y: x * y, in_oper.shape[start_dim : end_dim + 1] + ), + ) + + in_oper.shape[end_dim + 1 :] + ) + + if any(dim == 0 for dim in in_oper.shape[start_dim : end_dim + 1]): + raise Exception("Flattening flexible dims is not supported yet") + non_flattened_dims = in_oper.shape[:start_dim] + in_oper.shape[end_dim + 1 :] + if non_flattened_dims.count(0) > 1: + raise Exception("Only 1 dim can be flexible") + + out_oper = in_oper._replace( + shape=out_shape, 
dim_order=DimOrder.PRESUMED_CONTIGUOUS + ) + out_id = self.add_tensor_operand(node.outputsAt(0), out_oper) + + for idx, dim in enumerate(out_shape): + if dim == 0: + self.forward_operand_shape(out_id, idx, in_id, in_oper.shape.index(0)) + + inputs_1 = tuple(dim if dim != 0 else -1 for dim in out_shape) + inputs = [None] * 2 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_vector(inputs_1) + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.RESHAPE, inputs, outputs) + + def add_slice(self, node): + assert node.inputsSize() == 5 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + _, dim_value = self.get_constant_value(node.inputsAt(1)) + _, start_value = self.get_constant_value(node.inputsAt(2)) + _, stop_value = self.get_constant_value(node.inputsAt(3)) + _, step_value = self.get_constant_value(node.inputsAt(4)) + + if start_value is None: + start_value = 0 + if stop_value is None: + stop_value = sys.maxsize + + if start_value < 0: + start_value += in_oper.shape[dim_value] + elif start_value == sys.maxsize: + start_value = 0 + + if start_value == 0 and stop_value == sys.maxsize: + self._identity(node) + return + + if in_oper.shape[dim_value] == 0: + raise Exception("Unable to slice with flexible shape") + + if stop_value < 0: + stop_value += in_oper.shape[dim_value] + elif stop_value == sys.maxsize: + stop_value = in_oper.shape[dim_value] + + if start_value >= stop_value: + raise Exception("Slice start value should be less than stop value") + + out_len = (stop_value - start_value) // step_value + out_shape = tuple( + out_len if i == dim_value else dim for i, dim in enumerate(in_oper.shape) + ) + out_id = self.add_tensor_operand( + node.outputsAt(0), in_oper._replace(shape=out_shape) + ) + + # flex inputs + end_mask = 0 + for idx, dim in enumerate(out_shape): + if dim == 0: + self.forward_operand_shape(out_id, idx, in_id, idx) + end_mask |= 1 << idx + + inputs = [None] * 7 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_vector( + [start_value if i == dim_value else 0 for i in range(len(in_oper.shape))] + ) + inputs[2] = self.add_immediate_int_vector( + [ + stop_value if i == dim_value else dim + for i, dim in enumerate(in_oper.shape) + ] + ) + inputs[3] = self.add_immediate_int_vector( + [step_value if i == dim_value else 1 for i in range(len(in_oper.shape))] + ) + inputs[4] = self.add_immediate_int_scalar(0) # begin mask + inputs[5] = self.add_immediate_int_scalar(end_mask) + inputs[6] = self.add_immediate_int_scalar(0) # shrink axis mas + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.STRIDED_SLICE, inputs, outputs) + + def add_size(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + _, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + _, value = self.constants[node.inputsAt(1)] + res = in_oper.shape[value] + output = node.outputsAt(0) + self.add_constant_value(output, output.type(), res) + + def add_cat(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + tensors = self.tensor_sequences[node.inputsAt(0)] + _, dim = self.get_constant_value(node.inputsAt(1), "IntType") + + assert len(tensors) > 0 + in_ids = [] + out_oper = None + out_dim_size = 0 + for inp in tensors: + in_id, in_oper = self.get_tensor_operand_by_jitval(inp) + if out_oper is None: + out_shape = change_element(in_oper.shape, dim, -1) + out_oper = in_oper._replace(shape=out_shape) + assert 
in_oper.op_type == out_oper.op_type + assert in_oper.dim_order == out_oper.dim_order + assert change_element(in_oper.shape, dim, -1) == change_element( + out_oper.shape, dim, -1 + ) + # TODO: Possibly check scale and zero point. + in_ids.append(in_id) + # TODO: Possibly support variable-sized inputs. + out_dim_size += in_oper.shape[dim] + + assert out_oper is not None + out_oper = out_oper._replace( + shape=change_element(out_oper.shape, dim, out_dim_size) + ) + + if in_oper.dim_order == DimOrder.CHANNELS_LAST: + assert len(out_oper.shape) == 4 + nnapi_dim = [0, 3, 1, 2][dim] + else: + nnapi_dim = dim + + out_id = self.add_tensor_operand(node.outputsAt(0), out_oper) + for idx, d in enumerate(out_oper.shape): + if d == 0: + if idx == dim: + shape = " + ".join(flex_name(ip_id, dim) for ip_id in in_ids) + self.compute_operand_shape(out_id, idx, shape) + else: + self.forward_operand_shape(out_id, idx, in_ids[0], idx) + + inputs = in_ids + [self.add_immediate_int_scalar(nnapi_dim)] + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.CONCATENATION, inputs, outputs) + + def add_mean(self, node): + assert node.inputsSize() == 4 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + dim_ctype, dim = self.get_constant_value(node.inputsAt(1)) + assert dim_ctype.kind() == "ListType" + assert dim_ctype.getElementType().kind() == "IntType" + _, keep_dim = self.get_constant_value(node.inputsAt(2), "BoolType") + # Expect None for dtype + self.get_constant_value(node.inputsAt(3), "NoneType") + + if in_oper.dim_order == DimOrder.CHANNELS_LAST: + assert len(in_oper.shape) == 4 + nnapi_dim = [[0, 3, 1, 2][d] for d in dim] + else: + nnapi_dim = dim + + collapsed_dims = set() + for d in dim: + if d < 0: + d += len(in_oper.shape) + collapsed_dims.add(d) + + if in_oper.dim_order == DimOrder.CHANNELS_LAST and not keep_dim: + assert collapsed_dims.issuperset({2, 3}) + out_dim_order = DimOrder.PRESUMED_CONTIGUOUS + else: + out_dim_order = in_oper.dim_order + + out_shape = [] + for i, s in enumerate(in_oper.shape): + if i not in collapsed_dims: + out_shape.append(s) + elif keep_dim: + out_shape.append(1) + + out_oper = in_oper._replace(shape=out_shape, dim_order=out_dim_order) + + inputs = [None] * 3 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_vector(nnapi_dim) + inputs[2] = self.add_immediate_int_scalar(keep_dim) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.MEAN, inputs, outputs) + + def add_quantize(self, node): + assert node.inputsSize() == 4 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + if in_oper.dim_order != DimOrder.CHANNELS_LAST: + raise Exception( + "Most hardware backends prefer NHWC quantized tensors. " + "Try setting `t.nnapi_nhwc = True` on your tensor inputs. " + ) + _, scale = self.get_constant_value(node.inputsAt(1), "FloatType") + _, zero_point = self.get_constant_value(node.inputsAt(2), "IntType") + _, scalar_type = self.get_constant_value(node.inputsAt(3), "IntType") + if scalar_type != TorchScalarTypes.QUINT8.value: + raise Exception( + "PyTorch NNAPI export only supports quantized tensors " + "with the quint8 dtype." 
+ ) + op_type = NNAPI_OperandCode.TENSOR_QUANT8_ASYMM + + out_oper = in_oper._replace( + op_type=op_type, + scale=scale, + zero_point=zero_point, + ) + + inputs = [None] * 1 + inputs[0] = in_id + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.QUANTIZE, inputs, outputs) + + def add_dequantize(self, node): + assert node.inputsSize() == 1 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + out_oper = in_oper._replace( + op_type=NNAPI_OperandCode.TENSOR_FLOAT32, + scale=0.0, + zero_point=0, + ) + + inputs = [None] * 1 + inputs[0] = in_id + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.DEQUANTIZE, inputs, outputs) + + def add_pointwise_simple_unary_op(self, node, opcode): + assert node.inputsSize() == 1 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + + out_oper = in_oper + if opcode == NNAPI_OperationCode.LOGISTIC: + # NNAPI docs: For ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, the scale + # must be 1.f / 256 and the zeroPoint must be 0. + # https://fburl.com/h52stoog + if in_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: + out_oper = in_oper._replace(zero_point=0, scale=1.0 / 256) + + out_id = self.add_tensor_operand(node.outputsAt(0), out_oper) + + for idx, dim in enumerate(in_oper.shape): + if dim == 0: + self.forward_operand_shape(out_id, idx, in_id, idx) + + inputs = [None] * 1 + inputs[0] = in_id + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(opcode, inputs, outputs) + + def _do_add_binary(self, node, opcode, fuse_code, *, qparams=None): # noqa: D401 + """Helper for pointwise binary broadcast ops with superfluous extra args.""" + assert node.outputsSize() == 1 + + assert node.inputsAt(0).type().kind() == "TensorType" + assert node.inputsAt(1).type().kind() == "TensorType" + + if self.has_operand_for_jitval(node.inputsAt(0)): + in0_id, in0_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + in1_id, in1_oper = self.get_tensor_operand_or_constant( + node.inputsAt(1), in0_oper.dim_order + ) + elif self.has_operand_for_jitval(node.inputsAt(1)): + in1_id, in1_oper = self.get_tensor_operand_by_jitval(node.inputsAt(1)) + in0_id, in0_oper = self.get_tensor_operand_or_constant( + node.inputsAt(0), in1_oper.dim_order + ) + else: + raise Exception(f"Can't do a NNAPI binary op: {opcode} on two constants") + + assert in0_oper.op_type == in1_oper.op_type + in0_id, in0_oper, in1_id, in1_oper = self.transpose_for_broadcast( + in0_id, in0_oper, in1_id, in1_oper + ) + # NOTE: PyTorch and NNAPI have the same broadcast semantics. 
+ out_shape = broadcast_shapes(in0_oper.shape, in1_oper.shape) + out_oper = in0_oper._replace(shape=out_shape) + if qparams is not None: + scale, zp = qparams + out_oper = out_oper._replace(scale=scale, zero_point=zp) + + out_id = self.add_tensor_operand(node.outputsAt(0), out_oper) + for idx, (d0, d1) in enumerate(zip(in0_oper.shape, in1_oper.shape)): + if d0 == 1 and d1 == 0: + self.forward_operand_shape(out_id, idx, in1_id, idx) + elif d0 == 0 and d1 == 1: + self.forward_operand_shape(out_id, idx, in0_id, idx) + elif d0 == 0 and d1 == 0: + self.flexible_shape_computation_lines.append( + f"assert {flex_name(in0_id, idx)} == {flex_name(in1_id, idx)}" + ) + self.forward_operand_shape(out_id, idx, in0_id, idx) + + inputs = [None] * 3 + inputs[0] = in0_id + inputs[1] = in1_id + inputs[2] = self.add_immediate_int_scalar(fuse_code) + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(opcode, inputs, outputs) + + def add_pointwise_simple_binary_broadcast_op(self, node, opcode, fuse_code): + assert node.inputsSize() == 2 + self._do_add_binary(node, opcode, fuse_code) + + def add_add_sub_op(self, node, opcode, fuse_code): + assert node.inputsSize() == 3 + + _, alpha = self.get_constant_value(node.inputsAt(2), "IntType") + if alpha != 1: + raise Exception("NNAPI does not support add/sub with alpha.") + + self._do_add_binary(node, opcode, fuse_code) + + def add_qadd(self, node, opcode, fuse_code): + assert node.inputsSize() == 4 + + _, scale = self.get_constant_value(node.inputsAt(2), "FloatType") + _, zero_point = self.get_constant_value(node.inputsAt(3), "IntType") + + self._do_add_binary(node, opcode, fuse_code, qparams=(scale, zero_point)) + + def add_softmax(self, node): + assert node.inputsSize() == 3 + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + + _, softmax_dim = self.get_constant_value(node.inputsAt(1), "IntType") + + out_id = self.add_tensor_operand(node.outputsAt(0), in_oper) + for dim, size in enumerate(in_oper.shape): + if size == 0: + self.forward_operand_shape(out_id, dim, in_id, dim) + + inputs = [None] * 3 + inputs[0] = in_id + inputs[1] = self.add_immediate_float_scalar( + 1.0 + ) # positive scaling factor of exponent, beta + inputs[2] = self.add_immediate_int_scalar(softmax_dim) + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.SOFTMAX, inputs, outputs) + + def add_hardtanh(self, node): + assert node.inputsSize() == 3 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + _, min_val = self.get_constant_value(node.inputsAt(1), "FloatType") + _, max_val = self.get_constant_value(node.inputsAt(2), "FloatType") + + op_map = { + (-1, 1): NNAPI_OperationCode.RELU1, + (0, 6): NNAPI_OperationCode.RELU6, # noqa: E201 + } + + opcode = op_map.get((min_val, max_val)) + if opcode is None: + raise Exception("NNAPI only supports hardtanh with args (-1, 1) or (0, 6).") + + inputs = [None] * 1 + inputs[0] = in_id + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), in_oper) + + self.add_operation(opcode, inputs, outputs) + + def add_prelu_op(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + assert node.inputsAt(0).type().kind() == "TensorType" + assert node.inputsAt(1).type().kind() == "TensorType" + + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + w_id, w_oper = self.get_tensor_operand_for_weight(node.inputsAt(1)) + assert len(w_oper.shape) == 1 + assert 
w_oper.shape[0] > 0 + if w_oper.shape[0] > 1: + if in_oper.use_nchw(): + # TODO: Support this by adding trailing 1 dims. + raise Exception( + "Per-channel PReLU only supports channels_last right now." + ) + + out_id = self.add_tensor_operand(node.outputsAt(0), in_oper) + for dim, size in enumerate(in_oper.shape): + if size > 0: + pass + elif dim <= 1: + raise Exception("PReLU requires fixed size for dim 0 and dim 1.") + else: + self.forward_operand_shape(out_id, dim, in_id, dim) + + inputs = [None] * 2 + inputs[0] = in_id + inputs[1] = w_id + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.PRELU, inputs, outputs) + + def add_pool2d_node(self, node, opcode): + assert node.inputsSize() == 6 + assert node.outputsSize() == 1 + image, kernel, stride, padding, dilation, ceil_mode = node.inputs() + + stride = stride or kernel + + # TODO: Validate ceil_mode semantics. + + args = self.get_conv_pool_args_2d_from_jit( + self.get_size_arg(kernel), stride, padding, dilation + ) + if args.dilation_h != 1 or args.dilation_w != 1: + raise Exception("NNAPI does not support dilated pooling.") + + image_id, image_oper = self.get_tensor_operand_by_jitval_fixed_size(image) + assert len(image_oper.shape) == 4 + + out_shape = get_conv_pool_shape( + image_oper.shape, args, image_oper.shape[1], False + ) + use_nchw = image_oper.use_nchw() + + inputs = [None] * 11 + inputs[0] = image_id + inputs[1] = self.add_immediate_int_scalar(args.pad_l) + inputs[2] = self.add_immediate_int_scalar(args.pad_r) + inputs[3] = self.add_immediate_int_scalar(args.pad_t) + inputs[4] = self.add_immediate_int_scalar(args.pad_b) + inputs[5] = self.add_immediate_int_scalar(args.stride_w) + inputs[6] = self.add_immediate_int_scalar(args.stride_h) + inputs[7] = self.add_immediate_int_scalar(args.kernel_w) + inputs[8] = self.add_immediate_int_scalar(args.kernel_h) + inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) + inputs[10] = self.add_immediate_bool_scalar(use_nchw) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand( + node.outputsAt(0), image_oper._replace(shape=out_shape) + ) + + self.add_operation(opcode, inputs, outputs) + + def add_avg_pool2d(self, node): + assert node.inputsSize() == 7 + assert node.outputsSize() == 1 + ( + image, + kernel, + stride, + padding, + ceil_mode, + count_include_pad, + divisor_override, + ) = node.inputs() + + _, count_include_pad_value = self.get_constant_value(count_include_pad) + _, divisor_override_value = self.get_constant_value(divisor_override) + if not count_include_pad_value or divisor_override_value: + raise Exception( + "NNAPI doesn't support count_include_pad=False or divisor_override" + ) + + args = self.get_conv_pool_args_2d_from_jit( + self.get_size_arg(kernel), stride, padding + ) + + image_id, image_oper = self.get_tensor_operand_by_jitval(image) + assert len(image_oper.shape) == 4 + + out_shape = get_conv_pool_shape( + image_oper.shape, args, image_oper.shape[1], False + ) + use_nchw = image_oper.use_nchw() + + inputs = [None] * 11 + inputs[0] = image_id + inputs[1] = self.add_immediate_int_scalar(args.pad_l) + inputs[2] = self.add_immediate_int_scalar(args.pad_r) + inputs[3] = self.add_immediate_int_scalar(args.pad_t) + inputs[4] = self.add_immediate_int_scalar(args.pad_b) + inputs[5] = self.add_immediate_int_scalar(args.stride_w) + inputs[6] = self.add_immediate_int_scalar(args.stride_h) + inputs[7] = self.add_immediate_int_scalar(args.kernel_w) + inputs[8] = self.add_immediate_int_scalar(args.kernel_h) + 
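# Remaining scalar inputs: the fuse code, then the NCHW data-layout flag. +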
inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) + inputs[10] = self.add_immediate_bool_scalar(use_nchw) + + outputs = [None] * 1 + out_id = self.add_tensor_operand( + node.outputsAt(0), image_oper._replace(shape=out_shape) + ) + self._handle_conv_pool_flexible_input(out_id, image, args, False) + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.AVERAGE_POOL_2D, inputs, outputs) + + def add_adaptive_avg_pool2d(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + image_id, image_oper = self.get_tensor_operand_by_jitval_fixed_size( + node.inputsAt(0) + ) + assert len(image_oper.shape) == 4 + + size_ctype, size_arg = self.get_constant_value(node.inputsAt(1)) + assert size_ctype.kind() == "ListType" + assert size_ctype.getElementType().kind() == "IntType" + if size_arg != [1, 1]: + raise Exception( + "NNAPI only supports adaptive_avg_pool2d with output size (1, 1)." + ) + + out_shape = image_oper.shape[0:2] + tuple(size_arg) + use_nchw = image_oper.use_nchw() + + inputs = [None] * 11 + inputs[0] = image_id + inputs[1] = self.add_immediate_int_scalar(0) + inputs[2] = self.add_immediate_int_scalar(0) + inputs[3] = self.add_immediate_int_scalar(0) + inputs[4] = self.add_immediate_int_scalar(0) + inputs[5] = self.add_immediate_int_scalar(1) + inputs[6] = self.add_immediate_int_scalar(1) + inputs[7] = self.add_immediate_int_scalar(image_oper.shape[3]) + inputs[8] = self.add_immediate_int_scalar(image_oper.shape[2]) + inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) + inputs[10] = self.add_immediate_bool_scalar(use_nchw) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand( + node.outputsAt(0), image_oper._replace(shape=out_shape) + ) + + self.add_operation(NNAPI_OperationCode.AVERAGE_POOL_2D, inputs, outputs) + + def add_upsample_nearest2d(self, node): + assert node.inputsSize() == 3 or node.inputsSize() == 4 + assert node.outputsSize() == 1 + if node.inputsSize() == 3: + image, size_jit, scale_jit = node.inputs() + else: + image, size_jit, scale_h_jit, scale_w_jit = node.inputs() + size_ctype, size_arg = self.get_constant_value(size_jit) + + if node.inputsSize() == 3: + scale_ctype, scale_arg = self.get_constant_value(scale_jit) + else: + scale_h_ctype, scale_h_arg = self.get_constant_value(scale_h_jit) + scale_w_ctype, scale_w_arg = self.get_constant_value(scale_w_jit) + + # The only way for the 4-argument overload of upsample_nearest2d to + # have been added to the graph without error is if the scale_h and + # scale_w arguments are None + assert scale_h_ctype.kind() == "NoneType" + assert scale_w_ctype.kind() == "NoneType" + + scale_ctype = scale_h_ctype + scale_arg = scale_h_arg + + image_id, image_oper = self.get_tensor_operand_by_jitval(image) + assert len(image_oper.shape) == 4 + + if size_ctype.kind() != "NoneType" and scale_ctype.kind() != "NoneType": + raise Exception("Size and scale cannot both be non-None.") + elif size_ctype.kind() != "NoneType": + assert size_ctype.kind() == "ListType" + assert size_ctype.getElementType().kind() == "IntType" + assert scale_ctype.kind() == "NoneType" + assert scale_arg is None + assert isinstance(size_arg, list) + assert size_arg + assert all(isinstance(val, int) for val in size_arg) + if len(size_arg) == 1: + size_arg = size_arg * 2 + assert len(size_arg) == 2 + out_h = size_arg[0] + out_w = size_arg[1] + arg_h = self.add_immediate_int_scalar(out_h) + arg_w = self.add_immediate_int_scalar(out_w) + elif scale_ctype.kind() != "NoneType": + assert 
scale_ctype.kind() == "ListType" + assert scale_ctype.getElementType().kind() == "FloatType" + assert size_ctype.kind() == "NoneType" + assert size_arg is None + assert isinstance(scale_arg, list) + assert scale_arg + assert all(isinstance(val, float) for val in scale_arg) + if len(scale_arg) == 1: + scale_arg = scale_arg * 2 + assert len(scale_arg) == 2 + out_h = int(scale_arg[0] * image_oper.shape[2]) + out_w = int(scale_arg[1] * image_oper.shape[3]) + arg_h = self.add_immediate_float_scalar(scale_arg[0]) + arg_w = self.add_immediate_float_scalar(scale_arg[1]) + else: + raise Exception("Size and scale cannot both be None.") + + out_shape = (image_oper.shape[0], image_oper.shape[1], out_h, out_w) + use_nchw = image_oper.use_nchw() + out_id = self.add_tensor_operand( + node.outputsAt(0), image_oper._replace(shape=out_shape) + ) + + if image_oper.shape[0] == 0 or image_oper.shape[1] == 0: + raise Exception("Flexible batch or channels not supported") + + # Handle variable input size + for dim in (2, 3): # h, w indices + if image_oper.shape[dim] == 0: + if size_ctype.kind() != "NoneType": + self.compute_operand_shape(out_id, dim, size_arg[dim - 2]) + elif scale_ctype.kind() != "NoneType": + self.compute_operand_shape( + out_id, + dim, + f"int({scale_arg[dim - 2]} * {flex_name(image_id, dim)})", + ) + else: + raise Exception("Size and scale cannot both be None.") + + inputs = [None] * 4 + inputs[0] = image_id + inputs[1] = arg_w + inputs[2] = arg_h + inputs[3] = self.add_immediate_bool_scalar(use_nchw) + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.RESIZE_NEAREST_NEIGHBOR, inputs, outputs) + + def add_addmm(self, node): + assert node.inputsSize() == 5 + assert node.outputsSize() == 1 + jit_bias, jit_input, jit_weight, jit_beta, jit_alpha = node.inputs() + + for jitval in (jit_beta, jit_alpha): + scale_ctype, scale_value = self.get_constant_value(jitval) + assert scale_ctype.kind() in ("IntType", "FloatType") + if scale_value != 1: + raise Exception( + "NNAPI Fully-Connected does not support alpha and beta." + ) + + self.add_addmm_or_linear(node, True, jit_input, jit_weight, jit_bias) + + def add_linear(self, node): + assert node.inputsSize() == 3 + assert node.outputsSize() == 1 + jit_input, jit_weight, jit_bias = node.inputs() + + self.add_addmm_or_linear(node, False, jit_input, jit_weight, jit_bias) + + def add_addmm_or_linear( + self, node, transpose_weight, jit_input, jit_weight, jit_bias + ): + input_id, input_oper = self.get_tensor_operand_by_jitval(jit_input) + bias_id, bias_oper = self.get_tensor_operand_for_weight(jit_bias) + + assert len(input_oper.shape) == 2 + assert len(bias_oper.shape) == 1 + + # TODO: Transform at load time to share weights with CPU model. 
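+ # For now, the (transposed, for addmm) weight is copied into its own NNAPI weight buffer.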
+ _, weight_tensor = self.get_constant_value(jit_weight, "TensorType") + assert len(weight_tensor.shape) == 2 + if transpose_weight: + nnapi_weight_tensor = weight_tensor.t().contiguous() + else: + nnapi_weight_tensor = weight_tensor.contiguous() + weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor) + weight_oper = self.operands[weight_id] + + out_shape = (input_oper.shape[0], weight_oper.shape[0]) + out_id = self.add_tensor_operand( + node.outputsAt(0), input_oper._replace(shape=out_shape) + ) + + if input_oper.shape[0] == 0: + self.forward_operand_shape(out_id, 0, input_id, 0) + + inputs = [None] * 4 + inputs[0] = input_id + inputs[1] = weight_id + inputs[2] = bias_id + inputs[3] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.FULLY_CONNECTED, inputs, outputs) + + def add_qlinear(self, node): + assert node.inputsSize() == 4 + assert node.outputsSize() == 1 + ( + jit_input, + jit_packed_weight, + jit_scale, + jit_zero_point, + ) = node.inputs() + + input_id, input_oper = self.get_tensor_operand_by_jitval_fixed_size(jit_input) + # TODO: Support automatic reshape + assert len(input_oper.shape) == 2 + + _, out_scale = self.get_constant_value(jit_scale, "FloatType") + _, out_zero_point = self.get_constant_value(jit_zero_point, "IntType") + weight_ctype, packed_weight = self.get_constant_value(jit_packed_weight) + assert weight_ctype.name() == "LinearPackedParamsBase" + raw_weight, raw_bias = packed_weight.__getstate__()[0] + assert raw_bias is not None + + assert len(raw_weight.shape) == 2 + assert len(raw_bias.shape) == 1 + assert raw_bias.shape[0] == raw_weight.shape[0] + assert raw_weight.shape[1] == input_oper.shape[1] + + assert raw_weight.qscheme() == torch.per_tensor_affine + if raw_weight.dtype == torch.quint8: + unsigned_weight = raw_weight + else: + assert raw_weight.dtype == torch.qint8 + unsigned_weight = torch._make_per_tensor_quantized_tensor( + (raw_weight.int_repr().int() + 128).to(torch.uint8), + scale=raw_weight.q_scale(), + zero_point=raw_weight.q_zero_point() + 128, + ) + weight_scale = unsigned_weight.q_scale() + bias_scale = input_oper.scale * weight_scale + int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32) + bias_id = self.add_tensor_operand_for_weight(int_bias) + + multiplier = input_oper.scale * weight_scale / out_scale + assert multiplier > 0 + if multiplier >= 1: + raise Exception( + "Quantized convolution multiplier is greater than 1. " + "This is supported by NNAPI, but not by most hardware backends. " + "Try training a model without quantization-aware training. " + ) + + # TODO: Transform at load time to share weights with CPU model. 
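+ # For now, the quint8 representation of the weight is copied into its own buffer.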
+ nnapi_weight_tensor = unsigned_weight.contiguous() + weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor) + weight_oper = self.operands[weight_id] + + out_shape = (input_oper.shape[0], weight_oper.shape[0]) + out_oper = input_oper._replace( + shape=out_shape, + scale=out_scale, + zero_point=out_zero_point, + ) + + inputs = [None] * 4 + inputs[0] = input_id + inputs[1] = weight_id + inputs[2] = bias_id + inputs[3] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.FULLY_CONNECTED, inputs, outputs) + + def get_optional_bias(self, jit_bias, weight_tensor, transpose=False): + ctype, value = self.get_constant_value(jit_bias) + if ctype.kind() == "NoneType": + bias_idx = 1 if transpose else 0 + nnapi_bias_tensor = torch.zeros( + weight_tensor.size()[bias_idx], dtype=weight_tensor.dtype + ) + bias_id = self.add_tensor_operand_for_weight(nnapi_bias_tensor) + bias_oper = self.operands[bias_id] + return bias_id, bias_oper + else: + return self.get_tensor_operand_for_weight(jit_bias) + + def add_conv2d(self, node): + assert node.inputsSize() == 7 + assert node.outputsSize() == 1 + + ( + jit_image, + jit_weight, + jit_bias, + jit_stride, + jit_pad, + jit_dilation, + jit_groups, + ) = node.inputs() + + _, weight_tensor = self.get_constant_value(jit_weight, "TensorType") + bias_id, bias_oper = self.get_optional_bias(jit_bias, weight_tensor) + args = self.get_conv_pool_args_2d_from_jit( + weight_tensor.shape[2:4], jit_stride, jit_pad, jit_dilation, jit_groups + ) + + return self.add_conv2d_common( + node.outputsAt(0), + 0.0, + 0, + jit_image, + weight_tensor, + bias_id, + args, + False, # transpose + NNAPI_FuseCode.FUSED_NONE, + ) + + def add_conv_underscore(self, node): + assert node.inputsSize() == 13 + assert node.outputsSize() == 1 + + ( + jit_image, + jit_weight, + jit_bias, + jit_stride, + jit_pad, + jit_dilation, + jit_transpose, + _, + jit_groups, + _, + _, + _, + _, + ) = node.inputs() + + _, weight_tensor = self.get_constant_value(jit_weight, "TensorType") + _, transpose = self.get_constant_value(jit_transpose) + bias_id, bias_oper = self.get_optional_bias(jit_bias, weight_tensor, transpose) + args = self.get_conv_pool_args_2d_from_jit( + weight_tensor.shape[2:4], jit_stride, jit_pad, jit_dilation, jit_groups + ) + + return self.add_conv2d_common( + node.outputsAt(0), + 0.0, + 0, + jit_image, + weight_tensor, + bias_id, + args, + transpose, + NNAPI_FuseCode.FUSED_NONE, + ) + + def add_log_softmax(self, node): + assert node.inputsSize() == 3 + assert node.outputsSize() == 1 + + (jit_input, jit_dim, jit_half_to_float) = node.inputs() + input_id, input_oper = self.get_tensor_operand_by_jitval_fixed_size(jit_input) + _, dim = self.get_constant_value(jit_dim, "IntType") + + out_shape = input_oper.shape + + inputs = [None] * 3 + inputs[0] = input_id + # specifying 1 as the scaling factor for the exponent, beta + inputs[1] = self.add_immediate_float_scalar(1) + inputs[2] = self.add_immediate_int_scalar(dim) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand( + node.outputsAt(0), input_oper._replace(shape=out_shape) + ) + self.add_operation(NNAPI_OperationCode.LOG_SOFTMAX, inputs, outputs) + + def add_qconv2d(self, node, fuse_code, transpose=False): + assert node.inputsSize() == 4 + assert node.outputsSize() == 1 + + ( + jit_image, + jit_packed_weight, + jit_scale, + jit_zero_point, + ) = node.inputs() + + _, out_scale = 
self.get_constant_value(jit_scale, "FloatType") + _, out_zero_point = self.get_constant_value(jit_zero_point, "IntType") + weight_ctype, packed_weight = self.get_constant_value(jit_packed_weight) + assert weight_ctype.name() == "Conv2dPackedParamsBase" + ( + pack_version, + tensors, + opt_tensors, + ) = packed_weight.__getstate__()[0] + assert pack_version == "2" + packed_config, raw_weight = tensors + (raw_bias,) = opt_tensors + assert raw_bias is not None + args = self.get_conv_pool_args_2d_from_pack( + raw_weight.shape[2:4], packed_config + ) + + assert raw_weight.qscheme() == torch.per_tensor_affine + if raw_weight.dtype == torch.quint8: + unsigned_weight = raw_weight + else: + assert raw_weight.dtype == torch.qint8 + unsigned_weight = torch._make_per_tensor_quantized_tensor( + (raw_weight.int_repr().int() + 128).to(torch.uint8), + scale=raw_weight.q_scale(), + zero_point=raw_weight.q_zero_point() + 128, + ) + weight_scale = unsigned_weight.q_scale() + _, image_oper = self.get_tensor_operand_by_jitval(jit_image) + bias_scale = image_oper.scale * weight_scale + int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32) + bias_id = self.add_tensor_operand_for_weight(int_bias) + + multiplier = image_oper.scale * weight_scale / out_scale + assert multiplier > 0 + if multiplier >= 1: + raise Exception( + "Quantized convolution multiplier is greater than 1. " + "This is supported by NNAPI, but not by most hardware backends. " + "Try training a model without quantization-aware training. " + ) + + return self.add_conv2d_common( + node.outputsAt(0), + out_scale, + out_zero_point, + jit_image, + unsigned_weight, + bias_id, + args, + transpose, + fuse_code, + ) + + def add_conv2d_common( + self, + jit_out, + out_scale, + out_zero_point, + jit_image, + weight_tensor, + bias_id, + args, + transpose, + fuse_code, + ): + image_id, image_oper = self.get_tensor_operand_by_jitval(jit_image) + in_c = image_oper.shape[1] + + if args.group == 1: + # Full convolution + depthwise = False + if transpose: + weight_permutation = (1, 2, 3, 0) + else: + weight_permutation = (0, 2, 3, 1) + elif args.group == in_c: + # Depthwise convolution + depthwise = True + weight_permutation = (1, 2, 3, 0) + else: + raise Exception("Group convolution not supported yet.") + + # TODO: Transform at load time to share weights with CPU model. 
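+ # For now, copy the weight into its own buffer in NNAPI's channels-last filter layout.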
+ nnapi_weight_tensor = weight_tensor.permute(*weight_permutation).contiguous() + weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor) + weight_oper = self.operands[weight_id] + + bias_oper = self.operands[bias_id] + + if image_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32: + assert weight_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32 + assert bias_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32 + elif image_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: + assert weight_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM + assert bias_oper.op_type == NNAPI_OperandCode.TENSOR_INT32 + assert approx_equal(image_oper.scale * weight_oper.scale, bias_oper.scale) + assert bias_oper.zero_point == 0 + else: + raise Exception(f"Unsupported input type for conv2d: {image_oper.op_type}") + + assert len(image_oper.shape) == 4 + assert len(weight_oper.shape) == 4 + assert len(bias_oper.shape) == 1 + + if depthwise: + # Depthwise convolution + one, kern_h, kern_w, out_c = weight_oper.shape + assert one == 1 + assert out_c % in_c == 0 + channel_multiplier = out_c // in_c + assert channel_multiplier == 1 # Don't support multiplier + assert out_c == in_c + else: + # Full convolution + out_c, kern_h, kern_w, kern_d = weight_oper.shape + assert kern_d == in_c + + assert out_c == bias_oper.shape[0] + + use_nchw = image_oper.use_nchw() + + if depthwise: + num_args = 12 + opcode = NNAPI_OperationCode.DEPTHWISE_CONV_2D + else: + num_args = 11 + if transpose: + opcode = NNAPI_OperationCode.TRANSPOSE_CONV_2D + else: + opcode = NNAPI_OperationCode.CONV_2D + + inputs = [None] * num_args + inputs[0] = image_id + inputs[1] = weight_id + inputs[2] = bias_id + inputs[3] = self.add_immediate_int_scalar(args.pad_l) + inputs[4] = self.add_immediate_int_scalar(args.pad_r) + inputs[5] = self.add_immediate_int_scalar(args.pad_t) + inputs[6] = self.add_immediate_int_scalar(args.pad_b) + inputs[7] = self.add_immediate_int_scalar(args.stride_w) + inputs[8] = self.add_immediate_int_scalar(args.stride_h) + if depthwise: + inputs[9] = self.add_immediate_int_scalar(1) + inputs[10] = self.add_immediate_int_scalar(fuse_code) + inputs[11] = self.add_immediate_bool_scalar(use_nchw) + else: + inputs[9] = self.add_immediate_int_scalar(fuse_code) + inputs[10] = self.add_immediate_bool_scalar(use_nchw) + + outputs = [None] * 1 + out_shape = get_conv_pool_shape(image_oper.shape, args, out_c, transpose) + out_oper = image_oper._replace( + shape=out_shape, + scale=out_scale, + zero_point=out_zero_point, + ) + out_id = self.add_tensor_operand(jit_out, out_oper) + self._handle_conv_pool_flexible_input(out_id, jit_image, args, transpose) + + outputs[0] = out_id + self.add_operation(opcode, inputs, outputs) + + def _handle_conv_pool_flexible_input(self, out_id, jit_image, args, transpose): + image_id, image_oper = self.get_tensor_operand_by_jitval(jit_image) + batch, in_ch, in_h, in_w = image_oper.shape + + if batch == 0: + self.forward_operand_shape(out_id, 0, image_id, 0) + if in_ch == 0: + raise Exception("Input channels can't be flexible") + # H & W + if transpose: + if in_h == 0: + self.compute_operand_shape( + out_id, + 2, + f"({flex_name(image_id, 2)} - 1) * {args.stride_h} + {args.kernel_h} - {args.pad_t} - {args.pad_b}", + ) + if in_w == 0: + self.compute_operand_shape( + out_id, + 3, + f"({flex_name(image_id, 3)} - 1) * {args.stride_w} + {args.kernel_w} - {args.pad_l} - {args.pad_r}", + ) + else: + if in_h == 0: + self.compute_operand_shape( + out_id, + 2, + f"({flex_name(image_id, 2)} - {args.kernel_h} + 
{args.pad_t} + {args.pad_b}) // {args.stride_h} + 1", + ) + if in_w == 0: + self.compute_operand_shape( + out_id, + 3, + f"({flex_name(image_id, 3)} - {args.kernel_w} + {args.pad_l} + {args.pad_r}) // {args.stride_w} + 1", + ) + + +def serialize_model( + module, inputs, *, config=None, return_shapes=None, use_int16_for_qint16=False +): + """Convert to NNAPI and serialize torchscript module. + + Parameters: + module: Torchscript module to convert + inputs: Tensors used to specify input details for NNAPI + config (optional): Optional config to attach to module + return_shapes (optional): Specify shape of outputs if + your module uses runtime flexible shapes to set output + buffer size for NNAPI + use_int16_for_qint16 (optional): Use Pytorch int16 to represent NNAPI qint16 values + """ + return _NnapiSerializer(config, use_int16_for_qint16).serialize_model( + module, inputs, return_shapes + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/cuda/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..77ce755d14a982eb4298201e528f79a022986cf6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/cuda/__init__.py @@ -0,0 +1,335 @@ +import contextlib + +from typing import Union + +import torch + +__all__ = [ + "is_built", + "cuFFTPlanCacheAttrContextProp", + "cuFFTPlanCache", + "cuFFTPlanCacheManager", + "cuBLASModule", + "preferred_linalg_library", + "cufft_plan_cache", + "matmul", + "SDPBackend", + "SDPAParams", + "enable_flash_sdp", + "flash_sdp_enabled", + "enable_mem_efficient_sdp", + "mem_efficient_sdp_enabled", + "math_sdp_enabled", + "enable_math_sdp", + "can_use_flash_attention", + "can_use_efficient_attention", + "sdp_kernel", +] + + +def is_built(): + r""" + Return whether PyTorch is built with CUDA support. + + Note that this doesn't necessarily mean CUDA is available; just that if this PyTorch + binary were run on a machine with working CUDA drivers and devices, we would be able to use it. + """ + return torch._C._has_cuda + + +class cuFFTPlanCacheAttrContextProp: + # Like regular ContextProp, but uses the `.device_index` attribute from the + # calling object as the first argument to the getter and setter. + def __init__(self, getter, setter): + self.getter = getter + self.setter = setter + + def __get__(self, obj, objtype): + return self.getter(obj.device_index) + + def __set__(self, obj, val): + if isinstance(self.setter, str): + raise RuntimeError(self.setter) + self.setter(obj.device_index, val) + + +class cuFFTPlanCache: + r""" + Represent a specific plan cache for a specific `device_index`. + + The attributes `size` and `max_size`, and method `clear`, can fetch and/ or + change properties of the C++ cuFFT plan cache. + """ + + def __init__(self, device_index): + self.device_index = device_index + + size = cuFFTPlanCacheAttrContextProp( + torch._cufft_get_plan_cache_size, + ".size is a read-only property showing the number of plans currently in the " + "cache. To change the cache capacity, set cufft_plan_cache.max_size.", + ) + + max_size = cuFFTPlanCacheAttrContextProp( + torch._cufft_get_plan_cache_max_size, torch._cufft_set_plan_cache_max_size + ) + + def clear(self): + return torch._cufft_clear_plan_cache(self.device_index) + + +class cuFFTPlanCacheManager: + r""" + Represent all cuFFT plan caches, return the cuFFTPlanCache for a given device when indexed. 
+ + Finally, this object, when used directly as a `cuFFTPlanCache` object (e.g., + setting the `.max_size`) attribute, the current device's cuFFT plan cache is + used. + """ + + __initialized = False + + def __init__(self): + self.caches = [] + self.__initialized = True + + def __getitem__(self, device): + index = torch.cuda._utils._get_device_index(device) + if index < 0 or index >= torch.cuda.device_count(): + raise RuntimeError( + f"cufft_plan_cache: expected 0 <= device index < {torch.cuda.device_count()}, but got " + f"device with index {index}" + ) + if len(self.caches) == 0: + self.caches.extend( + cuFFTPlanCache(index) for index in range(torch.cuda.device_count()) + ) + return self.caches[index] + + def __getattr__(self, name): + return getattr(self[torch.cuda.current_device()], name) + + def __setattr__(self, name, value): + if self.__initialized: + return setattr(self[torch.cuda.current_device()], name, value) + else: + return super().__setattr__(name, value) + + +class cuBLASModule: + def __getattr__(self, name): + if name == "allow_tf32": + return torch._C._get_cublas_allow_tf32() + elif name == "allow_fp16_reduced_precision_reduction": + return torch._C._get_cublas_allow_fp16_reduced_precision_reduction() + elif name == "allow_bf16_reduced_precision_reduction": + return torch._C._get_cublas_allow_bf16_reduced_precision_reduction() + raise AttributeError("Unknown attribute " + name) + + def __setattr__(self, name, value): + if name == "allow_tf32": + return torch._C._set_cublas_allow_tf32(value) + elif name == "allow_fp16_reduced_precision_reduction": + return torch._C._set_cublas_allow_fp16_reduced_precision_reduction(value) + elif name == "allow_bf16_reduced_precision_reduction": + return torch._C._set_cublas_allow_bf16_reduced_precision_reduction(value) + raise AttributeError("Unknown attribute " + name) + + +_LinalgBackends = { + "default": torch._C._LinalgBackend.Default, + "cusolver": torch._C._LinalgBackend.Cusolver, + "magma": torch._C._LinalgBackend.Magma, +} +_LinalgBackends_str = ", ".join(_LinalgBackends.keys()) + + +def preferred_linalg_library( + backend: Union[None, str, torch._C._LinalgBackend] = None +) -> torch._C._LinalgBackend: + r""" + Override the heuristic PyTorch uses to choose between cuSOLVER and MAGMA for CUDA linear algebra operations. + + .. warning:: This flag is experimental and subject to change. + + When PyTorch runs a CUDA linear algebra operation it often uses the cuSOLVER or MAGMA libraries, + and if both are available it decides which to use with a heuristic. + This flag (a :class:`str`) allows overriding those heuristics. + + * If `"cusolver"` is set then cuSOLVER will be used wherever possible. + * If `"magma"` is set then MAGMA will be used wherever possible. + * If `"default"` (the default) is set then heuristics will be used to pick between + cuSOLVER and MAGMA if both are available. + * When no input is given, this function returns the currently preferred library. + * User may use the environment variable TORCH_LINALG_PREFER_CUSOLVER=1 to set the preferred library to cuSOLVER + globally. + This flag only sets the initial value of the preferred library and the preferred library + may still be overridden by this function call later in your script. + + Note: When a library is preferred other libraries may still be used if the preferred library + doesn't implement the operation(s) called. + This flag may achieve better performance if PyTorch's heuristic library selection is incorrect + for your application's inputs. 
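+
+    A minimal usage sketch (illustrative only; it assumes a CUDA build of PyTorch
+    with cuSOLVER available)::
+
+        import torch
+
+        # Prefer cuSOLVER for subsequent CUDA linear algebra operations.
+        torch.backends.cuda.preferred_linalg_library("cusolver")
+        # Calling with no argument just returns the currently preferred backend.
+        print(torch.backends.cuda.preferred_linalg_library())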
+ + Currently supported linalg operators: + + * :func:`torch.linalg.inv` + * :func:`torch.linalg.inv_ex` + * :func:`torch.linalg.cholesky` + * :func:`torch.linalg.cholesky_ex` + * :func:`torch.cholesky_solve` + * :func:`torch.cholesky_inverse` + * :func:`torch.linalg.lu_factor` + * :func:`torch.linalg.lu` + * :func:`torch.linalg.lu_solve` + * :func:`torch.linalg.qr` + * :func:`torch.linalg.eigh` + * :func:`torch.linalg.eighvals` + * :func:`torch.linalg.svd` + * :func:`torch.linalg.svdvals` + """ + if backend is None: + pass + elif isinstance(backend, str): + if backend not in _LinalgBackends: + raise RuntimeError( + "Unknown input value. " f"Choose from: {_LinalgBackends_str}." + ) + torch._C._set_linalg_preferred_backend(_LinalgBackends[backend]) + elif isinstance(backend, torch._C._LinalgBackend): + torch._C._set_linalg_preferred_backend(backend) + else: + raise RuntimeError("Unknown input value type.") + + return torch._C._get_linalg_preferred_backend() + + +from torch._C import _SDPAParams as SDPAParams, _SDPBackend as SDPBackend + +# Set the __module__ attribute +SDPBackend.__module__ = "torch.backends.cuda" +SDPAParams.__module__ = "torch.backends.cuda" + + +def flash_sdp_enabled(): + r""" + .. warning:: This flag is beta and subject to change. + + Returns whether flash scaled dot product attention is enabled or not. + """ + return torch._C._get_flash_sdp_enabled() + + +def enable_flash_sdp(enabled: bool): + r""" + .. warning:: This flag is beta and subject to change. + + Enables or disables flash scaled dot product attention. + """ + torch._C._set_sdp_use_flash(enabled) + + +def mem_efficient_sdp_enabled(): + r""" + .. warning:: This flag is beta and subject to change. + + Returns whether memory efficient scaled dot product attention is enabled or not. + """ + return torch._C._get_mem_efficient_sdp_enabled() + + +def enable_mem_efficient_sdp(enabled: bool): + r""" + .. warning:: This flag is beta and subject to change. + + Enables or disables memory efficient scaled dot product attention. + """ + torch._C._set_sdp_use_mem_efficient(enabled) + + +def math_sdp_enabled(): + r""" + .. warning:: This flag is beta and subject to change. + + Returns whether math scaled dot product attention is enabled or not. + """ + return torch._C._get_math_sdp_enabled() + + +def enable_math_sdp(enabled: bool): + r""" + .. warning:: This flag is beta and subject to change. + + Enables or disables math scaled dot product attention. + """ + torch._C._set_sdp_use_math(enabled) + + +def can_use_flash_attention(params: SDPAParams, debug: bool = False) -> bool: + r"""Check if FlashAttention can be utilized in scaled_dot_product_attention. + + Args: + params: An instance of SDPAParams containing the tensors for query, + key, value, an optional attention mask, dropout rate, and + a flag indicating if the attention is causal. + debug: Whether to logging.warn debug information as to why FlashAttention could not be run. + Defaults to False. + + Returns: + True if FlashAttention can be used with the given parameters; otherwise, False. + + Note: + This function is dependent on a CUDA-enabled build of PyTorch. It will return False + in non-CUDA environments. + """ + return torch._C._can_use_flash_attention(params, debug) + + +def can_use_efficient_attention(params: SDPAParams, debug: bool = False) -> bool: + r"""Check if efficient_attention can be utilized in scaled_dot_product_attention. 
+ + Args: + params: An instance of SDPAParams containing the tensors for query, + key, value, an optional attention mask, dropout rate, and + a flag indicating if the attention is causal. + debug: Whether to logging.warn with information as to why efficient_attention could not be run. + Defaults to False. + + Returns: + True if efficient_attention can be used with the given parameters; otherwise, False. + + Note: + This function is dependent on a CUDA-enabled build of PyTorch. It will return False + in non-CUDA environments. + """ + return torch._C._can_use_mem_efficient_attention(params, debug) + + +@contextlib.contextmanager +def sdp_kernel( + enable_flash: bool = True, + enable_math: bool = True, + enable_mem_efficient: bool = True, +): + r""" + .. warning:: This flag is beta and subject to change. + + This context manager can be used to temporarily enable or disable any of the three backends for scaled dot product attention. + Upon exiting the context manager, the previous state of the flags will be restored. + """ + previous_flash: bool = flash_sdp_enabled() + previous_mem_efficient: bool = mem_efficient_sdp_enabled() + previous_math: bool = math_sdp_enabled() + try: + enable_flash_sdp(enable_flash) + enable_mem_efficient_sdp(enable_mem_efficient) + enable_math_sdp(enable_math) + yield {} + finally: + enable_flash_sdp(previous_flash) + enable_mem_efficient_sdp(previous_mem_efficient) + enable_math_sdp(previous_math) + + +cufft_plan_cache = cuFFTPlanCacheManager() +matmul = cuBLASModule() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/mkl/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/mkl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..261ee764485b653a9ecd50c8e8fe3943f0c23449 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/mkl/__init__.py @@ -0,0 +1,56 @@ +import torch + + +def is_available(): + r"""Return whether PyTorch is built with MKL support.""" + return torch._C.has_mkl + + +VERBOSE_OFF = 0 +VERBOSE_ON = 1 + + +class verbose: + """ + On-demand oneMKL verbosing functionality. + + To make it easier to debug performance issues, oneMKL can dump verbose + messages containing execution information like duration while executing + the kernel. The verbosing functionality can be invoked via an environment + variable named `MKL_VERBOSE`. However, this methodology dumps messages in + all steps. Those are a large amount of verbose messages. Moreover, for + investigating the performance issues, generally taking verbose messages + for one single iteration is enough. This on-demand verbosing functionality + makes it possible to control scope for verbose message dumping. In the + following example, verbose messages will be dumped out for the second + inference only. + + .. highlight:: python + .. code-block:: python + + import torch + model(data) + with torch.backends.mkl.verbose(torch.backends.mkl.VERBOSE_ON): + model(data) + + Args: + level: Verbose level + - ``VERBOSE_OFF``: Disable verbosing + - ``VERBOSE_ON``: Enable verbosing + """ + + def __init__(self, enable): + self.enable = enable + + def __enter__(self): + if self.enable == VERBOSE_OFF: + return + st = torch._C._verbose.mkl_set_verbose(self.enable) + assert ( + st + ), "Failed to set MKL into verbose mode. Please consider to disable this verbose scope." 
+ return self + + def __exit__(self, exc_type, exc_val, exc_tb): + torch._C._verbose.mkl_set_verbose(VERBOSE_OFF) + return False diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/mkl/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/mkl/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a600dd067c882c920781d24db3d427f80a705b15 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/mkl/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/quantized/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/quantized/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..85009753e0aee1a9c2b7d0743d44171f70affbd3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/quantized/__init__.py @@ -0,0 +1,65 @@ +import sys +import types +from typing import List + +import torch + + +# This function should correspond to the enums present in c10/core/QEngine.h +def _get_qengine_id(qengine: str) -> int: + if qengine == "none" or qengine == "" or qengine is None: + ret = 0 + elif qengine == "fbgemm": + ret = 1 + elif qengine == "qnnpack": + ret = 2 + elif qengine == "onednn": + ret = 3 + elif qengine == "x86": + ret = 4 + else: + ret = -1 + raise RuntimeError(f"{qengine} is not a valid value for quantized engine") + return ret + + +# This function should correspond to the enums present in c10/core/QEngine.h +def _get_qengine_str(qengine: int) -> str: + all_engines = {0: "none", 1: "fbgemm", 2: "qnnpack", 3: "onednn", 4: "x86"} + return all_engines.get(qengine, "*undefined") + + +class _QEngineProp: + def __get__(self, obj, objtype) -> str: + return _get_qengine_str(torch._C._get_qengine()) + + def __set__(self, obj, val: str) -> None: + torch._C._set_qengine(_get_qengine_id(val)) + + +class _SupportedQEnginesProp: + def __get__(self, obj, objtype) -> List[str]: + qengines = torch._C._supported_qengines() + return [_get_qengine_str(qe) for qe in qengines] + + def __set__(self, obj, val) -> None: + raise RuntimeError("Assignment not supported") + + +class QuantizedEngine(types.ModuleType): + def __init__(self, m, name): + super().__init__(name) + self.m = m + + def __getattr__(self, attr): + return self.m.__getattribute__(attr) + + engine = _QEngineProp() + supported_engines = _SupportedQEnginesProp() + + +# This is the sys.modules replacement trick, see +# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273 +sys.modules[__name__] = QuantizedEngine(sys.modules[__name__], __name__) +engine: str +supported_engines: List[str] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/quantized/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/quantized/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6e9145f1037e71a29914415b434d1ab418ace0c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/quantized/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/xeon/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/xeon/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebd709c0e9370a12cb2832fb8b811003d0d48eb4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/run_cpu.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/run_cpu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..985badcb321fb5d2d40485935f75e03fbeab1aad Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/run_cpu.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/xeon/run_cpu.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/xeon/run_cpu.py new file mode 100644 index 0000000000000000000000000000000000000000..61e8397b6181e695163b98b0acead1d4f8e46869 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/xeon/run_cpu.py @@ -0,0 +1,926 @@ +""" +This is a script for launching PyTorch inference on Intel(R) Xeon(R) Scalable Processors with optimal configurations. + +Single instance inference, multi-instance inference are enabled. + +Note: term "instance" here doesn't refer to a cloud instance. This script is executed as a single process. It invokes +multiple "instances" which are formed from multiple threads for each. "instance" is kind of group of threads in this +context. + +Illustrated as below: + +:: + + +-----------------------------+----------------------+-------+ + | process | thread | core | + +=============================+======================+=======+ + | torch.backends.xeon.run_cpu | instance 0: thread 0 | 0 | + | | thread 1 | 1 | + | +----------------------+-------+ + | | instance 1: thread 0 | 2 | + | | thread 1 | 3 | + | +----------------------+-------+ + | | ... | ... | + | +----------------------+-------+ + | | instance N: thread 0 | M | + | | thread 1 | M+1 | + +-----------------------------+----------------------+-------+ + +To get the peak performance on Intel(R) Xeon(R) Scalable Processors, the script optimizes the configuration of thread and memory +management. For thread management, the script configures thread affinity and the preload of Intel OMP library. +For memory management, it configures NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc). + +Environment variables that will be set by this script: + ++------------------+-------------------------------------------------------------------------------------------------+ +| Environ Variable | Value | ++==================+=================================================================================================+ +| LD_PRELOAD | Depending on knobs you set, /libiomp5.so, /libjemalloc.so, /libtcmalloc.so might | +| | be appended to LD_PRELOAD. | ++------------------+-------------------------------------------------------------------------------------------------+ +| KMP_AFFINITY | If libiomp5.so is preloaded, KMP_AFFINITY could be set to "granularity=fine,compact,1,0". | ++------------------+-------------------------------------------------------------------------------------------------+ +| KMP_BLOCKTIME | If libiomp5.so is preloaded, KMP_BLOCKTIME is set to "1". 
                                       |
++------------------+-------------------------------------------------------------------------------------------------+
+| OMP_NUM_THREADS  | value of ncores_per_instance                                                                    |
++------------------+-------------------------------------------------------------------------------------------------+
+| MALLOC_CONF      | If libjemalloc.so is preloaded, MALLOC_CONF will be set to                                      |
+|                  | "oversize_threshold:1,background_thread:true,metadata_thp:auto".                               |
++------------------+-------------------------------------------------------------------------------------------------+
+
+*Note*: This script respects environment variables that are already set. I.e., if you set the environment variables
+mentioned above before running the script, the script will not overwrite them.
+
+How to use this module:
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Single instance inference
+-------------------------
+
+1. Run single-instance inference on a single node with all CPU nodes.
+
+::
+
+   python -m torch.backends.xeon.run_cpu --throughput-mode script.py args
+
+2. Run single-instance inference on a single CPU node.
+
+::
+
+   python -m torch.backends.xeon.run_cpu --node-id 1 script.py args
+
+Multi-instance inference
+------------------------
+
+1. Multi-instance
+   By default this tool runs one process per node. If you want to set the number of instances and cores per instance,
+   set --ninstances and --ncores-per-instance.
+
+::
+
+   python -m torch.backends.xeon.run_cpu -- python_script args
+
+   e.g.: on an Intel(R) Xeon(R) Scalable Processor with 14 instances, 4 cores per instance
+
+::
+
+   python -m torch.backends.xeon.run_cpu --ninstances 14 --ncores-per-instance 4 python_script args
+
+2. Run single-instance inference among multiple instances.
+   By default, all ninstances are run. If you want to independently run a single instance among ninstances, specify --rank.
+
+   e.g.: run the 0th instance on an Intel(R) Xeon(R) Scalable Processor with 2 instances (i.e., numactl -C 0-27)
+
+::
+
+   python -m torch.backends.xeon.run_cpu --ninstances 2 --rank 0 python_script args
+
+   e.g.: run the 1st instance on an Intel(R) Xeon(R) Scalable Processor with 2 instances (i.e., numactl -C 28-55)
+
+::
+
+   python -m torch.backends.xeon.run_cpu --ninstances 2 --rank 1 python_script args
+
+   e.g.: run the 0th instance on an Intel(R) Xeon(R) Scalable Processor with 2 instances, 2 cores per instance,
+   using the first four cores (i.e., numactl -C 0-1)
+
+::
+
+   python -m torch.backends.xeon.run_cpu --core-list "0, 1, 2, 3" --ninstances 2 --ncores-per-instance 2
+   --rank 0 python_script args
+
+3. To look up what optional arguments this module offers:
+
+::
+
+   python -m torch.backends.xeon.run_cpu --help
+
+Memory allocator
+----------------
+
+"--enable-tcmalloc" and "--enable-jemalloc" can be used to enable different memory allocators.
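+
+For example (an illustrative invocation; it assumes jemalloc is installed in one of the
+library paths this script searches):
+
+::
+
+   python -m torch.backends.xeon.run_cpu --enable-jemalloc --ninstances 1 script.py args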
+ +""" + +import glob +import logging +import os +import platform +import re +import subprocess +import sys +from argparse import ArgumentParser, RawTextHelpFormatter, REMAINDER +from os.path import expanduser +from typing import Dict, List + +from torch.distributed.elastic.multiprocessing import start_processes, Std + +format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s" +logging.basicConfig(level=logging.INFO, format=format_str) +logger = logging.getLogger(__name__) + + +class _CPUinfo: + """Get CPU information, such as cores list and NUMA information.""" + + def __init__(self, test_input=""): + self.cpuinfo = [] + if platform.system() in ["Windows", "Darwin"]: + raise RuntimeError(f"{platform.system()} is not supported!!!") + elif platform.system() == "Linux": + # Sample output of: `lscpu --parse=CPU,Core,Socket,Node` + # + # # The following is the parsable format, which can be fed to other + # # programs. Each different item in every column has an unique ID + # # starting from zero. + # # CPU,Core,Socket,Node + # 0,0,0,0 + # 1,1,0,0 + # ... + if test_input == "": + lscpu_cmd = ["lscpu", "--parse=CPU,Core,Socket,Node"] + lscpu_info = subprocess.check_output( + lscpu_cmd, universal_newlines=True + ).split("\n") + else: + lscpu_info = test_input.split("\n") + + # Get information about cpu, core, socket and node + for line in lscpu_info: + pattern = r"^([\d]+,[\d]+,[\d]+,[\d]?)" + regex_out = re.search(pattern, line) + if regex_out: + self.cpuinfo.append(regex_out.group(1).strip().split(",")) + + # physical cores := core column in lscpu output + # logical cores := cPU column in lscpu output + self.node_nums = int(max([line[3] for line in self.cpuinfo])) + 1 + self.node_physical_cores: List[List[int]] = [] # node_id is index + self.node_logical_cores: List[List[int]] = [] # node_id is index + self.physical_core_node_map = {} # physical core to numa node id + self.logical_core_node_map = {} # logical core to numa node id + + for node_id in range(self.node_nums): + cur_node_physical_core = [] + cur_node_logical_core = [] + for cpuinfo in self.cpuinfo: + nid = cpuinfo[3] if cpuinfo[3] != "" else "0" + if node_id == int(nid): + if int(cpuinfo[1]) not in cur_node_physical_core: + cur_node_physical_core.append(int(cpuinfo[1])) + self.physical_core_node_map[int(cpuinfo[1])] = int(node_id) + cur_node_logical_core.append(int(cpuinfo[0])) + self.logical_core_node_map[int(cpuinfo[0])] = int(node_id) + self.node_physical_cores.append(cur_node_physical_core) + self.node_logical_cores.append(cur_node_logical_core) + + def _physical_core_nums(self): + return len(self.node_physical_cores) * len(self.node_physical_cores[0]) + + def _logical_core_nums(self): + return len(self.node_logical_cores) * len(self.node_logical_cores[0]) + + def get_node_physical_cores(self, node_id): + if node_id < 0 or node_id > self.node_nums - 1: + raise ValueError( + f"Invalid node id: {node_id}. Valid node ids: {list(range(len(self.node_physical_cores)))}" + ) + return self.node_physical_cores[node_id] + + def get_node_logical_cores(self, node_id): + if node_id < 0 or node_id > self.node_nums - 1: + raise ValueError( + f"Invalid node id: {node_id}. 
Valid node ids: {list(range(len(self.node_physical_cores)))}" + ) + return self.node_logical_cores[node_id] + + def get_all_physical_cores(self): + all_cores = [] + for cores in self.node_physical_cores: + all_cores.extend(cores) + return all_cores + + def get_all_logical_cores(self): + all_cores = [] + for cores in self.node_logical_cores: + all_cores.extend(cores) + return all_cores + + def numa_aware_check(self, core_list): + """ + Check whether all cores in core_list are in the same NUMA node. + + Cross NUMA will reduce performance. + We strongly advice to not use cores on different nodes. + """ + cores_numa_map = self.logical_core_node_map + numa_ids = [] + for core in core_list: + numa_id = cores_numa_map[core] + if numa_id not in numa_ids: + numa_ids.append(numa_id) + if len(numa_ids) > 1: + logger.warning( + "Numa Aware: cores:%s on different NUMA nodes:%s. To avoid \ +this behavior, please use --ncores-per-instance knob to make sure number of cores is divisible by --ncores-per-\ +instance. Alternatively, please use --skip-cross-node-cores knob.", + str(core_list), + str(numa_ids), + ) + if len(numa_ids) == 0: + raise RuntimeError( + "invalid number of NUMA nodes; please make sure numa_ids >= 1" + ) + return numa_ids + + +class _Launcher: + r"""Class for launcher.""" + + msg_lib_notfound = f"Unable to find the {{0}} library file lib{{1}}.so in $CONDA_PREFIX/lib or $VIRTUAL_ENV/lib \ +or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or \ +{expanduser('~')}/.local/lib/ so the LD_PRELOAD environment variable will not be set." + + def __init__(self): + self.cpuinfo = _CPUinfo() + + def add_lib_preload(self, lib_type): + """Enable TCMalloc/JeMalloc/intel OpenMP.""" + library_paths = [] + if "CONDA_PREFIX" in os.environ: + library_paths.append(f"{os.environ['CONDA_PREFIX']}/lib") + if "VIRTUAL_ENV" in os.environ: + library_paths.append(f"{os.environ['VIRTUAL_ENV']}/lib") + + library_paths += [ + f"{expanduser('~')}/.local/lib", + "/usr/local/lib", + "/usr/local/lib64", + "/usr/lib", + "/usr/lib64", + ] + + lib_find = False + lib_set = False + for item in os.getenv("LD_PRELOAD", "").split(":"): + if item.endswith(f"lib{lib_type}.so"): + lib_set = True + break + if not lib_set: + for lib_path in library_paths: + library_file = os.path.join(lib_path, f"lib{lib_type}.so") + matches = glob.glob(library_file) + if len(matches) > 0: + ld_preloads = [f"{matches[0]}", os.getenv("LD_PRELOAD", "")] + os.environ["LD_PRELOAD"] = os.pathsep.join( + [p.strip(os.pathsep) for p in ld_preloads if p] + ) + lib_find = True + break + return lib_set or lib_find + + def is_numactl_available(self): + numactl_available = False + try: + cmd = ["numactl", "-C", "0", "-m", "0", "hostname"] + r = subprocess.run( + cmd, + env=os.environ, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + check=False, + ) + if r.returncode == 0: + numactl_available = True + except Exception: + pass + return numactl_available + + def set_memory_allocator( + self, enable_tcmalloc=True, enable_jemalloc=False, use_default_allocator=False + ): + """ + Enable TCMalloc/JeMalloc with LD_PRELOAD and set configuration for JeMalloc. + + By default, PTMalloc will be used for PyTorch, but TCMalloc and JeMalloc can get better + memory reuse and reduce page fault to improve performance. + """ + if enable_tcmalloc and enable_jemalloc: + raise RuntimeError( + "Unable to enable TCMalloc and JEMalloc at the same time." 
+ ) + + if enable_tcmalloc: + find_tc = self.add_lib_preload(lib_type="tcmalloc") + if not find_tc: + msg = f'{self.msg_lib_notfound} you can use "conda install -c conda-forge gperftools" to install {{0}}' + logger.warning(msg.format("TCmalloc", "tcmalloc")) # noqa: G001 + else: + logger.info("Use TCMalloc memory allocator") + + elif enable_jemalloc: + find_je = self.add_lib_preload(lib_type="jemalloc") + if not find_je: + msg = f'{self.msg_lib_notfound} you can use "conda install -c conda-forge jemalloc" to install {{0}}' + logger.warning(msg.format("Jemalloc", "jemalloc")) # noqa: G001 + else: + logger.info("Use JeMalloc memory allocator") + self.set_env( + "MALLOC_CONF", + "oversize_threshold:1,background_thread:true,metadata_thp:auto", + ) + + elif use_default_allocator: + pass + + else: + find_tc = self.add_lib_preload(lib_type="tcmalloc") + if find_tc: + logger.info("Use TCMalloc memory allocator") + return + find_je = self.add_lib_preload(lib_type="jemalloc") + if find_je: + logger.info("Use JeMalloc memory allocator") + return + logger.warning( + """Neither TCMalloc nor JeMalloc is found in $CONDA_PREFIX/lib or $VIRTUAL_ENV/lib + or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or + %s/.local/lib/ so the LD_PRELOAD environment variable will not be set. + This may drop the performance""", + expanduser("~"), + ) + + def log_env_var(self, env_var_name=""): + if env_var_name in os.environ: + logger.info("%s=%s", env_var_name, os.environ[env_var_name]) + + def set_env(self, env_name, env_value): + if not env_value: + logger.warning("%s is None", env_name) + if env_name not in os.environ: + os.environ[env_name] = env_value + elif os.environ[env_name] != env_value: + logger.warning( + "Overriding value with the one set in environment variable: %s. \ +Value applied: %s. Value ignored: %s", + env_name, + os.environ[env_name], + env_value, + ) + self.log_env_var(env_name) + + # set_kmp_affinity is used to control whether to set KMP_AFFINITY or not. + # In scenario that use all cores on all nodes, including logical cores, setting KMP_AFFINITY disables logical cores. + # In this case, KMP_AFFINITY should not be set. + def set_multi_thread_and_allocator( + self, + ncores_per_instance, + disable_iomp=False, + set_kmp_affinity=True, + enable_tcmalloc=True, + enable_jemalloc=False, + use_default_allocator=False, + ): + """ + Set multi-thread configuration and enable Intel openMP and TCMalloc/JeMalloc. + + By default, GNU openMP and PTMalloc are used in PyTorch. but Intel openMP and TCMalloc/JeMalloc are better alternatives + to get performance benefit. 
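+
+        A minimal sketch of a typical call (illustrative only; ``_Launcher`` and this
+        method are internal helpers rather than a public API)::
+
+            launcher = _Launcher()
+            # Use 4 OpenMP threads per instance and try to preload Intel OpenMP
+            # and TCMalloc when they can be found on the library search paths.
+            launcher.set_multi_thread_and_allocator(4, enable_tcmalloc=True)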
+ """ + self.set_memory_allocator( + enable_tcmalloc, enable_jemalloc, use_default_allocator + ) + self.set_env("OMP_NUM_THREADS", str(ncores_per_instance)) + if not disable_iomp: + find_iomp = self.add_lib_preload(lib_type="iomp5") + if not find_iomp: + msg = f'{self.msg_lib_notfound} you can use "conda install mkl" to install {{0}}' + logger.warning(msg.format("iomp", "iomp5")) # noqa: G001 + else: + logger.info("Using Intel OpenMP") + if set_kmp_affinity: + self.set_env("KMP_AFFINITY", "granularity=fine,compact,1,0") + self.set_env("KMP_BLOCKTIME", "1") + self.log_env_var("LD_PRELOAD") + + r""" + Launcher for single instance and multi-instance + """ + + def launch(self, args): + cores = [] + set_kmp_affinity = True + enable_taskset = False + if args.core_list: # user specify what cores will be used by params + cores = [int(x) for x in args.core_list.split(",")] + if args.ncores_per_instance == -1: + raise RuntimeError( + 'please specify the "--ncores-per-instance" if you have pass the --core-list params' + ) + elif ( + args.ninstances > 1 + and args.ncores_per_instance * args.ninstances < len(cores) + ): + logger.warning( + "only first %s cores will be used, \ +but you specify %s cores in core_list", + args.ncores_per_instance * args.ninstances, + len(cores), + ) + else: + args.ninstances = len(cores) // args.ncores_per_instance + + else: + if args.use_logical_core: + if args.node_id != -1: + cores = self.cpuinfo.get_node_logical_cores(args.node_id) + else: + cores = self.cpuinfo.get_all_logical_cores() + # When using all cores on all nodes, including logical cores, + # setting KMP_AFFINITY disables logical cores. Thus, KMP_AFFINITY should not be set. + set_kmp_affinity = False + else: + if args.node_id != -1: + cores = self.cpuinfo.get_node_physical_cores(args.node_id) + else: + cores = self.cpuinfo.get_all_physical_cores() + if ( + not args.multi_instance + and args.ninstances == -1 + and args.ncores_per_instance == -1 + ): + args.ninstances = 1 + args.ncores_per_instance = len(cores) + elif ( + args.multi_instance + and args.ninstances == -1 + and args.ncores_per_instance == -1 + ): + args.throughput_mode = True + elif args.ncores_per_instance == -1 and args.ninstances != -1: + if args.ninstances > len(cores): + raise RuntimeError( + f"there are {len(cores)} total cores but you specify {args.ninstances} ninstances; \ +please make sure ninstances <= total_cores)" + ) + else: + args.ncores_per_instance = len(cores) // args.ninstances + elif args.ncores_per_instance != -1 and args.ninstances == -1: + if not args.skip_cross_node_cores: + args.ninstances = len(cores) // args.ncores_per_instance + else: + ncore_per_node = len(self.cpuinfo.node_physical_cores[0]) + num_leftover_cores = ncore_per_node % args.ncores_per_instance + if args.ncores_per_instance > ncore_per_node: + # too many ncores_per_instance to skip cross-node cores + logger.warning( + "there are %s core(s) per socket, but you specify %s ncores_per_instance and \ +skip_cross_node_cores. Please make sure --ncores-per-instance < core(s) per \ +socket", + ncore_per_node, + args.ncores_per_instance, + ) + sys.exit(-1) + elif num_leftover_cores == 0: + # aren't any cross-node cores + logger.info( + "--skip-cross-node-cores is set, but there are no cross-node cores." + ) + args.ninstances = len(cores) // args.ncores_per_instance + else: + # skip cross-node cores + if args.ninstances != -1: + logger.warning( + "--skip-cross-node-cores is exclusive to --ninstances. --ninstances \ +won't take effect even if it is set explicitly." 
+ ) + + i = 1 + leftover_cores = set() + while ncore_per_node * i <= len(cores): + leftover_cores.update( + cores[ + ncore_per_node * i + - num_leftover_cores : ncore_per_node * i + ] + ) + i += 1 + cores = list(set(cores) - leftover_cores) + assert len(cores) % args.ncores_per_instance == 0 + args.ninstances = len(cores) // args.ncores_per_instance + else: + if args.ninstances * args.ncores_per_instance > len(cores): + raise RuntimeError( + "Please make sure ninstances * ncores_per_instance <= total_cores" + ) + if args.latency_mode: + logger.warning( + "--latency-mode is exclusive to --ninstances, --ncores-per-instance, --node-id and \ +--use-logical-core. They won't take effect even they are set explicitly." + ) + args.ncores_per_instance = 4 + cores = self.cpuinfo.get_all_physical_cores() + args.ninstances = len(cores) // args.ncores_per_instance + + if args.throughput_mode: + logger.warning( + "--throughput-mode is exclusive to --ninstances, --ncores-per-instance, --node-id and \ +--use-logical-core. They won't take effect even they are set explicitly." + ) + args.ninstances = self.cpuinfo.node_nums + cores = self.cpuinfo.get_all_physical_cores() + args.ncores_per_instance = len(cores) // args.ninstances + + if args.ninstances > 1 and args.rank != -1: + logger.info( + "assigning %s cores for instance %s", + args.ncores_per_instance, + args.rank, + ) + + if not args.disable_numactl: + numactl_available = self.is_numactl_available() + if not numactl_available: + if not args.disable_taskset: + logger.warning( + "Core binding with numactl is not available. Disabling numactl and using taskset instead. \ + This may affect performance in multi-socket system; please use numactl if memory binding is needed." + ) + args.disable_numactl = True + enable_taskset = True + else: + logger.warning( + "Core binding with numactl is not available, and --disable_taskset is set. \ + Please unset --disable_taskset to use taskset instead of numactl." 
+ ) + sys.exit(-1) + + if not args.disable_taskset: + enable_taskset = True + + self.set_multi_thread_and_allocator( + args.ncores_per_instance, + args.disable_iomp, + set_kmp_affinity, + args.enable_tcmalloc, + args.enable_jemalloc, + args.use_default_allocator, + ) + entrypoint = "" + launch_args = {} + launch_envs: Dict[int, Dict] = {} + launch_tee = {} + for i in range(args.ninstances): + cmd = [] + cur_process_cores = "" + if not args.disable_numactl or enable_taskset: + if not args.disable_numactl: + cmd = ["numactl"] + elif enable_taskset: + cmd = ["taskset"] + cores = sorted(cores) + if ( + args.rank == -1 + ): # sequentially assign ncores_per_instance to ninstances + core_list = cores[ + i + * args.ncores_per_instance : (i + 1) + * args.ncores_per_instance + ] + else: # assign ncores_per_instance from rank + core_list = cores[ + args.rank + * args.ncores_per_instance : (args.rank + 1) + * args.ncores_per_instance + ] + + core_ranges: List[Dict] = [] + for core in core_list: + if len(core_ranges) == 0: + range_elem = {"start": core, "end": core} + core_ranges.append(range_elem) + else: + if core - core_ranges[-1]["end"] == 1: + core_ranges[-1]["end"] = core + else: + range_elem = {"start": core, "end": core} + core_ranges.append(range_elem) + for r in core_ranges: + cur_process_cores = f"{cur_process_cores}{r['start']}-{r['end']}," + cur_process_cores = cur_process_cores[:-1] + if not args.disable_numactl: + numa_params = f"-C {cur_process_cores} " + numa_ids = ",".join( + [ + str(numa_id) + for numa_id in self.cpuinfo.numa_aware_check(core_list) + ] + ) + numa_params += f"-m {numa_ids}" + cmd.extend(numa_params.split()) + elif enable_taskset: + taskset_params = f"-c {cur_process_cores} " + cmd.extend(taskset_params.split()) + with_python = not args.no_python + if with_python: + cmd.append(sys.executable) + cmd.append("-u") + if args.module: + cmd.append("-m") + cmd.append(args.program) + cmd.extend(args.program_args) + cmd_s = " ".join(cmd) + logger.info(cmd_s) + if entrypoint == "": + entrypoint = cmd[0] + del cmd[0] + launch_args[i] = tuple(cmd) + launch_envs[i] = {} + launch_tee[i] = Std.ALL + + if args.rank != -1: # launches single instance, rank, only + break + + ctx = start_processes( + name=args.log_file_prefix, + entrypoint=entrypoint, + args=launch_args, + envs=launch_envs, + log_dir=args.log_path, + tee=launch_tee, + ) + ctx.wait() + + +def _add_memory_allocator_params(parser): + group = parser.add_argument_group("Memory Allocator Parameters") + # allocator control + group.add_argument( + "--enable-tcmalloc", + "--enable_tcmalloc", + action="store_true", + default=False, + help="Enable tcmalloc allocator", + ) + group.add_argument( + "--enable-jemalloc", + "--enable_jemalloc", + action="store_true", + default=False, + help="Enable jemalloc allocator", + ) + group.add_argument( + "--use-default-allocator", + "--use_default_allocator", + action="store_true", + default=False, + help="Use default memory allocator", + ) + + +def _add_multi_instance_params(parser): + group = parser.add_argument_group("Multi-instance Parameters") + # multi-instance control + group.add_argument( + "--ncores-per-instance", + "--ncores_per_instance", + metavar="\b", + default=-1, + type=int, + help="Cores per instance", + ) + group.add_argument( + "--ninstances", + metavar="\b", + default=-1, + type=int, + help="For multi-instance, you should give the cores number you used for per instance.", + ) + group.add_argument( + "--skip-cross-node-cores", + "--skip_cross_node_cores", + action="store_true", 
+ default=False, + help="If specified --ncores-per-instance, skips cross-node cores.", + ) + group.add_argument( + "--rank", + metavar="\b", + default="-1", + type=int, + help="Specify instance index to assign ncores_per_instance for rank; \ +otherwise ncores_per_instance will be assigned sequentially to ninstances. Please refer to \ +https://github.com/intel/intel-extension-for-pytorch/blob/master/docs/tutorials/performance_tuning/launch_script.md", + ) + group.add_argument( + "--latency-mode", + "--latency_mode", + action="store_true", + default=False, + help="By default 4 core per instance and use all physical cores", + ) + group.add_argument( + "--throughput-mode", + "--throughput_mode", + action="store_true", + default=False, + help="By default one instance per node and use all physical cores", + ) + group.add_argument( + "--node-id", + "--node_id", + metavar="\b", + default=-1, + type=int, + help="node id for multi-instance, by default all nodes will be used", + ) + group.add_argument( + "--use-logical-core", + "--use_logical_core", + action="store_true", + default=False, + help="Whether only use physical cores", + ) + group.add_argument( + "--disable-numactl", + "--disable_numactl", + action="store_true", + default=False, + help="Disable numactl", + ) + group.add_argument( + "--disable-taskset", + "--disable_taskset", + action="store_true", + default=False, + help="Disable taskset", + ) + group.add_argument( + "--core-list", + "--core_list", + metavar="\b", + default=None, + type=str, + help='Specify the core list as "core_id, core_id, ....", otherwise, all the cores will be used.', + ) + group.add_argument( + "--log-path", + "--log_path", + metavar="\b", + default="", + type=str, + help="The log file directory. Default path is " + ", which means disable logging to files.", + ) + group.add_argument( + "--log-file-prefix", + "--log_file_prefix", + metavar="\b", + default="run", + type=str, + help="log file prefix", + ) + + +def _add_kmp_iomp_params(parser): + group = parser.add_argument_group("IOMP Parameters") + group.add_argument( + "--disable-iomp", + "--disable_iomp", + action="store_true", + default=False, + help="By default, we use Intel OpenMP and libiomp5.so will be add to LD_PRELOAD", + ) + + +def create_args(parser=None): + """ + Parse the command line options. + + @retval ArgumentParser + """ + parser.add_argument( + "--multi-instance", + "--multi_instance", + action="store_true", + default=False, + help="Enable multi-instance, by default one instance per node", + ) + + parser.add_argument( + "-m", + "--module", + default=False, + action="store_true", + help="Changes each process to interpret the launch script " + "as a python module, executing with the same behavior as" + '"python -m".', + ) + + parser.add_argument( + "--no-python", + "--no_python", + default=False, + action="store_true", + help='Do not prepend the --program script with "python" - just exec ' + "it directly. Useful when the script is not a Python script.", + ) + + _add_memory_allocator_params(parser) + _add_kmp_iomp_params(parser) + + _add_multi_instance_params(parser) + # positional + parser.add_argument( + "program", + type=str, + help="The full path to the program/script to be launched. 
" + "followed by all the arguments for the script", + ) + + # rest from the training program + parser.add_argument("program_args", nargs=REMAINDER) + + +def main(args): + env_before = set(os.environ.keys()) + if platform.system() in ["Windows", "Darwin"]: + raise RuntimeError(f"{platform.system()} is not supported!!!") + + if args.log_path: + os.makedirs(args.log_path, exist_ok=True) + else: + args.log_path = os.devnull + + if args.latency_mode and args.throughput_mode: + raise RuntimeError( + "Either args.latency_mode or args.throughput_mode should be set" + ) + + if not args.no_python and not args.program.endswith(".py"): + raise RuntimeError( + 'For non Python script, you should use "--no-python" parameter.' + ) + + # Verify LD_PRELOAD + if "LD_PRELOAD" in os.environ: + lst_valid = [] + tmp_ldpreload = os.environ["LD_PRELOAD"] + for item in tmp_ldpreload.split(":"): + matches = glob.glob(item) + if len(matches) > 0: + lst_valid.append(item) + else: + logger.warning("%s doesn't exist. Removing it from LD_PRELOAD.", item) + if len(lst_valid) > 0: + os.environ["LD_PRELOAD"] = ":".join(lst_valid) + else: + os.environ["LD_PRELOAD"] = "" + + launcher = _Launcher() + launcher.launch(args) + for x in sorted(set(os.environ.keys()) - env_before): + logger.debug("%s=%s", x, os.environ[x]) + + +if __name__ == "__main__": + parser = ArgumentParser( + description="This is a script for launching PyTorch inference on Intel(R) Xeon(R) Scalable " + "Processors with optimal configurations. Single instance inference, " + "multi-instance inference are enable. To get the peak performance on Intel(R) " + "Xeon(R) Scalable Processors, the script optimizes the configuration " + "of thread and memory management. For thread management, the script configures thread " + "affinity and the preload of Intel OMP library. For memory management, it configures " + "NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc) " + "\n################################# Basic usage ############################# \n" + "\n 1. single instance\n" + "\n >>> python -m torch.backends.xeon.run_cpu python_script args \n" + "\n2. multi-instance \n" + "\n >>> python -m torch.backends.xeon.run_cpu --ninstances xxx " + "--ncores-per-instance xx python_script args\n" + "\n############################################################################# \n", + formatter_class=RawTextHelpFormatter, + ) + create_args(parser) + args = parser.parse_args() + main(args) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fft/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/fft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3bc5191c7b57de89817e5401d0db24aac1c6df5e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fft/__init__.py @@ -0,0 +1,1360 @@ +import sys + +import torch +from torch._C import _add_docstr, _fft # type: ignore[attr-defined] +from torch._torch_docs import factory_common_args, common_args + +__all__ = ['fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn', + 'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn', + 'hfft', 'ihfft', 'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift', + 'Tensor'] + +Tensor = torch.Tensor + +# Note: This not only adds the doc strings for the spectral ops, but +# connects the torch.fft Python namespace to the torch._C._fft builtins. + +fft = _add_docstr(_fft.fft_fft, r""" +fft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the one dimensional discrete Fourier transform of :attr:`input`. 
+ +Note: + The Fourier domain representation of any real signal satisfies the + Hermitian property: `X[i] = conj(X[-i])`. This function always returns both + the positive and negative frequency terms even though, for real inputs, the + negative frequencies are redundant. :func:`~torch.fft.rfft` returns the + more compact one-sided representation where only the positive frequencies + are returned. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + +Args: + input (Tensor): the input tensor + n (int, optional): Signal length. If given, the input will either be zero-padded + or trimmed to this length before computing the FFT. + dim (int, optional): The dimension along which to take the one dimensional FFT. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.fft`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal) + + Calling the backward transform (:func:`~torch.fft.ifft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ifft` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> t = torch.arange(4) + >>> t + tensor([0, 1, 2, 3]) + >>> torch.fft.fft(t) + tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j]) + + >>> t = torch.tensor([0.+1.j, 2.+3.j, 4.+5.j, 6.+7.j]) + >>> torch.fft.fft(t) + tensor([12.+16.j, -8.+0.j, -4.-4.j, 0.-8.j]) +""".format(**common_args)) + +ifft = _add_docstr(_fft.fft_ifft, r""" +ifft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the one dimensional inverse discrete Fourier transform of :attr:`input`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + +Args: + input (Tensor): the input tensor + n (int, optional): Signal length. If given, the input will either be zero-padded + or trimmed to this length before computing the IFFT. + dim (int, optional): The dimension along which to take the one dimensional IFFT. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ifft`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal) + + Calling the forward transform (:func:`~torch.fft.fft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ifft` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> t = torch.tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j]) + >>> torch.fft.ifft(t) + tensor([0.+0.j, 1.+0.j, 2.+0.j, 3.+0.j]) +""".format(**common_args)) + +fft2 = _add_docstr(_fft.fft_fft2, r""" +fft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the 2 dimensional discrete Fourier transform of :attr:`input`. +Equivalent to :func:`~torch.fft.fftn` but FFTs only the last two dimensions by default. + +Note: + The Fourier domain representation of any real signal satisfies the + Hermitian property: ``X[i, j] = conj(X[-i, -j])``. 
This + function always returns all positive and negative frequency terms even + though, for real inputs, half of these values are redundant. + :func:`~torch.fft.rfft2` returns the more compact one-sided representation + where only the positive frequencies of the last dimension are returned. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: last two dimensions. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.fft2`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal) + + Where ``n = prod(s)`` is the logical FFT size. + Calling the backward transform (:func:`~torch.fft.ifft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` + between the two transforms. This is required to make + :func:`~torch.fft.ifft2` the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> x = torch.rand(10, 10, dtype=torch.complex64) + >>> fft2 = torch.fft.fft2(x) + + The discrete Fourier transform is separable, so :func:`~torch.fft.fft2` + here is equivalent to two one-dimensional :func:`~torch.fft.fft` calls: + + >>> two_ffts = torch.fft.fft(torch.fft.fft(x, dim=0), dim=1) + >>> torch.testing.assert_close(fft2, two_ffts, check_stride=False) + +""".format(**common_args)) + +ifft2 = _add_docstr(_fft.fft_ifft2, r""" +ifft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the 2 dimensional inverse discrete Fourier transform of :attr:`input`. +Equivalent to :func:`~torch.fft.ifftn` but IFFTs only the last two dimensions by default. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the IFFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: last two dimensions. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ifft2`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.fft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ifft2` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). 
+ +Keyword args: + {out} + +Example: + + >>> x = torch.rand(10, 10, dtype=torch.complex64) + >>> ifft2 = torch.fft.ifft2(x) + + The discrete Fourier transform is separable, so :func:`~torch.fft.ifft2` + here is equivalent to two one-dimensional :func:`~torch.fft.ifft` calls: + + >>> two_iffts = torch.fft.ifft(torch.fft.ifft(x, dim=0), dim=1) + >>> torch.testing.assert_close(ifft2, two_iffts, check_stride=False) + +""".format(**common_args)) + +fftn = _add_docstr(_fft.fft_fftn, r""" +fftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor + +Computes the N dimensional discrete Fourier transform of :attr:`input`. + +Note: + The Fourier domain representation of any real signal satisfies the + Hermitian property: ``X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n])``. This + function always returns all positive and negative frequency terms even + though, for real inputs, half of these values are redundant. + :func:`~torch.fft.rfftn` returns the more compact one-sided representation + where only the positive frequencies of the last dimension are returned. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.fftn`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal) + + Where ``n = prod(s)`` is the logical FFT size. + Calling the backward transform (:func:`~torch.fft.ifftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` + between the two transforms. This is required to make + :func:`~torch.fft.ifftn` the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> x = torch.rand(10, 10, dtype=torch.complex64) + >>> fftn = torch.fft.fftn(x) + + The discrete Fourier transform is separable, so :func:`~torch.fft.fftn` + here is equivalent to two one-dimensional :func:`~torch.fft.fft` calls: + + >>> two_ffts = torch.fft.fft(torch.fft.fft(x, dim=0), dim=1) + >>> torch.testing.assert_close(fftn, two_ffts, check_stride=False) + +""".format(**common_args)) + +ifftn = _add_docstr(_fft.fft_ifftn, r""" +ifftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor + +Computes the N dimensional inverse discrete Fourier transform of :attr:`input`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the IFFT. + If a length ``-1`` is specified, no padding is done in that dimension. 
+ Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ifftn`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.fftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ifftn` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> x = torch.rand(10, 10, dtype=torch.complex64) + >>> ifftn = torch.fft.ifftn(x) + + The discrete Fourier transform is separable, so :func:`~torch.fft.ifftn` + here is equivalent to two one-dimensional :func:`~torch.fft.ifft` calls: + + >>> two_iffts = torch.fft.ifft(torch.fft.ifft(x, dim=0), dim=1) + >>> torch.testing.assert_close(ifftn, two_iffts, check_stride=False) + +""".format(**common_args)) + +rfft = _add_docstr(_fft.fft_rfft, r""" +rfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the one dimensional Fourier transform of real-valued :attr:`input`. + +The FFT of a real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])`` so +the output contains only the positive frequencies below the Nyquist frequency. +To compute the full output, use :func:`~torch.fft.fft` + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + +Args: + input (Tensor): the real input tensor + n (int, optional): Signal length. If given, the input will either be zero-padded + or trimmed to this length before computing the real FFT. + dim (int, optional): The dimension along which to take the one dimensional real FFT. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.rfft`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal) + + Calling the backward transform (:func:`~torch.fft.irfft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.irfft` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> t = torch.arange(4) + >>> t + tensor([0, 1, 2, 3]) + >>> torch.fft.rfft(t) + tensor([ 6.+0.j, -2.+2.j, -2.+0.j]) + + Compare against the full output from :func:`~torch.fft.fft`: + + >>> torch.fft.fft(t) + tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j]) + + Notice that the symmetric element ``T[-1] == T[1].conj()`` is omitted. + At the Nyquist frequency ``T[-2] == T[2]`` is it's own symmetric pair, + and therefore must always be real-valued. +""".format(**common_args)) + +irfft = _add_docstr(_fft.fft_irfft, r""" +irfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the inverse of :func:`~torch.fft.rfft`. + +:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier +domain, as produced by :func:`~torch.fft.rfft`. By the Hermitian property, the +output will be real-valued. 
+ +Note: + Some input frequencies must be real-valued to satisfy the Hermitian + property. In these cases the imaginary component will be ignored. + For example, any imaginary component in the zero-frequency term cannot + be represented in a real output and so will always be ignored. + +Note: + The correct interpretation of the Hermitian input depends on the length of + the original data, as given by :attr:`n`. This is because each input shape + could correspond to either an odd or even length signal. By default, the + signal is assumed to be even length and odd signals will not round-trip + properly. So, it is recommended to always pass the signal length :attr:`n`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + With default arguments, size of the transformed dimension should be (2^n + 1) as argument + `n` defaults to even output size = 2 * (transformed_dim_size - 1) + +Args: + input (Tensor): the input tensor representing a half-Hermitian signal + n (int, optional): Output signal length. This determines the length of the + output signal. If given, the input will either be zero-padded or trimmed to this + length before computing the real IFFT. + Defaults to even output: ``n=2*(input.size(dim) - 1)``. + dim (int, optional): The dimension along which to take the one dimensional real IFFT. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.irfft`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal) + + Calling the forward transform (:func:`~torch.fft.rfft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.irfft` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> t = torch.linspace(0, 1, 5) + >>> t + tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) + >>> T = torch.fft.rfft(t) + >>> T + tensor([ 2.5000+0.0000j, -0.6250+0.8602j, -0.6250+0.2031j]) + + Without specifying the output length to :func:`~torch.fft.irfft`, the output + will not round-trip properly because the input is odd-length: + + >>> torch.fft.irfft(T) + tensor([0.1562, 0.3511, 0.7812, 1.2114]) + + So, it is recommended to always pass the signal length :attr:`n`: + + >>> roundtrip = torch.fft.irfft(T, t.numel()) + >>> torch.testing.assert_close(roundtrip, t, check_stride=False) + +""".format(**common_args)) + +rfft2 = _add_docstr(_fft.fft_rfft2, r""" +rfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the 2-dimensional discrete Fourier transform of real :attr:`input`. +Equivalent to :func:`~torch.fft.rfftn` but FFTs only the last two dimensions by default. + +The FFT of a real signal is Hermitian-symmetric, ``X[i, j] = conj(X[-i, -j])``, +so the full :func:`~torch.fft.fft2` output contains redundant information. +:func:`~torch.fft.rfft2` instead omits the negative frequencies in the last +dimension. + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. 
+ If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the real FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: last two dimensions. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.rfft2`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real FFT orthonormal) + + Where ``n = prod(s)`` is the logical FFT size. + Calling the backward transform (:func:`~torch.fft.irfft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.irfft2` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> t = torch.rand(10, 10) + >>> rfft2 = torch.fft.rfft2(t) + >>> rfft2.size() + torch.Size([10, 6]) + + Compared against the full output from :func:`~torch.fft.fft2`, we have all + elements up to the Nyquist frequency. + + >>> fft2 = torch.fft.fft2(t) + >>> torch.testing.assert_close(fft2[..., :6], rfft2, check_stride=False) + + The discrete Fourier transform is separable, so :func:`~torch.fft.rfft2` + here is equivalent to a combination of :func:`~torch.fft.fft` and + :func:`~torch.fft.rfft`: + + >>> two_ffts = torch.fft.fft(torch.fft.rfft(t, dim=1), dim=0) + >>> torch.testing.assert_close(rfft2, two_ffts, check_stride=False) + +""".format(**common_args)) + +irfft2 = _add_docstr(_fft.fft_irfft2, r""" +irfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the inverse of :func:`~torch.fft.rfft2`. +Equivalent to :func:`~torch.fft.irfftn` but IFFTs only the last two dimensions by default. + +:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier +domain, as produced by :func:`~torch.fft.rfft2`. By the Hermitian property, the +output will be real-valued. + +Note: + Some input frequencies must be real-valued to satisfy the Hermitian + property. In these cases the imaginary component will be ignored. + For example, any imaginary component in the zero-frequency term cannot + be represented in a real output and so will always be ignored. + +Note: + The correct interpretation of the Hermitian input depends on the length of + the original data, as given by :attr:`s`. This is because each input shape + could correspond to either an odd or even length signal. By default, the + signal is assumed to be even length and odd signals will not round-trip + properly. So, it is recommended to always pass the signal shape :attr:`s`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + With default arguments, the size of last dimension should be (2^n + 1) as argument + `s` defaults to even output size = 2 * (last_dim_size - 1) + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the real FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Defaults to even output in the last dimension: + ``s[-1] = 2*(input.size(dim[-1]) - 1)``. 
+ dim (Tuple[int], optional): Dimensions to be transformed. + The last dimension must be the half-Hermitian compressed dimension. + Default: last two dimensions. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.irfft2`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.rfft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.irfft2` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> t = torch.rand(10, 9) + >>> T = torch.fft.rfft2(t) + + Without specifying the output length to :func:`~torch.fft.irfft2`, the output + will not round-trip properly because the input is odd-length in the last + dimension: + + >>> torch.fft.irfft2(T).size() + torch.Size([10, 8]) + + So, it is recommended to always pass the signal shape :attr:`s`. + + >>> roundtrip = torch.fft.irfft2(T, t.size()) + >>> roundtrip.size() + torch.Size([10, 9]) + >>> torch.testing.assert_close(roundtrip, t, check_stride=False) + +""".format(**common_args)) + +rfftn = _add_docstr(_fft.fft_rfftn, r""" +rfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor + +Computes the N-dimensional discrete Fourier transform of real :attr:`input`. + +The FFT of a real signal is Hermitian-symmetric, +``X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n])`` so the full +:func:`~torch.fft.fftn` output contains redundant information. +:func:`~torch.fft.rfftn` instead omits the negative frequencies in the +last dimension. + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the real FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.rfftn`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real FFT orthonormal) + + Where ``n = prod(s)`` is the logical FFT size. + Calling the backward transform (:func:`~torch.fft.irfftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.irfftn` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> t = torch.rand(10, 10) + >>> rfftn = torch.fft.rfftn(t) + >>> rfftn.size() + torch.Size([10, 6]) + + Compared against the full output from :func:`~torch.fft.fftn`, we have all + elements up to the Nyquist frequency. 
+ + >>> fftn = torch.fft.fftn(t) + >>> torch.testing.assert_close(fftn[..., :6], rfftn, check_stride=False) + + The discrete Fourier transform is separable, so :func:`~torch.fft.rfftn` + here is equivalent to a combination of :func:`~torch.fft.fft` and + :func:`~torch.fft.rfft`: + + >>> two_ffts = torch.fft.fft(torch.fft.rfft(t, dim=1), dim=0) + >>> torch.testing.assert_close(rfftn, two_ffts, check_stride=False) + +""".format(**common_args)) + +irfftn = _add_docstr(_fft.fft_irfftn, r""" +irfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor + +Computes the inverse of :func:`~torch.fft.rfftn`. + +:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier +domain, as produced by :func:`~torch.fft.rfftn`. By the Hermitian property, the +output will be real-valued. + +Note: + Some input frequencies must be real-valued to satisfy the Hermitian + property. In these cases the imaginary component will be ignored. + For example, any imaginary component in the zero-frequency term cannot + be represented in a real output and so will always be ignored. + +Note: + The correct interpretation of the Hermitian input depends on the length of + the original data, as given by :attr:`s`. This is because each input shape + could correspond to either an odd or even length signal. By default, the + signal is assumed to be even length and odd signals will not round-trip + properly. So, it is recommended to always pass the signal shape :attr:`s`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + With default arguments, the size of last dimension should be (2^n + 1) as argument + `s` defaults to even output size = 2 * (last_dim_size - 1) + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the real FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Defaults to even output in the last dimension: + ``s[-1] = 2*(input.size(dim[-1]) - 1)``. + dim (Tuple[int], optional): Dimensions to be transformed. + The last dimension must be the half-Hermitian compressed dimension. + Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.irfftn`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.rfftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.irfftn` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> t = torch.rand(10, 9) + >>> T = torch.fft.rfftn(t) + + Without specifying the output length to :func:`~torch.fft.irfft`, the output + will not round-trip properly because the input is odd-length in the last + dimension: + + >>> torch.fft.irfftn(T).size() + torch.Size([10, 8]) + + So, it is recommended to always pass the signal shape :attr:`s`. 
+ + >>> roundtrip = torch.fft.irfftn(T, t.size()) + >>> roundtrip.size() + torch.Size([10, 9]) + >>> torch.testing.assert_close(roundtrip, t, check_stride=False) + +""".format(**common_args)) + +hfft = _add_docstr(_fft.fft_hfft, r""" +hfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the one dimensional discrete Fourier transform of a Hermitian +symmetric :attr:`input` signal. + +Note: + + :func:`~torch.fft.hfft`/:func:`~torch.fft.ihfft` are analogous to + :func:`~torch.fft.rfft`/:func:`~torch.fft.irfft`. The real FFT expects + a real signal in the time-domain and gives a Hermitian symmetry in the + frequency-domain. The Hermitian FFT is the opposite; Hermitian symmetric in + the time-domain and real-valued in the frequency-domain. For this reason, + special care needs to be taken with the length argument :attr:`n`, in the + same way as with :func:`~torch.fft.irfft`. + +Note: + Because the signal is Hermitian in the time-domain, the result will be + real in the frequency domain. Note that some input frequencies must be + real-valued to satisfy the Hermitian property. In these cases the imaginary + component will be ignored. For example, any imaginary component in + ``input[0]`` would result in one or more complex frequency terms which + cannot be represented in a real output and so will always be ignored. + +Note: + The correct interpretation of the Hermitian input depends on the length of + the original data, as given by :attr:`n`. This is because each input shape + could correspond to either an odd or even length signal. By default, the + signal is assumed to be even length and odd signals will not round-trip + properly. So, it is recommended to always pass the signal length :attr:`n`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + With default arguments, size of the transformed dimension should be (2^n + 1) as argument + `n` defaults to even output size = 2 * (transformed_dim_size - 1) + +Args: + input (Tensor): the input tensor representing a half-Hermitian signal + n (int, optional): Output signal length. This determines the length of the + real output. If given, the input will either be zero-padded or trimmed to this + length before computing the Hermitian FFT. + Defaults to even output: ``n=2*(input.size(dim) - 1)``. + dim (int, optional): The dimension along which to take the one dimensional Hermitian FFT. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.hfft`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal) + + Calling the backward transform (:func:`~torch.fft.ihfft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfft` + the exact inverse. + + Default is ``"backward"`` (no normalization). 
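+
+        For example, the pairing can be checked numerically: applying
+        :func:`~torch.fft.hfft` to the one-sided output of
+        :func:`~torch.fft.ihfft`, with the same ``norm`` mode and the original
+        length :attr:`n`, recovers the real input signal:
+
+        >>> t = torch.rand(10)
+        >>> T = torch.fft.ihfft(t, norm="ortho")
+        >>> torch.testing.assert_close(torch.fft.hfft(T, n=10, norm="ortho"), t, check_stride=False)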
+ +Keyword args: + {out} + +Example: + + Taking a real-valued frequency signal and bringing it into the time domain + gives Hermitian symmetric output: + + >>> t = torch.linspace(0, 1, 5) + >>> t + tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) + >>> T = torch.fft.ifft(t) + >>> T + tensor([ 0.5000-0.0000j, -0.1250-0.1720j, -0.1250-0.0406j, -0.1250+0.0406j, + -0.1250+0.1720j]) + + Note that ``T[1] == T[-1].conj()`` and ``T[2] == T[-2].conj()`` is + redundant. We can thus compute the forward transform without considering + negative frequencies: + + >>> torch.fft.hfft(T[:3], n=5) + tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) + + Like with :func:`~torch.fft.irfft`, the output length must be given in order + to recover an even length output: + + >>> torch.fft.hfft(T[:3]) + tensor([0.1250, 0.2809, 0.6250, 0.9691]) +""".format(**common_args)) + +ihfft = _add_docstr(_fft.fft_ihfft, r""" +ihfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the inverse of :func:`~torch.fft.hfft`. + +:attr:`input` must be a real-valued signal, interpreted in the Fourier domain. +The IFFT of a real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])``. +:func:`~torch.fft.ihfft` represents this in the one-sided form where only the +positive frequencies below the Nyquist frequency are included. To compute the +full output, use :func:`~torch.fft.ifft`. + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + +Args: + input (Tensor): the real input tensor + n (int, optional): Signal length. If given, the input will either be zero-padded + or trimmed to this length before computing the Hermitian IFFT. + dim (int, optional): The dimension along which to take the one dimensional Hermitian IFFT. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ihfft`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal) + + Calling the forward transform (:func:`~torch.fft.hfft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfft` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> t = torch.arange(5) + >>> t + tensor([0, 1, 2, 3, 4]) + >>> torch.fft.ihfft(t) + tensor([ 2.0000-0.0000j, -0.5000-0.6882j, -0.5000-0.1625j]) + + Compare against the full output from :func:`~torch.fft.ifft`: + + >>> torch.fft.ifft(t) + tensor([ 2.0000-0.0000j, -0.5000-0.6882j, -0.5000-0.1625j, -0.5000+0.1625j, + -0.5000+0.6882j]) +""".format(**common_args)) + +hfft2 = _add_docstr(_fft.fft_hfft2, r""" +hfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the 2-dimensional discrete Fourier transform of a Hermitian symmetric +:attr:`input` signal. Equivalent to :func:`~torch.fft.hfftn` but only +transforms the last two dimensions by default. + +:attr:`input` is interpreted as a one-sided Hermitian signal in the time +domain. By the Hermitian property, the Fourier transform will be real-valued. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. 
+ With default arguments, the size of last dimension should be (2^n + 1) as argument + `s` defaults to even output size = 2 * (last_dim_size - 1) + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the Hermitian FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Defaults to even output in the last dimension: + ``s[-1] = 2*(input.size(dim[-1]) - 1)``. + dim (Tuple[int], optional): Dimensions to be transformed. + The last dimension must be the half-Hermitian compressed dimension. + Default: last two dimensions. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.hfft2`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal) + + Where ``n = prod(s)`` is the logical FFT size. + Calling the backward transform (:func:`~torch.fft.ihfft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfft2` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + Starting from a real frequency-space signal, we can generate a + Hermitian-symmetric time-domain signal: + >>> T = torch.rand(10, 9) + >>> t = torch.fft.ihfft2(T) + + Without specifying the output length to :func:`~torch.fft.hfftn`, the + output will not round-trip properly because the input is odd-length in the + last dimension: + + >>> torch.fft.hfft2(t).size() + torch.Size([10, 10]) + + So, it is recommended to always pass the signal shape :attr:`s`. + + >>> roundtrip = torch.fft.hfft2(t, T.size()) + >>> roundtrip.size() + torch.Size([10, 9]) + >>> torch.allclose(roundtrip, T) + True + +""".format(**common_args)) + +ihfft2 = _add_docstr(_fft.fft_ihfft2, r""" +ihfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the 2-dimensional inverse discrete Fourier transform of real +:attr:`input`. Equivalent to :func:`~torch.fft.ihfftn` but transforms only the +two last dimensions by default. + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the Hermitian IFFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: last two dimensions. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ihfft2`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.hfft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfft2` + the exact inverse. 
+
+    Default is ``"backward"`` (normalize by ``1/n``).
+
+Keyword args:
+    {out}
+
+Example:
+
+    >>> T = torch.rand(10, 10)
+    >>> t = torch.fft.ihfft2(T)
+    >>> t.size()
+    torch.Size([10, 6])
+
+    Compared against the full output from :func:`~torch.fft.ifft2`, the
+    Hermitian time-space signal takes up only half the space.
+
+    >>> ifft2 = torch.fft.ifft2(T)
+    >>> torch.allclose(ifft2[..., :6], t)
+    True
+
+    The discrete Fourier transform is separable, so :func:`~torch.fft.ihfft2`
+    here is equivalent to a combination of :func:`~torch.fft.ifft` and
+    :func:`~torch.fft.ihfft`:
+
+    >>> two_ffts = torch.fft.ifft(torch.fft.ihfft(T, dim=1), dim=0)
+    >>> torch.allclose(t, two_ffts)
+    True
+
+""".format(**common_args))
+
+hfftn = _add_docstr(_fft.fft_hfftn, r"""
+hfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
+
+Computes the n-dimensional discrete Fourier transform of a Hermitian symmetric
+:attr:`input` signal.
+
+:attr:`input` is interpreted as a one-sided Hermitian signal in the time
+domain. By the Hermitian property, the Fourier transform will be real-valued.
+
+Note:
+    :func:`~torch.fft.hfftn`/:func:`~torch.fft.ihfftn` are analogous to
+    :func:`~torch.fft.rfftn`/:func:`~torch.fft.irfftn`. The real FFT expects
+    a real signal in the time-domain and gives Hermitian symmetry in the
+    frequency-domain. The Hermitian FFT is the opposite; Hermitian symmetric in
+    the time-domain and real-valued in the frequency-domain. For this reason,
+    special care needs to be taken with the shape argument :attr:`s`, in the
+    same way as with :func:`~torch.fft.irfftn`.
+
+Note:
+    Some input frequencies must be real-valued to satisfy the Hermitian
+    property. In these cases the imaginary component will be ignored.
+    For example, any imaginary component in the zero-frequency term cannot
+    be represented in a real output and so will always be ignored.
+
+Note:
+    The correct interpretation of the Hermitian input depends on the length of
+    the original data, as given by :attr:`s`. This is because each input shape
+    could correspond to either an odd or even length signal. By default, the
+    signal is assumed to be even length and odd signals will not round-trip
+    properly. It is recommended to always pass the signal shape :attr:`s`.
+
+Note:
+    Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
+    However it only supports powers of 2 signal length in every transformed dimensions.
+    With default arguments, the size of last dimension should be (2^n + 1) as argument
+    `s` defaults to even output size = 2 * (last_dim_size - 1)
+
+Args:
+    input (Tensor): the input tensor
+    s (Tuple[int], optional): Signal size in the transformed dimensions.
+        If given, each dimension ``dim[i]`` will either be zero-padded or
+        trimmed to the length ``s[i]`` before computing the real FFT.
+        If a length ``-1`` is specified, no padding is done in that dimension.
+        Defaults to even output in the last dimension:
+        ``s[-1] = 2*(input.size(dim[-1]) - 1)``.
+    dim (Tuple[int], optional): Dimensions to be transformed.
+        The last dimension must be the half-Hermitian compressed dimension.
+        Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
+    norm (str, optional): Normalization mode. For the forward transform
+        (:func:`~torch.fft.hfftn`), these correspond to:
+
+        * ``"forward"`` - normalize by ``1/n``
+        * ``"backward"`` - no normalization
+        * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal)
+
+        Where ``n = prod(s)`` is the logical FFT size.
+ Calling the backward transform (:func:`~torch.fft.ihfftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfftn` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + Starting from a real frequency-space signal, we can generate a + Hermitian-symmetric time-domain signal: + >>> T = torch.rand(10, 9) + >>> t = torch.fft.ihfftn(T) + + Without specifying the output length to :func:`~torch.fft.hfftn`, the + output will not round-trip properly because the input is odd-length in the + last dimension: + + >>> torch.fft.hfftn(t).size() + torch.Size([10, 10]) + + So, it is recommended to always pass the signal shape :attr:`s`. + + >>> roundtrip = torch.fft.hfftn(t, T.size()) + >>> roundtrip.size() + torch.Size([10, 9]) + >>> torch.allclose(roundtrip, T) + True + +""".format(**common_args)) + +ihfftn = _add_docstr(_fft.fft_ihfftn, r""" +ihfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor + +Computes the N-dimensional inverse discrete Fourier transform of real :attr:`input`. + +:attr:`input` must be a real-valued signal, interpreted in the Fourier domain. +The n-dimensional IFFT of a real signal is Hermitian-symmetric, +``X[i, j, ...] = conj(X[-i, -j, ...])``. :func:`~torch.fft.ihfftn` represents +this in the one-sided form where only the positive frequencies below the +Nyquist frequency are included in the last signal dimension. To compute the +full output, use :func:`~torch.fft.ifftn`. + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the Hermitian IFFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ihfftn`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.hfftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfftn` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> T = torch.rand(10, 10) + >>> ihfftn = torch.fft.ihfftn(T) + >>> ihfftn.size() + torch.Size([10, 6]) + + Compared against the full output from :func:`~torch.fft.ifftn`, we have all + elements up to the Nyquist frequency. 
+
+    >>> ifftn = torch.fft.ifftn(T)
+    >>> torch.allclose(ifftn[..., :6], ihfftn)
+    True
+
+    The discrete Fourier transform is separable, so :func:`~torch.fft.ihfftn`
+    here is equivalent to a combination of :func:`~torch.fft.ihfft` and
+    :func:`~torch.fft.ifft`:
+
+    >>> two_iffts = torch.fft.ifft(torch.fft.ihfft(T, dim=1), dim=0)
+    >>> torch.allclose(ihfftn, two_iffts)
+    True
+
+""".format(**common_args))
+
+fftfreq = _add_docstr(_fft.fft_fftfreq, r"""
+fftfreq(n, d=1.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
+
+Computes the discrete Fourier transform sample frequencies for a signal of size :attr:`n`.
+
+Note:
+    By convention, :func:`~torch.fft.fft` returns positive frequency terms
+    first, followed by the negative frequencies in reverse order, so that
+    ``f[-i]`` for all :math:`0 < i \leq n/2` in Python gives the negative
+    frequency terms. For an FFT of length :attr:`n` and with inputs spaced in
+    length unit :attr:`d`, the frequencies are::
+
+        f = [0, 1, ..., (n - 1) // 2, -(n // 2), ..., -1] / (d * n)
+
+Note:
+    For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as
+    either negative or positive. :func:`~torch.fft.fftfreq` follows NumPy's
+    convention of taking it to be negative.
+
+Args:
+    n (int): the FFT length
+    d (float, optional): The sampling length scale.
+        The spacing between individual samples of the FFT input.
+        The default assumes unit spacing; dividing that result by the actual
+        spacing gives the result in physical frequency units.
+
+Keyword Args:
+    {out}
+    {dtype}
+    {layout}
+    {device}
+    {requires_grad}
+
+Example:
+
+    >>> torch.fft.fftfreq(5)
+    tensor([ 0.0000,  0.2000,  0.4000, -0.4000, -0.2000])
+
+    For even input, we can see the Nyquist frequency at ``f[2]`` is given as
+    negative:
+
+    >>> torch.fft.fftfreq(4)
+    tensor([ 0.0000,  0.2500, -0.5000, -0.2500])
+
+""".format(**factory_common_args))
+
+rfftfreq = _add_docstr(_fft.fft_rfftfreq, r"""
+rfftfreq(n, d=1.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
+
+Computes the sample frequencies for :func:`~torch.fft.rfft` with a signal of size :attr:`n`.
+
+Note:
+    :func:`~torch.fft.rfft` returns Hermitian one-sided output, so only the
+    positive frequency terms are returned. For a real FFT of length :attr:`n`
+    and with inputs spaced in length unit :attr:`d`, the frequencies are::
+
+        f = torch.arange(n // 2 + 1) / (d * n)
+
+Note:
+    For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as
+    either negative or positive. Unlike :func:`~torch.fft.fftfreq`,
+    :func:`~torch.fft.rfftfreq` always returns it as positive.
+
+Args:
+    n (int): the real FFT length
+    d (float, optional): The sampling length scale.
+        The spacing between individual samples of the FFT input.
+        The default assumes unit spacing; dividing that result by the actual
+        spacing gives the result in physical frequency units.
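+
+        For example, halving the sample spacing doubles every frequency, which
+        is the same as dividing the unit-spacing result by ``d``:
+
+        >>> torch.testing.assert_close(torch.fft.rfftfreq(8, d=0.5), torch.fft.rfftfreq(8) / 0.5)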
+ +Keyword Args: + {out} + {dtype} + {layout} + {device} + {requires_grad} + +Example: + + >>> torch.fft.rfftfreq(5) + tensor([0.0000, 0.2000, 0.4000]) + + >>> torch.fft.rfftfreq(4) + tensor([0.0000, 0.2500, 0.5000]) + + Compared to the output from :func:`~torch.fft.fftfreq`, we see that the + Nyquist frequency at ``f[2]`` has changed sign: + >>> torch.fft.fftfreq(4) + tensor([ 0.0000, 0.2500, -0.5000, -0.2500]) + +""".format(**factory_common_args)) + +fftshift = _add_docstr(_fft.fft_fftshift, r""" +fftshift(input, dim=None) -> Tensor + +Reorders n-dimensional FFT data, as provided by :func:`~torch.fft.fftn`, to have +negative frequency terms first. + +This performs a periodic shift of n-dimensional data such that the origin +``(0, ..., 0)`` is moved to the center of the tensor. Specifically, to +``input.shape[dim] // 2`` in each selected dimension. + +Note: + By convention, the FFT returns positive frequency terms first, followed by + the negative frequencies in reverse order, so that ``f[-i]`` for all + :math:`0 < i \leq n/2` in Python gives the negative frequency terms. + :func:`~torch.fft.fftshift` rearranges all frequencies into ascending order + from negative to positive with the zero-frequency term in the center. + +Note: + For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as + either negative or positive. :func:`~torch.fft.fftshift` always puts the + Nyquist term at the 0-index. This is the same convention used by + :func:`~torch.fft.fftfreq`. + +Args: + input (Tensor): the tensor in FFT order + dim (int, Tuple[int], optional): The dimensions to rearrange. + Only dimensions specified here will be rearranged, any other dimensions + will be left in their original order. + Default: All dimensions of :attr:`input`. + +Example: + + >>> f = torch.fft.fftfreq(4) + >>> f + tensor([ 0.0000, 0.2500, -0.5000, -0.2500]) + + >>> torch.fft.fftshift(f) + tensor([-0.5000, -0.2500, 0.0000, 0.2500]) + + Also notice that the Nyquist frequency term at ``f[2]`` was moved to the + beginning of the tensor. + + This also works for multi-dimensional transforms: + + >>> x = torch.fft.fftfreq(5, d=1/5) + 0.1 * torch.fft.fftfreq(5, d=1/5).unsqueeze(1) + >>> x + tensor([[ 0.0000, 1.0000, 2.0000, -2.0000, -1.0000], + [ 0.1000, 1.1000, 2.1000, -1.9000, -0.9000], + [ 0.2000, 1.2000, 2.2000, -1.8000, -0.8000], + [-0.2000, 0.8000, 1.8000, -2.2000, -1.2000], + [-0.1000, 0.9000, 1.9000, -2.1000, -1.1000]]) + + >>> torch.fft.fftshift(x) + tensor([[-2.2000, -1.2000, -0.2000, 0.8000, 1.8000], + [-2.1000, -1.1000, -0.1000, 0.9000, 1.9000], + [-2.0000, -1.0000, 0.0000, 1.0000, 2.0000], + [-1.9000, -0.9000, 0.1000, 1.1000, 2.1000], + [-1.8000, -0.8000, 0.2000, 1.2000, 2.2000]]) + + :func:`~torch.fft.fftshift` can also be useful for spatial data. If our + data is defined on a centered grid (``[-(N//2), (N-1)//2]``) then we can + use the standard FFT defined on an uncentered grid (``[0, N)``) by first + applying an :func:`~torch.fft.ifftshift`. + + >>> x_centered = torch.arange(-5, 5) + >>> x_uncentered = torch.fft.ifftshift(x_centered) + >>> fft_uncentered = torch.fft.fft(x_uncentered) + + Similarly, we can convert the frequency domain components to centered + convention by applying :func:`~torch.fft.fftshift`. 
+ + >>> fft_centered = torch.fft.fftshift(fft_uncentered) + + The inverse transform, from centered Fourier space back to centered spatial + data, can be performed by applying the inverse shifts in reverse order: + + >>> x_centered_2 = torch.fft.fftshift(torch.fft.ifft(torch.fft.ifftshift(fft_centered))) + >>> torch.testing.assert_close(x_centered.to(torch.complex64), x_centered_2, check_stride=False) + + +""") + +ifftshift = _add_docstr(_fft.fft_ifftshift, r""" +ifftshift(input, dim=None) -> Tensor + +Inverse of :func:`~torch.fft.fftshift`. + +Args: + input (Tensor): the tensor in FFT order + dim (int, Tuple[int], optional): The dimensions to rearrange. + Only dimensions specified here will be rearranged, any other dimensions + will be left in their original order. + Default: All dimensions of :attr:`input`. + +Example: + + >>> f = torch.fft.fftfreq(5) + >>> f + tensor([ 0.0000, 0.2000, 0.4000, -0.4000, -0.2000]) + + A round-trip through :func:`~torch.fft.fftshift` and + :func:`~torch.fft.ifftshift` gives the same result: + + >>> shifted = torch.fft.fftshift(f) + >>> torch.fft.ifftshift(shifted) + tensor([ 0.0000, 0.2000, 0.4000, -0.4000, -0.2000]) + +""") diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fft/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fft/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02f204f2d1a3c4cc2d96100b382442e860a7c4c0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fft/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_experimental.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_experimental.py new file mode 100644 index 0000000000000000000000000000000000000000..158e0cdbda9f4312195b9b98835aa1f3b1466902 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_experimental.py @@ -0,0 +1,28 @@ +"""Experimental classes and functions used by ONNX export.""" + +import dataclasses +from typing import Mapping, Optional, Sequence, Set, Type, Union + +import torch +import torch._C._onnx as _C_onnx + + +@dataclasses.dataclass +class ExportOptions: + """Arguments used by :func:`torch.onnx.export`. + + TODO: Adopt this in `torch.onnx.export` api to replace keyword arguments. 
+ """ + + export_params: bool = True + verbose: bool = False + training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL + input_names: Optional[Sequence[str]] = None + output_names: Optional[Sequence[str]] = None + operator_export_type: _C_onnx.OperatorExportTypes = _C_onnx.OperatorExportTypes.ONNX + opset_version: Optional[int] = None + do_constant_folding: bool = True + dynamic_axes: Optional[Mapping[str, Union[Mapping[int, str], Sequence[int]]]] = None + keep_initializers_as_inputs: Optional[bool] = None + custom_opsets: Optional[Mapping[str, int]] = None + export_modules_as_functions: Union[bool, Set[Type[torch.nn.Module]]] = False diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_type_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_type_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2f2b6cf63f4ebd372b35c7d0375bfa844804a850 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_type_utils.py @@ -0,0 +1,366 @@ +"""Utilities for converting and operating on ONNX, JIT and torch types.""" +from __future__ import annotations + +import enum +import typing +from typing import Dict, Literal, Optional, Union + +import torch +from torch._C import _onnx as _C_onnx +from torch.onnx import errors +from torch.onnx._internal import _beartype + +if typing.TYPE_CHECKING: + # Hack to help mypy to recognize torch._C.Value + from torch import _C # noqa: F401 + +ScalarName = Literal[ + "Byte", + "Char", + "Double", + "Float", + "Half", + "Int", + "Long", + "Short", + "Bool", + "ComplexHalf", + "ComplexFloat", + "ComplexDouble", + "QInt8", + "QUInt8", + "QInt32", + "BFloat16", + "Float8E5M2", + "Float8E4M3FN", + "Undefined", +] + +TorchName = Literal[ + "bool", + "uint8_t", + "int8_t", + "double", + "float", + "half", + "int", + "int64_t", + "int16_t", + "complex32", + "complex64", + "complex128", + "qint8", + "quint8", + "qint32", + "bfloat16", + "float8_e5m2", + "float8_e4m3fn", +] + + +class JitScalarType(enum.IntEnum): + """Scalar types defined in torch. + + Use ``JitScalarType`` to convert from torch and JIT scalar types to ONNX scalar types. + + Examples: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> # xdoctest: +IGNORE_WANT("win32 has different output") + >>> JitScalarType.from_value(torch.ones(1, 2)).onnx_type() + TensorProtoDataType.FLOAT + + >>> JitScalarType.from_value(torch_c_value_with_type_float).onnx_type() + TensorProtoDataType.FLOAT + + >>> JitScalarType.from_dtype(torch.get_default_dtype).onnx_type() + TensorProtoDataType.FLOAT + + """ + + # Order defined in https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h + UINT8 = 0 + INT8 = enum.auto() # 1 + INT16 = enum.auto() # 2 + INT = enum.auto() # 3 + INT64 = enum.auto() # 4 + HALF = enum.auto() # 5 + FLOAT = enum.auto() # 6 + DOUBLE = enum.auto() # 7 + COMPLEX32 = enum.auto() # 8 + COMPLEX64 = enum.auto() # 9 + COMPLEX128 = enum.auto() # 10 + BOOL = enum.auto() # 11 + QINT8 = enum.auto() # 12 + QUINT8 = enum.auto() # 13 + QINT32 = enum.auto() # 14 + BFLOAT16 = enum.auto() # 15 + FLOAT8E5M2 = enum.auto() # 16 + FLOAT8E4M3FN = enum.auto() # 17 + UNDEFINED = enum.auto() # 18 + + @classmethod + @_beartype.beartype + def _from_name( + cls, name: Union[ScalarName, TorchName, Optional[str]] + ) -> JitScalarType: + """Convert a JIT scalar type or torch type name to ScalarType. + + Note: DO NOT USE this API when `name` comes from a `torch._C.Value.type()` calls. 
+ A "RuntimeError: INTERNAL ASSERT FAILED at "../aten/src/ATen/core/jit_type_base.h" can + be raised in several scenarios where shape info is not present. + Instead use `from_value` API which is safer. + + Args: + name: JIT scalar type name (Byte) or torch type name (uint8_t). + + Returns: + JitScalarType + + Raises: + OnnxExporterError: if name is not a valid scalar type name or if it is None. + """ + if name is None: + raise errors.OnnxExporterError("Scalar type name cannot be None") + if valid_scalar_name(name): + return _SCALAR_NAME_TO_TYPE[name] # type: ignore[index] + if valid_torch_name(name): + return _TORCH_NAME_TO_SCALAR_TYPE[name] # type: ignore[index] + + raise errors.OnnxExporterError(f"Unknown torch or scalar type: '{name}'") + + @classmethod + @_beartype.beartype + def from_dtype(cls, dtype: Optional[torch.dtype]) -> JitScalarType: + """Convert a torch dtype to JitScalarType. + + Note: DO NOT USE this API when `dtype` comes from a `torch._C.Value.type()` calls. + A "RuntimeError: INTERNAL ASSERT FAILED at "../aten/src/ATen/core/jit_type_base.h" can + be raised in several scenarios where shape info is not present. + Instead use `from_value` API which is safer. + + Args: + dtype: A torch.dtype to create a JitScalarType from + + Returns: + JitScalarType + + Raises: + OnnxExporterError: if dtype is not a valid torch.dtype or if it is None. + """ + if dtype not in _DTYPE_TO_SCALAR_TYPE: + raise errors.OnnxExporterError(f"Unknown dtype: {dtype}") + return _DTYPE_TO_SCALAR_TYPE[dtype] + + @classmethod + @_beartype.beartype + def from_value( + cls, value: Union[None, torch._C.Value, torch.Tensor], default=None + ) -> JitScalarType: + """Create a JitScalarType from an value's scalar type. + + Args: + value: An object to fetch scalar type from. + default: The JitScalarType to return if a valid scalar cannot be fetched from value + + Returns: + JitScalarType. + + Raises: + OnnxExporterError: if value does not have a valid scalar type and default is None. + SymbolicValueError: when value.type()'s info are empty and default is None + """ + + if not isinstance(value, (torch._C.Value, torch.Tensor)) or ( + isinstance(value, torch._C.Value) and value.node().mustBeNone() + ): + # default value of type JitScalarType is returned when value is not valid + if default is None: + raise errors.OnnxExporterError( + "value must be either torch._C.Value or torch.Tensor objects." + ) + elif not isinstance(default, JitScalarType): + raise errors.OnnxExporterError( + "default value must be a JitScalarType object." + ) + return default + + # Each value type has their own way of storing scalar type + if isinstance(value, torch.Tensor): + return cls.from_dtype(value.dtype) + if isinstance(value.type(), torch.ListType): + try: + return cls.from_dtype(value.type().getElementType().dtype()) + except RuntimeError: + return cls._from_name(str(value.type().getElementType())) + if isinstance(value.type(), torch._C.OptionalType): + if value.type().getElementType().dtype() is None: + if isinstance(default, JitScalarType): + return default + raise errors.OnnxExporterError( + "default value must be a JitScalarType object." + ) + return cls.from_dtype(value.type().getElementType().dtype()) + + scalar_type = None + if value.node().kind() != "prim::Constant" or not isinstance( + value.type(), torch._C.NoneType + ): + # value must be a non-list torch._C.Value scalar + scalar_type = value.type().scalarType() + + if scalar_type is not None: + return cls._from_name(scalar_type) + + # When everything fails... 
try to default + if default is not None: + return default + raise errors.SymbolicValueError( + f"Cannot determine scalar type for this '{type(value.type())}' instance and " + "a default value was not provided.", + value, + ) + + @_beartype.beartype + def scalar_name(self) -> ScalarName: + """Convert a JitScalarType to a JIT scalar type name.""" + return _SCALAR_TYPE_TO_NAME[self] + + @_beartype.beartype + def torch_name(self) -> TorchName: + """Convert a JitScalarType to a torch type name.""" + return _SCALAR_TYPE_TO_TORCH_NAME[self] + + @_beartype.beartype + def dtype(self) -> torch.dtype: + """Convert a JitScalarType to a torch dtype.""" + return _SCALAR_TYPE_TO_DTYPE[self] + + @_beartype.beartype + def onnx_type(self) -> _C_onnx.TensorProtoDataType: + """Convert a JitScalarType to an ONNX data type.""" + if self not in _SCALAR_TYPE_TO_ONNX: + raise errors.OnnxExporterError( + f"Scalar type {self} cannot be converted to ONNX" + ) + return _SCALAR_TYPE_TO_ONNX[self] + + @_beartype.beartype + def onnx_compatible(self) -> bool: + """Return whether this JitScalarType is compatible with ONNX.""" + return ( + self in _SCALAR_TYPE_TO_ONNX + and self != JitScalarType.UNDEFINED + and self != JitScalarType.COMPLEX32 + ) + + +@_beartype.beartype +def valid_scalar_name(scalar_name: Union[ScalarName, str]) -> bool: + """Return whether the given scalar name is a valid JIT scalar type name.""" + return scalar_name in _SCALAR_NAME_TO_TYPE + + +@_beartype.beartype +def valid_torch_name(torch_name: Union[TorchName, str]) -> bool: + """Return whether the given torch name is a valid torch type name.""" + return torch_name in _TORCH_NAME_TO_SCALAR_TYPE + + +# https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h +_SCALAR_TYPE_TO_NAME: Dict[JitScalarType, ScalarName] = { + JitScalarType.BOOL: "Bool", + JitScalarType.UINT8: "Byte", + JitScalarType.INT8: "Char", + JitScalarType.INT16: "Short", + JitScalarType.INT: "Int", + JitScalarType.INT64: "Long", + JitScalarType.HALF: "Half", + JitScalarType.FLOAT: "Float", + JitScalarType.DOUBLE: "Double", + JitScalarType.COMPLEX32: "ComplexHalf", + JitScalarType.COMPLEX64: "ComplexFloat", + JitScalarType.COMPLEX128: "ComplexDouble", + JitScalarType.QINT8: "QInt8", + JitScalarType.QUINT8: "QUInt8", + JitScalarType.QINT32: "QInt32", + JitScalarType.BFLOAT16: "BFloat16", + JitScalarType.FLOAT8E5M2: "Float8E5M2", + JitScalarType.FLOAT8E4M3FN: "Float8E4M3FN", + JitScalarType.UNDEFINED: "Undefined", +} + +_SCALAR_NAME_TO_TYPE: Dict[ScalarName, JitScalarType] = { + v: k for k, v in _SCALAR_TYPE_TO_NAME.items() +} + +_SCALAR_TYPE_TO_TORCH_NAME: Dict[JitScalarType, TorchName] = { + JitScalarType.BOOL: "bool", + JitScalarType.UINT8: "uint8_t", + JitScalarType.INT8: "int8_t", + JitScalarType.INT16: "int16_t", + JitScalarType.INT: "int", + JitScalarType.INT64: "int64_t", + JitScalarType.HALF: "half", + JitScalarType.FLOAT: "float", + JitScalarType.DOUBLE: "double", + JitScalarType.COMPLEX32: "complex32", + JitScalarType.COMPLEX64: "complex64", + JitScalarType.COMPLEX128: "complex128", + JitScalarType.QINT8: "qint8", + JitScalarType.QUINT8: "quint8", + JitScalarType.QINT32: "qint32", + JitScalarType.BFLOAT16: "bfloat16", + JitScalarType.FLOAT8E5M2: "float8_e5m2", + JitScalarType.FLOAT8E4M3FN: "float8_e4m3fn", +} + +_TORCH_NAME_TO_SCALAR_TYPE: Dict[TorchName, JitScalarType] = { + v: k for k, v in _SCALAR_TYPE_TO_TORCH_NAME.items() +} + +_SCALAR_TYPE_TO_ONNX = { + JitScalarType.BOOL: _C_onnx.TensorProtoDataType.BOOL, + 
JitScalarType.UINT8: _C_onnx.TensorProtoDataType.UINT8, + JitScalarType.INT8: _C_onnx.TensorProtoDataType.INT8, + JitScalarType.INT16: _C_onnx.TensorProtoDataType.INT16, + JitScalarType.INT: _C_onnx.TensorProtoDataType.INT32, + JitScalarType.INT64: _C_onnx.TensorProtoDataType.INT64, + JitScalarType.HALF: _C_onnx.TensorProtoDataType.FLOAT16, + JitScalarType.FLOAT: _C_onnx.TensorProtoDataType.FLOAT, + JitScalarType.DOUBLE: _C_onnx.TensorProtoDataType.DOUBLE, + JitScalarType.COMPLEX64: _C_onnx.TensorProtoDataType.COMPLEX64, + JitScalarType.COMPLEX128: _C_onnx.TensorProtoDataType.COMPLEX128, + JitScalarType.BFLOAT16: _C_onnx.TensorProtoDataType.BFLOAT16, + JitScalarType.UNDEFINED: _C_onnx.TensorProtoDataType.UNDEFINED, + JitScalarType.COMPLEX32: _C_onnx.TensorProtoDataType.UNDEFINED, + JitScalarType.QINT8: _C_onnx.TensorProtoDataType.INT8, + JitScalarType.QUINT8: _C_onnx.TensorProtoDataType.UINT8, + JitScalarType.QINT32: _C_onnx.TensorProtoDataType.INT32, + JitScalarType.FLOAT8E5M2: _C_onnx.TensorProtoDataType.FLOAT8E5M2, + JitScalarType.FLOAT8E4M3FN: _C_onnx.TensorProtoDataType.FLOAT8E4M3FN, +} + +# source of truth is +# https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_dtypes.cpp +_SCALAR_TYPE_TO_DTYPE = { + JitScalarType.BOOL: torch.bool, + JitScalarType.UINT8: torch.uint8, + JitScalarType.INT8: torch.int8, + JitScalarType.INT16: torch.short, + JitScalarType.INT: torch.int, + JitScalarType.INT64: torch.int64, + JitScalarType.HALF: torch.half, + JitScalarType.FLOAT: torch.float, + JitScalarType.DOUBLE: torch.double, + JitScalarType.COMPLEX32: torch.complex32, + JitScalarType.COMPLEX64: torch.complex64, + JitScalarType.COMPLEX128: torch.complex128, + JitScalarType.QINT8: torch.qint8, + JitScalarType.QUINT8: torch.quint8, + JitScalarType.QINT32: torch.qint32, + JitScalarType.BFLOAT16: torch.bfloat16, + JitScalarType.FLOAT8E5M2: torch.float8_e5m2, + JitScalarType.FLOAT8E4M3FN: torch.float8_e4m3fn, +} + +_DTYPE_TO_SCALAR_TYPE = {v: k for k, v in _SCALAR_TYPE_TO_DTYPE.items()} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset11.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset11.py new file mode 100644 index 0000000000000000000000000000000000000000..fc1d205ce33666a5214d484f2470e59fdef3822c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset11.py @@ -0,0 +1,1650 @@ +"""This file exports ONNX ops for opset 11.""" +from __future__ import annotations + +import functools +import sys +import warnings +from typing import Optional, Sequence + +import torch +from torch import _C +from torch._C import _onnx as _C_onnx +from torch.onnx import ( + _type_utils, + errors, + symbolic_helper, + symbolic_opset10 as opset10, + symbolic_opset9 as opset9, + utils, +) +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import _beartype, jit_utils, registration + +# EDITING THIS FILE? READ THIS FIRST! 
+# see Note [Edit Symbolic Files] in README.md + +__all__ = [ + "add", + "append", + "arange", + "argsort", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "cat", + "chunk", + "clamp_max", + "clamp_min", + "clamp", + "constant_pad_nd", + "cumsum", + "Delete", + "embedding_bag", + "embedding_renorm", + "flatten", + "gather", + "hardtanh", + "hstack", + "im2col", + "index_fill", + "index", + "index_copy", + "index_put", + "insert", + "linalg_det", + "linalg_vector_norm", + "logdet", + "masked_scatter", + "masked_select", + "mm", + "narrow", + "normal", + "pad", + "pixel_shuffle", + "pop", + "prim_constant_chunk", + "reflection_pad", + "relu6", + "remainder", + "replication_pad", + "round", + "scatter", + "select", + "size", + "sort", + "split_with_sizes", + "split", + "squeeze", + "stack", + "topk", + "unbind", + "unique_dim", + "unsqueeze", + "vstack", +] + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=11) + + +def _apply_params(*args, **kwargs): + """Returns a decorator that calls the decorated (higher-order) function with the given parameters.""" + + def _apply(fn): + return fn(*args, **kwargs) + + return _apply + + +@_onnx_symbolic("aten::hardtanh") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "f", "f") +@_beartype.beartype +def hardtanh(g: jit_utils.GraphContext, self: _C.Value, min_val: float, max_val: float): + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.FLOAT + ) + min_val = g.op( + "Constant", + value_t=torch.tensor(min_val, dtype=scalar_type.dtype()), + ) + max_val = g.op( + "Constant", + value_t=torch.tensor(max_val, dtype=scalar_type.dtype()), + ) + return opset9._op_with_optional_float_cast( + g, "Clip", self, min_val, max_val, opset_before=12 + ) + + +@_onnx_symbolic("aten::clamp") +@_beartype.beartype +def clamp(g: jit_utils.GraphContext, self, min, max): + @_beartype.beartype + def _cast_if_not_none(tensor, dtype): + if tensor is not None and not symbolic_helper._is_none(tensor): + return g.op( + "Cast", + tensor, + to_i=dtype.onnx_type(), + ) + else: + return tensor + + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.UNDEFINED + ) + if scalar_type != _type_utils.JitScalarType.UNDEFINED: + min = _cast_if_not_none(min, scalar_type) + max = _cast_if_not_none(max, scalar_type) + + if symbolic_helper._is_none(min): + return clamp_max(g, self, max) + elif symbolic_helper._is_none(max): + return clamp_min(g, self, min) + else: + if ( + symbolic_helper._get_tensor_rank(min) == 0 + and symbolic_helper._get_tensor_rank(max) == 0 + ): + return opset9._op_with_optional_float_cast( + g, "Clip", self, min, max, opset_before=12 + ) + else: + return clamp_max(g, clamp_min(g, self, min), max) + + +@_onnx_symbolic("aten::clamp_min") +@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def clamp_min(g: jit_utils.GraphContext, self, min): + min = g.op("Cast", min, to_i=_type_utils.JitScalarType.from_value(self).onnx_type()) + if symbolic_helper._get_tensor_rank(min) == 0: + max = opset9.unused(g) + return opset9._op_with_optional_float_cast( + g, "Clip", self, min, max, opset_before=12 + ) + else: + return opset9._op_with_optional_float_cast(g, "Max", self, min, opset_before=12) + + +@_onnx_symbolic("aten::clamp_max") +@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def clamp_max(g: jit_utils.GraphContext, self, max): + max = g.op("Cast", max, to_i=_type_utils.JitScalarType.from_value(self).onnx_type()) + if 
symbolic_helper._get_tensor_rank(max) == 0: + min = opset9.unused(g) + return opset9._op_with_optional_float_cast( + g, "Clip", self, min, max, opset_before=12 + ) + else: + return opset9._op_with_optional_float_cast(g, "Min", self, max, opset_before=12) + + +@_onnx_symbolic("aten::relu6") +@_beartype.beartype +def relu6(g: jit_utils.GraphContext, input): + scalar_type = _type_utils.JitScalarType.from_value( + input, _type_utils.JitScalarType.FLOAT + ) + min_val = g.op( + "Constant", + value_t=torch.tensor(0, dtype=scalar_type.dtype()), + ) + max_val = g.op( + "Constant", + value_t=torch.tensor(6, dtype=scalar_type.dtype()), + ) + return clamp(g, input, min_val, max_val) + + +@_onnx_symbolic("aten::select") +# Opset 11 gather accepts negative indices +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "i", "v") +@_beartype.beartype +def select(g: jit_utils.GraphContext, self, dim, index): + return g.op("Gather", self, index, axis_i=dim) + + +@_onnx_symbolic("aten::index_put") +@_beartype.beartype +def index_put( + g: jit_utils.GraphContext, self, indices_list_value, values, accumulate=False +): + if symbolic_helper._is_packed_list(indices_list_value): + indices_list = symbolic_helper._unpack_list(indices_list_value) + else: + indices_list = [indices_list_value] + if symbolic_helper.is_caffe2_aten_fallback(): + args = [self] + indices_list + [values, accumulate] + return g.at("index_put", *args) + + accumulate = symbolic_helper._parse_arg(accumulate, "b") + + if len(indices_list) == 0: + return values + + if len(indices_list) > 1: + for idx_ in range(len(indices_list)): + if symbolic_helper._is_bool(indices_list[idx_]): + indices_list[idx_] = g.op("NonZero", indices_list[idx_]) + index = indices_list[0] + + for ind in indices_list[1:]: + index = opset9.add(g, index, ind) + broadcast_index_shape = g.op("Shape", index) + indices_list = [ + symbolic_helper._unsqueeze_helper( + g, opset9.expand(g, ind, broadcast_index_shape, None), [-1] + ) + for ind in indices_list + ] + index = g.op("Concat", *indices_list, axis_i=-1) + else: + # Replace index_put node with masked_scatter or masked_fill + # when inputs to the index_put node contains a single boolean input. + # + # index_put -> masked_fill + # * input index contains single tensor of Bool type (e.g.: %24 <- %23). + # * input value contains single element (e.g.: %18). + # + # Torch IR + # %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6) + # %16 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = + # aten::to(%8, %26, %27, %11, %12, %28, %29, %15) + # %18 : Float(requires_grad=0, device=cpu) = prim::Constant[value={1}]() + # %23 : Bool(8, strides=[1], device=cpu) = aten::view(%16, %22) + # %24 : Tensor?[] = prim::ListConstruct(%23) + # %25 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = + # aten::index_put(%mask, %24, %18, %30) + # return (%25) + # + # + # index_put -> masked_scatter + # * input index contains single tensor of Bool type (e.g.: %32 <- %31). + # * input value contains multiple elements (e.g.: %28). 
+ # + # Torch IR + # %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6) + # %28 : Float(8, strides=[1], requires_grad=0, device=cpu) + # = prim::Constant[value= 1 1 1 1 1 1 1 1 [ CPUFloatType{8} ]]() + # %15 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) + # = aten::ne(%mask, %some_const) + # %23 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) + # = aten::to(%15, %34, %35, %18, %19, %36, %37, %22) + # %38 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]() + # %30 : int[] = prim::Constant[value=[-1]]() + # %31 : Bool(8, strides=[1], device=cpu) = aten::view(%23, %30) + # %32 : Tensor?[] = prim::ListConstruct(%31) + # %33 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) + # = aten::index_put(%mask, %32, %28, %38) + # return (%33) + index = indices_list[0] + bool_inp = index + if symbolic_helper._is_bool(bool_inp): + rank = symbolic_helper._get_tensor_rank(values) + if rank is not None and rank == 0: + return opset9.masked_fill(g, self, bool_inp, values) + mask_rank = symbolic_helper._get_tensor_rank(bool_inp) + self_rank = symbolic_helper._get_tensor_rank(self) + if ( + mask_rank is not None + and self_rank is not None + and self_rank > mask_rank + ): + # Unsqueeze 'bool_inp' to be broadcastable to shape of 'self'. + bool_inp = symbolic_helper._unsqueeze_helper( + g, bool_inp, list(range(mask_rank, self_rank)) + ) + return masked_scatter(g, self, bool_inp, values) + broadcast_index_shape = g.op("Shape", index) + index = symbolic_helper._unsqueeze_helper(g, index, [-1]) + sub_data_shape = symbolic_helper._slice_helper( + g, g.op("Shape", self), axes=[0], starts=[len(indices_list)], ends=[sys.maxsize] + ) + values_shape = g.op("Concat", broadcast_index_shape, sub_data_shape, axis_i=0) + # Check if values is a singular value and expand accordingly + rank = symbolic_helper._get_tensor_rank(values) + if rank is not None and rank == 0: + values = opset9.expand(g, values, values_shape, None) + values = symbolic_helper._reshape_helper(g, values, values_shape) + + self_scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.UNDEFINED + ) + if self_scalar_type != _type_utils.JitScalarType.UNDEFINED: + values_scalar_type = _type_utils.JitScalarType.from_value( + values, _type_utils.JitScalarType.UNDEFINED + ) + if self_scalar_type != values_scalar_type: + values = g.op("Cast", values, to_i=self_scalar_type.onnx_type()) + elif accumulate: + raise errors.SymbolicValueError("self does not have a valid scalar type.", self) + + if accumulate: + zeros = g.op( + "ConstantOfShape", + g.op("Shape", self), + value_t=torch.tensor([0], dtype=self_scalar_type.dtype()), + ) + result = g.op("ScatterND", zeros, index, values) + result = add(g, self, result) + else: + result = g.op("ScatterND", self, index, values) + + return result + + +@_onnx_symbolic("aten::pixel_shuffle") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def pixel_shuffle(g: jit_utils.GraphContext, self, upscale_factor): + rank = symbolic_helper._get_tensor_rank(self) + if rank is not None and rank != 4: + return symbolic_helper._unimplemented("pixel_shuffle", "only support 4d input") + return g.op("DepthToSpace", self, blocksize_i=upscale_factor, mode_s="CRD") + + +@_onnx_symbolic( + "aten::upsample_nearest1d", + decorate=[_apply_params("upsample_nearest1d", 3, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_nearest2d", + decorate=[_apply_params("upsample_nearest2d", 4, "nearest")], +) 
+@_onnx_symbolic( + "aten::upsample_nearest3d", + decorate=[_apply_params("upsample_nearest3d", 5, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_linear1d", + decorate=[_apply_params("upsample_linear1d", 3, "linear")], +) +@_onnx_symbolic( + "aten::upsample_bilinear2d", + decorate=[_apply_params("upsample_bilinear2d", 4, "linear")], +) +@_onnx_symbolic( + "aten::upsample_trilinear3d", + decorate=[_apply_params("upsample_trilinear3d", 5, "linear")], +) +@_onnx_symbolic( + "aten::upsample_bicubic2d", + decorate=[_apply_params("upsample_bicubic2d", 4, "cubic")], +) +@_beartype.beartype +def _interpolate(name: str, dim: int, interpolate_mode: str): + return symbolic_helper._interpolate_helper(name, dim, interpolate_mode) + + +@_onnx_symbolic("aten::__interpolate") +@symbolic_helper.quantized_args(True, False, False, False, False, False, False) +@_beartype.beartype +def __interpolate( + g: jit_utils.GraphContext, + input, + size, + scale_factor, + mode, + align_corners, + recompute_scale_factor, + antialias, +): + return symbolic_helper.__interpolate_helper( + g, input, size, scale_factor, mode, align_corners, recompute_scale_factor + ) + + +@_onnx_symbolic("aten::gather") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def gather(g: jit_utils.GraphContext, self, dim, index, sparse_grad=False): + if symbolic_helper._maybe_get_const(sparse_grad, "i"): + return symbolic_helper._unimplemented("gather", "sparse_grad == True") + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("gather", self, dim, index, sparse_grad) + return g.op("GatherElements", self, index, axis_i=dim) + + +@_onnx_symbolic("aten::scatter") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def scatter(g: jit_utils.GraphContext, self, dim, index, src): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("scatter", self, dim, index, src, overload_name="src") + src_type = _type_utils.JitScalarType.from_value(src) + src = symbolic_helper._maybe_get_scalar(src) + if symbolic_helper._is_value(src): + return g.op("ScatterElements", self, index, src, axis_i=dim) + else: + # Check if scalar "src" has same type as self (PyTorch allows different + # type for scalar src (but not when src is tensor)). If not, insert Cast node. + if _type_utils.JitScalarType.from_value(self) != src_type: + src = g.op( + "Cast", + src, + to_i=_type_utils.JitScalarType.from_value(self).onnx_type(), + ) + return g.op( + "ScatterElements", self, index, opset9.expand_as(g, src, index), axis_i=dim + ) + + +@_onnx_symbolic("aten::cumsum") +@symbolic_helper.parse_args("v", "i", "none") +@_beartype.beartype +def cumsum(g: jit_utils.GraphContext, self, dim, dtype=None): + dim_tensor = g.op("Constant", value_t=torch.tensor(dim, dtype=torch.int)) + if dtype and dtype.node().kind() != "prim::Constant": + parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype") + cast = g.op( + "Cast", self, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type() + ) + else: + cast = self + csum = g.op("CumSum", cast, dim_tensor) + return csum + + +@_onnx_symbolic("aten::masked_select") +@_beartype.beartype +def masked_select(g: jit_utils.GraphContext, self, mask): + index = opset9.nonzero(g, opset9.expand_as(g, mask, self)) + return g.op("GatherND", self, index) + + +@_onnx_symbolic("aten::masked_scatter") +@_beartype.beartype +def masked_scatter(g: jit_utils.GraphContext, self, mask, source): + index = opset9.nonzero(g, opset9.expand_as(g, mask, self)) + # NOTE: source can have more elements than needed. 
+ # It could also have arbitrary shape. + # This is not supported by ONNX::ScatterND, so we need to flatten and slice source tensor. + source = symbolic_helper._reshape_helper(g, source, torch.LongTensor([-1])) + source = symbolic_helper._slice_helper( + g, + source, + axes=torch.LongTensor([0]), + starts=torch.LongTensor([0]), + ends=opset9.size(g, index, torch.LongTensor([0])), + ) + return g.op("ScatterND", self, index, source) + + +@_onnx_symbolic("aten::len") +@_beartype.beartype +def _len(g: jit_utils.GraphContext, self): + if ( + symbolic_helper._is_tensor_list(self) + or self.node().kind() == "onnx::SplitToSequence" + ): + return g.op("SequenceLength", self) + sz_0 = size(g, self, g.op("Constant", value_t=torch.LongTensor([0]))) + return symbolic_helper._squeeze_helper(g, sz_0, [0]) + + +@_onnx_symbolic("aten::__getitem_") +@_beartype.beartype +def __getitem_(g: jit_utils.GraphContext, self, i): + if symbolic_helper._is_tensor_list(self): + # SequenceAt requires that the input be a List of Tensors + return g.op("SequenceAt", self, i) + else: + from torch.onnx.symbolic_opset9 import __getitem_ as getitem + + return getitem(g, self, i) + + +@_onnx_symbolic("aten::_set_item") +@_beartype.beartype +def _set_item(g: jit_utils.GraphContext, tensor_list, i, v): + tensor_list = g.op("SequenceErase", tensor_list, i) + return g.op("SequenceInsert", tensor_list, v, i) + + +@_onnx_symbolic("aten::append") +@_beartype.beartype +def append(g: jit_utils.GraphContext, self, tensor): + return g.op("SequenceInsert", self, tensor) + + +@_onnx_symbolic("aten::add") +@_beartype.beartype +def add(g: jit_utils.GraphContext, self, other, alpha=None): + if symbolic_helper._is_value(self) and symbolic_helper._is_tensor_list(self): + tensor_list_node = other.node() + if tensor_list_node.kind() != "prim::ListConstruct": + return symbolic_helper._unimplemented( + "add", "does not support adding dynamic tensor list to another" + ) + tensors = symbolic_helper._unpack_list(other) + l = self + for t in tensors: + l = g.op("SequenceInsert", l, t) + return l + + return opset9.add(g, self, other, alpha) + + +@_onnx_symbolic("aten::insert") +@_beartype.beartype +def insert(g: jit_utils.GraphContext, self, pos, tensor): + return g.op("SequenceInsert", self, tensor, pos) + + +@_onnx_symbolic("aten::pop") +@_beartype.beartype +def pop(g: jit_utils.GraphContext, tensor_list, dim): + return g.op("SequenceErase", tensor_list, dim) + + +@_onnx_symbolic("aten::Delete") +@_beartype.beartype +def Delete(g: jit_utils.GraphContext, tensor_list, dim): + return g.op("SequenceErase", tensor_list, dim) + + +@_onnx_symbolic("aten::cat") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def cat(g: jit_utils.GraphContext, tensor_list, dim): + if symbolic_helper._is_packed_list(tensor_list): + return opset9.cat(g, tensor_list, dim) + else: + dim = symbolic_helper._get_const(dim, "i", "dim") + return g.op("ConcatFromSequence", tensor_list, axis_i=dim) + + +@_onnx_symbolic("aten::stack") +@_beartype.beartype +def stack(g: jit_utils.GraphContext, tensor_list, dim): + if symbolic_helper._is_packed_list(tensor_list): + return opset9.stack(g, tensor_list, dim) + else: + dim = symbolic_helper._get_const(dim, "i", "dim") + return g.op("ConcatFromSequence", tensor_list, axis_i=dim, new_axis_i=1) + + +@_onnx_symbolic("aten::_unique2") +@symbolic_helper.parse_args("v", "i", "i", "i") +@_beartype.beartype +def _unique2(g: jit_utils.GraphContext, self, sorted, return_inverse, return_counts): + u, indices, inverse_indices, counts = g.op( + 
"Unique", self, sorted_i=sorted, outputs=4 + ) + return u, inverse_indices, counts + + +@_onnx_symbolic("aten::unique_dim") +@symbolic_helper.parse_args("v", "i", "i", "i", "i") +@_beartype.beartype +def unique_dim( + g: jit_utils.GraphContext, self, dim, sorted, return_inverse, return_counts +): + u, indices, inverse_indices, counts = g.op( + "Unique", self, axis_i=dim, sorted_i=sorted, outputs=4 + ) + return u, inverse_indices, counts + + +@_onnx_symbolic("aten::topk") +@symbolic_helper.parse_args("v", "v", "i", "i", "i", "none") +@_beartype.beartype +def topk(g: jit_utils.GraphContext, self, k, dim, largest, sorted, out=None): + return symbolic_helper._topk_helper( + g, self, k, dim, largest=largest, sorted=sorted, out=out + ) + + +@_onnx_symbolic("aten::sort") +@symbolic_helper.parse_args("v", "i", "i", "none") +@_beartype.beartype +def sort(g: jit_utils.GraphContext, self, dim, decending, out=None): + return symbolic_helper._sort_helper(g, self, dim, decending=decending, out=out) + + +@_onnx_symbolic("aten::argsort") +@symbolic_helper.parse_args("v", "i", "i", "none") +@_beartype.beartype +def argsort(g: jit_utils.GraphContext, self, dim, decending, out=None): + _, indices = symbolic_helper._sort_helper( + g, self, dim, decending=decending, out=out + ) + return indices + + +@_onnx_symbolic("aten::round") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def round(g: jit_utils.GraphContext, self, decimals=0): + if not symbolic_helper._is_fp(self): + return self + if decimals == 0: + return g.op("Round", self) + mul = g.op("Mul", self, g.op("Constant", value_t=torch.tensor(pow(10, decimals)))) + round = g.op("Round", mul) + return g.op( + "Mul", round, g.op("Constant", value_t=torch.tensor(pow(10, -1 * decimals))) + ) + + +@_onnx_symbolic("aten::remainder") +@_beartype.beartype +def remainder(g: jit_utils.GraphContext, input, other): + if symbolic_helper._is_fp(input) or symbolic_helper._is_fp(other): + return opset9.remainder(g, input, other) + return g.op("Mod", input, other, fmod_i=0) + + +@_onnx_symbolic("aten::split") +@symbolic_helper.parse_args("v", "v", "i", "i") +@_beartype.beartype +def split(g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None): + if not symbolic_helper._is_split_static(split_size_or_sizes, _outputs): + split_out = g.op("SplitToSequence", self, split_size_or_sizes, axis_i=dim) + if _outputs is None: + return split_out + # Convert to multiple slice nodes iff number of splits and number of outputs are statically known. 
+ if ( + symbolic_helper._is_packed_list(split_size_or_sizes) + and len(symbolic_helper._unpack_list(split_size_or_sizes)) == _outputs + ): + split_sizes = [ + symbolic_helper._unsqueeze_helper(g, v, [0]) + for v in symbolic_helper._unpack_list(split_size_or_sizes) + ] + start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long)) + axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long)) + res = [] + for i in range(_outputs): + end = g.op( + "Add", start, split_sizes[i] + ) # split_sizes is a list of same length as _outputs + res.append(g.op("Slice", self, start, end, axis)) + start = end + return res + return [ + g.op( + "SequenceAt", + split_out, + g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)), + ) + for i in range(_outputs) + ] + else: + return opset9.split(g, self, split_size_or_sizes, dim, _outputs) + + +@_onnx_symbolic("aten::split_with_sizes") +@symbolic_helper.parse_args("v", "v", "i", "i") +@_beartype.beartype +def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None): + return split(g, self, split_sizes, dim, _outputs) + + +@_onnx_symbolic("aten::unbind") +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def unbind(g: jit_utils.GraphContext, self, dim=0, _outputs=None): + if _outputs is None: + return g.op( + "SplitToSequence", + self, + g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)), + axis_i=dim, + keepdims_i=0, + ) + else: + return opset9.unbind(g, self, dim, _outputs) + + +@_beartype.beartype +def _prepare_onnx_paddings(g: jit_utils.GraphContext, input, pad): + """Generate paddings in ONNX order based on pad in pytorch. + + Args: + input: the input tensor. + pad: the paddings in pytorch. + The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ..., dim_m_begin, dim_m_end, + where m is in range [0, n]. + """ + if ( + not symbolic_helper._is_packed_list(pad) + and symbolic_helper._is_list(pad) + and symbolic_helper._is_scalar_list(pad) + ): + pad = g.op("ConcatFromSequence", pad, axis_i=0, new_axis_i=1) + # The desired order of paddings is + # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end. + # n is the dimension of input. + # Assume zero-dimensions in the beginning, pad the "pad" sequence with zeros in the beginning + pad_len = opset9.size(g, pad, g.op("Constant", value_t=torch.tensor([0]))) + # Set extension = [0] * (dim * 2 - len(pad)) + rank = symbolic_helper._get_tensor_rank(input) + if rank is None: + rank = g.op("Size", g.op("Shape", input)) + else: + rank = g.op("Constant", value_t=torch.tensor(rank, dtype=torch.int64)) + extension = g.op( + "Sub", + g.op("Mul", rank, g.op("Constant", value_t=torch.tensor(2, dtype=torch.int64))), + pad_len, + ) + # Concat pad with extension: paddings = [dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, 0, 0, ... 
] + # Currently ONNX only supports int64 type for Pad + pad = g.op("Cast", pad, to_i=_C_onnx.TensorProtoDataType.INT64) + paddings = g.op( + "Concat", + pad, + g.op( + "ConstantOfShape", extension, value_t=torch.tensor([0], dtype=torch.int64) + ), + axis_i=0, + ) + # Reshape and reverse order and collate first beginnings and then ends + # paddings = [[..., 0, dim_n-1_begin, dim_n_begin], + # [..., 0, dim_n-1_end, dim_n_end]] + # Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin, ..., 0, dim_n - 1_end, dim_n_end] + paddings = symbolic_helper._reshape_helper( + g, paddings, g.op("Constant", value_t=torch.tensor([-1, 2])) + ) + paddings = g.op("Transpose", opset10.flip(g, paddings, [0]), perm_i=[1, 0]) + paddings = symbolic_helper._reshape_helper( + g, paddings, g.op("Constant", value_t=torch.tensor([-1])) + ) + padding_c = g.op("Cast", paddings, to_i=_C_onnx.TensorProtoDataType.INT64) + return padding_c + + +@_onnx_symbolic("aten::constant_pad_nd") +@_beartype.beartype +def constant_pad_nd(g: jit_utils.GraphContext, input, padding, value=None): + mode = "constant" + value = symbolic_helper._maybe_get_scalar(value) + value = symbolic_helper._if_scalar_type_as(value, input) + pad = _prepare_onnx_paddings(g, input, padding) + return g.op("Pad", input, pad, value, mode_s=mode) + + +@_onnx_symbolic("aten::reflection_pad1d") +@_onnx_symbolic("aten::reflection_pad2d") +@_onnx_symbolic("aten::reflection_pad3d") +@_beartype.beartype +def reflection_pad(g: jit_utils.GraphContext, input, padding): + mode = "reflect" + paddings = _prepare_onnx_paddings(g, input, padding) + return g.op("Pad", input, paddings, mode_s=mode) + + +@_onnx_symbolic("aten::replication_pad1d") +@_onnx_symbolic("aten::replication_pad2d") +@_onnx_symbolic("aten::replication_pad3d") +@_beartype.beartype +def replication_pad(g: jit_utils.GraphContext, input, padding): + mode = "edge" + paddings = _prepare_onnx_paddings(g, input, padding) + return g.op("Pad", input, paddings, mode_s=mode) + + +@_onnx_symbolic("aten::pad") +@_beartype.beartype +def pad( + g: jit_utils.GraphContext, + input: _C.Value, + pad: _C.Value, + mode: _C.Value, + value: _C.Value, +): + mode = symbolic_helper._parse_arg(mode, "s") + if mode == "replicate": + return replication_pad(g, input, pad) + elif mode == "reflect": + return reflection_pad(g, input, pad) + elif mode == "constant": + return constant_pad_nd(g, input, pad, value) + elif mode == "circular": + return opset9._pad_circular(g, input, pad) + else: + raise errors.SymbolicValueError(f"Unrecognized padding mode {mode}", input) + + +@_onnx_symbolic("aten::linalg_det") +@_beartype.beartype +def linalg_det(g: jit_utils.GraphContext, self): + return g.op("Det", self) + + +@_onnx_symbolic("aten::logdet") +@_beartype.beartype +def logdet(g: jit_utils.GraphContext, input): + return opset9.log(g, linalg_det(g, input)) + + +@_onnx_symbolic("aten::arange") +@_beartype.beartype +def arange(g: jit_utils.GraphContext, *args): + def _get_arange_dtype(dtype): + dtype = symbolic_helper._maybe_get_const(dtype, "i") + return dtype + + if len(args) == 2 and all(isinstance(val, int) for val in args): + # aten::arange(Scalar start, Scalar end) + dtype = torch.int64 + # Start index. + start = g.op( + "Constant", + value_t=torch.tensor(args[0], dtype=dtype), + ) + # End (exclusive) index. + end = g.op( + "Constant", + value_t=torch.tensor(args[1], dtype=dtype), + ) + # Step size from start to end indexes. 
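+        # This two-int overload has no explicit step, so it defaults to 1; e.g.
+        # arange(2, 6) lowers to Range(2, 6, 1) and yields [2, 3, 4, 5].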
+ delta_default = g.op( + "Constant", + value_t=torch.tensor(1, dtype=dtype), + ) + return g.op("Range", start, end, delta_default) + elif len(args) == 2 or len(args) == 5: + if len(args) == 2: + # aten::arange(Scalar end, Tensor out) + dtype = None + else: + # aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory) + dtype = _get_arange_dtype(args[1]) + type_, end, start, step = symbolic_helper._arange_cast_helper( + g, end=args[0], dtype=dtype + ) + start_default = g.op( + "Constant", + value_t=torch.tensor(0, dtype=type_.dtype()), + ) + delta_default = g.op( + "Constant", + value_t=torch.tensor(1, dtype=type_.dtype()), + ) + return g.op("Range", start_default, end, delta_default) + elif len(args) == 4 or len(args) == 7: + if len(args) == 4: + # aten::arange(Scalar start, Scalar end, Scalar step, Tensor out) + dtype = None + else: + # aten::arange(Scalar start, Scalar end, Scalar step, ScalarType dtype, Layout, Device, bool pin_memory) + dtype = _get_arange_dtype(args[3]) + _, end, start, step = symbolic_helper._arange_cast_helper( + g, start=args[0], end=args[1], step=args[2], dtype=dtype + ) + return g.op("Range", start, end, step) + elif len(args) == 6: + # aten::arange(Scalar start, Scalar end, ScalarType dtype, Layout, Device, bool pin_memory) + dtype = _get_arange_dtype(args[2]) + type_, end, start, step = symbolic_helper._arange_cast_helper( + g, start=args[0], end=args[1], dtype=dtype + ) + delta_default = g.op( + "Constant", + value_t=torch.tensor(1, dtype=type_.dtype()), + ) + return g.op("Range", start, end, delta_default) + else: + return symbolic_helper._unimplemented( + "aten::arange", f"with {len(args)} arguments" + ) + + +@_onnx_symbolic("aten::_dim_arange") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def _dim_arange(g: jit_utils.GraphContext, like, dim): + like_shape = g.op("Shape", like) + stop = g.op( + "Gather", like_shape, g.op("Constant", value_t=torch.tensor(dim)), axis_i=0 + ) + if symbolic_helper.is_caffe2_aten_fallback(): + return g.op("_caffe2::Range", stop) + return arange(g, stop, 4, None, None, None) + + +@_onnx_symbolic("aten::size") +@symbolic_helper.quantized_args(True, quantize_output=False) +@_beartype.beartype +def size(g: jit_utils.GraphContext, self, dim=None): + if dim is None: + return g.op("Shape", self) + return symbolic_helper._size_helper(g, self, dim) + + +@_onnx_symbolic("aten::squeeze") +@_beartype.beartype +def squeeze(g: jit_utils.GraphContext, self, dim=None): + if dim is None: + return g.op("Squeeze", self) + + # dim as a tensor + if not symbolic_helper._is_constant(dim): + return symbolic_helper._squeeze_helper(g, self, [dim]) + + dim = symbolic_helper._get_const(dim, "i", "dim") + + input_rank = symbolic_helper._get_tensor_rank(self) + adjusted_dim = dim + if input_rank is not None and dim < 0: + adjusted_dim += input_rank + dim_size = symbolic_helper._get_tensor_dim_size(self, adjusted_dim) + if (dim < 0 and input_rank is None) or dim_size is None: + # If onnx shape inference is not on, export always as dynamic. + # Because we cannot tell if observed static shape is also static at runtime. + # create "cond" node (condition is shape[i]==1) + dim_constant = g.op("Constant", value_t=torch.tensor([dim])) + size = symbolic_helper._size_helper(g, self, dim_constant) + const_one = g.op("Constant", value_t=torch.ones(1, dtype=torch.int64)) + cond = g.op("Equal", size, const_one) + # create the "If" node and add the "then" and "else" blocks to it. 
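+        # At runtime the "then" branch squeezes `dim` when shape[dim] == 1,
+        # while the "else" branch passes the input through unchanged via Identity.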
+ if_op, (if_context, else_context), _ = jit_utils.add_op_with_blocks( + g, "If", cond, n_blocks=2 + ) + squeeze_ = symbolic_helper._squeeze_helper(if_context, self, [dim]) + utils._add_output_to_block(if_context.block, squeeze_) + identity_ = else_context.op("Identity", self) + utils._add_output_to_block(else_context.block, identity_) + return if_op + + # For static input shape + dim = adjusted_dim + if dim_size > 1: + warnings.warn( + "This model contains a squeeze operation on dimension " + + str(dim) + + ". The size of " + + "this dimension in the given input is " + + str(dim_size) + + ". The model will " + + "be exported without the squeeze node. If the model is intended to be used with dynamic " + + "input shapes, please export with dynamic_axes argument." + ) + return self + return symbolic_helper._squeeze_helper(g, self, [dim]) + + +@_onnx_symbolic("aten::unsqueeze") +@_beartype.beartype +def unsqueeze(g: jit_utils.GraphContext, self, dim): + if symbolic_helper._is_constant(dim): + dim = symbolic_helper._get_const(dim, "i", "dim") + + return symbolic_helper._unsqueeze_helper(g, self, [dim]) + + +@_onnx_symbolic("aten::mm") +@_beartype.beartype +def mm(g: jit_utils.GraphContext, self, other): + return g.op("Gemm", self, other, beta_f=0.0, alpha_f=1.0) + + +@_onnx_symbolic("aten::index") +@_beartype.beartype +def index(g: jit_utils.GraphContext, self, index): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("index", self, index, overload_name="Tensor") + + if symbolic_helper._is_packed_list(index): + indices = symbolic_helper._unpack_list(index) + else: + indices = [index] + + # Handle single mask index. + if len(indices) == 1: + index = indices[0] + if not symbolic_helper._is_none(index) and ( + symbolic_helper._is_bool(index) + or _type_utils.JitScalarType.from_value(index) + == _type_utils.JitScalarType.UINT8 + ): + index = opset9.nonzero(g, index) + return g.op("GatherND", self, index) + return opset9.index(g, self, index) + + +@_onnx_symbolic("aten::index_fill") +@_beartype.beartype +def index_fill(g: jit_utils.GraphContext, self, dim, index, value): + dim_value = symbolic_helper._parse_arg(dim, "i") + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "index_fill", + self, + index, + value, + overload_name="int_Scalar", + dim_i=dim_value, + ) + + expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper( + g, self, dim, index + ) + value = symbolic_helper._maybe_get_scalar(value) + value = symbolic_helper._if_scalar_type_as(value, self) + expanded_value = opset9.expand(g, value, expanded_index_shape, None) + return scatter(g, self, dim, expanded_index, expanded_value) + + +@_onnx_symbolic("aten::index_copy") +@_beartype.beartype +def index_copy(g: jit_utils.GraphContext, self, dim, index, source): + dim_value = symbolic_helper._parse_arg(dim, "i") + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("index_copy", self, index, source, dim_i=dim_value) + expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper( + g, self, dim, index + ) + return scatter(g, self, dim, expanded_index, source) + + +@_onnx_symbolic("aten::__rshift_") +@_beartype.beartype +def __rshift_(g: jit_utils.GraphContext, self, other): + # make sure to cast other to self's type + # (when self is long, make sure that other is not float) + if _type_utils.JitScalarType.from_value( + other, _type_utils.JitScalarType.UNDEFINED + ) != _type_utils.JitScalarType.from_value(self): + other = g.op( + "Cast", + other, + 
to_i=_type_utils.JitScalarType.from_value(self).onnx_type(), + ) + + if ( + _type_utils.JitScalarType.from_value(self, _type_utils.JitScalarType.UNDEFINED) + == _type_utils.JitScalarType.UINT8 + ): + return g.op("BitShift", self, other, direction_s="RIGHT") + + two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32)) + # exponent (same type as self) has to be float or double in onnx::Pow + if not symbolic_helper._is_fp(self): + other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT) + two_pow = g.op("Pow", two, other) + two_pow = g.op( + "Cast", + two_pow, + to_i=_type_utils.JitScalarType.from_value(self).onnx_type(), + ) + rshift = g.op("Div", self, two_pow) + return rshift + + +@_onnx_symbolic("aten::__lshift_") +@_beartype.beartype +def __lshift_(g: jit_utils.GraphContext, self, other): + # make sure to cast other to self's type + # (when self is long, make sure that other is not float) + if _type_utils.JitScalarType.from_value( + other, _type_utils.JitScalarType.UNDEFINED + ) != _type_utils.JitScalarType.from_value(self): + other = g.op( + "Cast", + other, + to_i=_type_utils.JitScalarType.from_value(self).onnx_type(), + ) + + if ( + _type_utils.JitScalarType.from_value(self, _type_utils.JitScalarType.UNDEFINED) + == _type_utils.JitScalarType.UINT8 + ): + return g.op("BitShift", self, other, direction_s="LEFT") + + two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32)) + # exponent (same type as self) has to be float or double in onnx::Pow + if not symbolic_helper._is_fp(self): + other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT) + two_pow = g.op("Pow", two, other) + two_pow = g.op( + "Cast", + two_pow, + to_i=_type_utils.JitScalarType.from_value(self).onnx_type(), + ) + lshift = g.op("Mul", self, two_pow) + return lshift + + +@_beartype.beartype +def _get_im2col_indices_along_dim( + g: jit_utils.GraphContext, input_d, kernel_size_d, dilation_d, padding_d, stride_d +): + # Input is always 4-D (N, C, H, W) + # Calculate indices of sliding blocks along spatial dimension + # Slide kernel over input each dim d: + # each dimension d ranges from 0 to input[d]+2xpadding[d]-dilation[d]x(kernel_size[d]-1) + # with steps = stride + + blocks_d = g.op( + "Add", input_d, g.op("Constant", value_t=torch.tensor(padding_d * 2)) + ) + blocks_d = g.op( + "Sub", + blocks_d, + g.op("Constant", value_t=torch.tensor(dilation_d * (kernel_size_d - 1))), + ) + + # Stride kernel over input and find starting indices along dim d + blocks_d_indices = g.op( + "Range", + g.op("Constant", value_t=torch.tensor(0)), + blocks_d, + g.op("Constant", value_t=torch.tensor(stride_d)), + ) + + # Apply dilation on kernel and find its indices along dim d + kernel_grid = torch.arange(0, kernel_size_d * dilation_d, dilation_d) + kernel_grid = g.op("Constant", value_t=kernel_grid.unsqueeze(0)) + + # Broadcast and add kernel staring positions (indices) with + # kernel_grid along dim d, to get block indices along dim d + blocks_d_indices = symbolic_helper._unsqueeze_helper( + g, blocks_d_indices, [0] + ) # Reshape to [1, -1] + kernel_mask = symbolic_helper._reshape_helper( + g, kernel_grid, g.op("Constant", value_t=torch.tensor([-1, 1])) + ) + block_mask = g.op("Add", blocks_d_indices, kernel_mask) + + return block_mask + + +@_beartype.beartype +def _get_im2col_padded_input(g: jit_utils.GraphContext, input, padding_h, padding_w): + # Input is always 4-D tensor (N, C, H, W) + # Padding tensor has the following format: (padding_h, padding_w) + # Reshape the padding to follow ONNX 
format: (dim1_begin, dim2_begin,...,dim1_end, dim2_end,...) + pad = g.op("Constant", value_t=torch.LongTensor([0, 0, padding_h, padding_w] * 2)) + return g.op("Pad", input, pad) + + +@_beartype.beartype +def _get_im2col_output_shape(g: jit_utils.GraphContext, input, kernel_h, kernel_w): + batch_dim = size(g, input, g.op("Constant", value_t=torch.tensor(0))) + channel_dim = size(g, input, g.op("Constant", value_t=torch.tensor(1))) + channel_unfolded = g.op( + "Mul", channel_dim, g.op("Constant", value_t=torch.tensor(kernel_h * kernel_w)) + ) + + return g.op( + "Concat", + symbolic_helper._unsqueeze_helper(g, batch_dim, [0]), + symbolic_helper._unsqueeze_helper(g, channel_unfolded, [0]), + g.op("Constant", value_t=torch.tensor([-1])), + axis_i=0, + ) + + +@_onnx_symbolic("aten::im2col") +@symbolic_helper.parse_args("v", "is", "is", "is", "is") +@_beartype.beartype +def im2col(g: jit_utils.GraphContext, input, kernel_size, dilation, padding, stride): + # Input is always 4-D tensor (N, C, H, W) + # All other args are int[2] + + input_h = size(g, input, g.op("Constant", value_t=torch.tensor(2))) + input_w = size(g, input, g.op("Constant", value_t=torch.tensor(3))) + + stride_h, stride_w = stride[0], stride[1] + padding_h, padding_w = padding[0], padding[1] + dilation_h, dilation_w = dilation[0], dilation[1] + kernel_h, kernel_w = kernel_size[0], kernel_size[1] + + blocks_row_indices = _get_im2col_indices_along_dim( + g, input_h, kernel_h, dilation_h, padding_h, stride_h + ) + blocks_col_indices = _get_im2col_indices_along_dim( + g, input_w, kernel_w, dilation_w, padding_w, stride_w + ) + + output_shape = _get_im2col_output_shape(g, input, kernel_h, kernel_w) + padded_input = _get_im2col_padded_input(g, input, padding_h, padding_w) + + # For a 4D matrix of size (1, 1, 3, 3) as below with kernel_size=2, stride=1, and dilation=1 + # [[[[1., 2., 3.,], + # [4., 5., 6.,], + # [7., 8., 9.,]]]] + # First gather indices along rows (dim=2) with blocks_row_indices = [[0,1], [1,2]] to get: + # [[[[[1., 2., 3.], + # [4., 5., 6.]], + # [[4., 5., 6.], + # [7., 8., 9.]]]]] + # And then gather along cols (dim=4) with blocks_row_indices = [[0,1], [1,2]] to get: + # [[[[[[1., 2.], + # [4., 5.]], + # [[2., 3.], + # [5., 6]]], + # [[[4., 5.], + # [7., 8.]], + # [[5., 6.], + # [8., 9.]]]]]] + # Transpose dims 3 (depth) and 4 (rows), and then reshape to output shape (1, 1, 4, 4) to get: + # [[[1., 2., 4., 5.], + # [2., 3., 5., 6.], + # [4., 5., 7., 8.], + # [5., 6., 8., 9.]]] + output = g.op("Gather", padded_input, blocks_row_indices, axis_i=2) + output = g.op("Gather", output, blocks_col_indices, axis_i=4) + output = g.op("Transpose", output, perm_i=[0, 1, 2, 4, 3, 5]) + return symbolic_helper._reshape_helper(g, output, output_shape) + + +@_onnx_symbolic("aten::narrow") +@_beartype.beartype +def narrow(g: jit_utils.GraphContext, input, dim, start, length): + end = g.op("Add", start, length) + return symbolic_helper._slice_helper(g, input, axes=dim, starts=start, ends=end) + + +@_onnx_symbolic("aten::flatten") +@symbolic_helper.quantized_args(True, False, False) +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def flatten(g: jit_utils.GraphContext, input, start_dim, end_dim): + dim = symbolic_helper._get_tensor_rank(input) + if dim == 1: + return input + # use ONNX's Flatten operator for cases where the output shape is 2D + if start_dim == 1: + if end_dim == -1 or (dim is not None and end_dim == dim - 1): + return g.op("Flatten", input, axis_i=start_dim) + elif start_dim == 0: + if end_dim == -2 or 
(dim is not None and end_dim == dim - 2): + return g.op("Flatten", input, axis_i=end_dim + 1) + if dim is None: + return symbolic_helper._unimplemented( + "dim", + "ONNX and PyTorch use different strategies to split the input. " + "Input rank must be known at export time.", + ) + # if end_dim is negative add dim + if end_dim < 0: + end_dim = dim + end_dim + + return symbolic_helper._flatten_helper(g, input, start_dim, end_dim, dim) + + +@_onnx_symbolic("aten::linalg_vector_norm") +@symbolic_helper.parse_args("v", "f", "is", "b", "v") +@_beartype.beartype +def linalg_vector_norm( + g: jit_utils.GraphContext, + self, + ord, + dim: Optional[Sequence[int]], + keepdim: bool, + dtype, +): + if ord == 0: + if dim is None: + self = symbolic_helper._reshape_helper( + g, self, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)) + ) + keepdim = False + + cond_op = g.op( + "Not", g.op("Equal", self, g.op("Constant", value_t=torch.LongTensor([0]))) + ) + cond_op = g.op( + "Cast", + cond_op, + to_i=_type_utils.JitScalarType.from_value(self).onnx_type(), + ) + return symbolic_helper._reducesum_helper( + g, cond_op, axes_i=dim, keepdims_i=keepdim + ) + else: + return opset9.linalg_vector_norm(g, self, ord, dim, keepdim, dtype) + + +@_onnx_symbolic("aten::embedding_bag") +@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i", "v", "i", "i") +@_beartype.beartype +def embedding_bag( + g: jit_utils.GraphContext, + embedding_matrix, + indices, + offsets, + scale_grad_by_freq, + mode, + sparse, + per_sample_weights, + include_last_offset, + padding_idx, +): + if scale_grad_by_freq and GLOBALS.export_training: + return symbolic_helper._onnx_unsupported( + "embedding_bag with scale_grad_by_freq for training mode" + ) + if padding_idx is not None and padding_idx >= 0: + raise RuntimeError("embedding_bag with padding_idx") + + loop_condition = g.op("Constant", value_t=torch.tensor(1)) + loop_condition = g.op("Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL) + zero = g.op("Constant", value_t=torch.tensor([0])) + + indices_len = symbolic_helper._unsqueeze_helper( + g, + symbolic_helper._size_helper( + g, indices, g.op("Constant", value_t=torch.tensor(0)) + ), + [0], + ) + if not include_last_offset: + offsets = [offsets, indices_len] + offsets = g.op("Concat", *offsets, axis_i=0) + + # Offsets holds the starting index position of each bag. So we create a list of the indices slices (determined by + # offsets) and gather those indices in indices_row. Then we use this subset of indices to gather from embeddings. + # The embeddings output is a loop scan output, so we can avoid creating a sequence and inserting elements in. 
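+    # For example, with indices = [1, 4, 2, 5, 3] and offsets = [0, 2], bag 0
+    # covers indices[0:2] = [1, 4] and bag 1 covers indices[2:5] = [2, 5, 3];
+    # each iteration of the Loop below slices one such bag from indices, gathers
+    # the corresponding embedding rows, and reduces them according to `mode`
+    # (0 = sum, 1 = mean, 2 = max).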
+ offsets_starts = symbolic_helper._slice_helper( + g, offsets, axes=[0], starts=[0], ends=[sys.maxsize], steps=[1] + ) + offsets_ends = symbolic_helper._slice_helper( + g, offsets, axes=[0], starts=[1], ends=[sys.maxsize], steps=[1] + ) + + loop_len = symbolic_helper._size_helper( + g, offsets_ends, g.op("Constant", value_t=torch.tensor(0)) + ) + + loop, (loop_context,), _ = jit_utils.add_op_with_blocks( + g, "Loop", loop_len, loop_condition, n_blocks=1 + ) + loop_block = loop_context.block + + # FIXME(justinchuby): We need to handle what happens when we call b.op on a node return + block_input_iter = utils._add_input_to_block(loop_block) + cond = utils._add_input_to_block(loop_block) + + indices_start = loop_context.op( + "Gather", offsets_starts, block_input_iter, axis_i=0 + ) + indices_end = loop_context.op("Gather", offsets_ends, block_input_iter, axis_i=0) + indices_start = symbolic_helper._unsqueeze_helper(loop_context, indices_start, [0]) + indices_end = symbolic_helper._unsqueeze_helper(loop_context, indices_end, [0]) + + indices_row = loop_context.op("Slice", indices, indices_start, indices_end, zero) + embeddings = loop_context.op("Gather", embedding_matrix, indices_row, axis_i=0) + if not symbolic_helper._is_none(per_sample_weights): + per_sample_weights_row = loop_context.op( + "Slice", per_sample_weights, indices_start, indices_end, zero + ) + per_sample_weights_row = symbolic_helper._unsqueeze_helper( + loop_context, per_sample_weights_row, [1] + ) + embeddings = loop_context.op("Mul", embeddings, per_sample_weights_row) + if mode == 0: + embeddings = symbolic_helper._reducesum_helper( + loop_context, embeddings, axes_i=[0], keepdims_i=0 + ) + elif mode == 1: + embeddings = loop_context.op("ReduceMean", embeddings, axes_i=[0], keepdims_i=0) + else: + embeddings = loop_context.op("ReduceMax", embeddings, axes_i=[0], keepdims_i=0) + + cond_out = loop_context.op( + "Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL + ) + utils._add_output_to_block(loop_block, cond_out) + utils._add_output_to_block(loop_block, embeddings) + + # aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices. + # But the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag. + return loop.node().output(), None, None, None + + +@_onnx_symbolic("aten::embedding_renorm") +@symbolic_helper.parse_args("v", "v", "f", "f") +@_beartype.beartype +def embedding_renorm(g: jit_utils.GraphContext, weight, indices, max_norm, norm_type): + unique_indices = g.op("Unique", indices) + partial_weight = g.op("Gather", weight, unique_indices) + norm_i = int(norm_type) + if norm_i == 1: + norm_type = "ReduceL1" + elif norm_i == 2: + norm_type = "ReduceL2" + else: + raise errors.SymbolicValueError( + f"Unsupported: ONNX export of embedding_renorm with norm: {norm_i}. " + "Only 1. and 2. are supported.", + weight, + ) + partial_weight_norm = g.op(norm_type, partial_weight, axes_i=[1], keepdims_i=1) + # https://github.com/pytorch/pytorch/blob/0a07488ed2c47765e337e290bd138c0e6e459cbd/aten/src/ATen/native/Embedding.cpp#L177 + # Add 1e-7 to prevent division by zero. 
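+    # Rows whose norm exceeds max_norm are rescaled by max_norm / (norm + 1e-7);
+    # rows already within the limit are kept as-is by the Where below.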
+ partial_weight_norm_ = g.op( + "Add", partial_weight_norm, g.op("Constant", value_t=torch.tensor(1e-7)) + ) + max_norm = torch.tensor(max_norm) + scales = g.op("Div", max_norm, partial_weight_norm_) + partial_weight_renorm = g.op("Mul", partial_weight, scales) + partial_weight_renorm = g.op( + "Where", + g.op("Greater", partial_weight_norm, max_norm), + partial_weight_renorm, + partial_weight, + ) + return g.op( + "ScatterND", + weight, + symbolic_helper._unsqueeze_helper(g, unique_indices, [1]), + partial_weight_renorm, + ) + + +@_onnx_symbolic("aten::chunk") +@_beartype.beartype +def chunk(g: jit_utils.GraphContext, self, chunks, dim): + # Calculate chunk size for dynamic chunk + dim_size = g.op("Gather", g.op("Shape", self), dim, axis_i=0) + chunk_size_s = g.op( + "Sub", chunks, g.op("Constant", value_t=torch.tensor([1], dtype=torch.long)) + ) + chunk_size = g.op("Div", g.op("Add", dim_size, chunk_size_s), chunks) + # Create splits vector + chunk_vec = [ + opset9.expand(g, chunk_size, chunk_size_s, None), + g.op("Sub", dim_size, g.op("Mul", chunk_size, chunk_size_s)), + ] + chunk_vec = g.op("Concat", *chunk_vec, axis_i=0) + return split(g, self, chunk_vec, dim) + + +@_onnx_symbolic("aten::normal") +@_beartype.beartype +def normal( + g: jit_utils.GraphContext, + mean, + std, + sizes=None, + generator=None, + dtype=None, + layout=None, + device=None, + pin_memory=None, +): + # If you can sample from a given distribution with mean 0 and variance 1, then you can easily sample from a + # scale-location transformation of that distribution, which has mean μ and variance σ's square. If x is a sample + # from a mean 0 and variance 1 distribution then + # σx+μ + # is a sample with mean μ and variance σ's square. + if sizes is not None and not symbolic_helper._is_none(sizes): + mean = opset9.expand(g, mean, sizes, None) + result = opset9.mul(g, std, g.op("RandomNormalLike", mean)) + return add(g, result, mean) + + +@_onnx_symbolic("aten::atleast_1d") +@_beartype.beartype +def atleast_1d(g: jit_utils.GraphContext, self: torch._C.Value): + # NOTE: If it's 0D, reshape to 1D + + # NOTE: self could be a packed list or a tensor + if symbolic_helper._is_value(self) and symbolic_helper._is_packed_list(self): + tensor_list = symbolic_helper._unpack_list(self) + new_tensor_list = [] + for tensor in tensor_list: + new_tensor = tensor + tensor_rank = symbolic_helper._get_tensor_rank(tensor) + if tensor_rank == 0: + new_tensor = symbolic_helper._reshape_helper( + g, new_tensor, g.op("Constant", value_t=torch.tensor([1])) + ) + new_tensor_list.append(new_tensor) + return g.op("SequenceConstruct", *new_tensor_list) + + tensor_rank = symbolic_helper._get_tensor_rank(self) + if tensor_rank == 0: + self = symbolic_helper._reshape_helper( + g, self, g.op("Constant", value_t=torch.tensor([1])) + ) + return self + + +@_onnx_symbolic("aten::atleast_2d") +@_beartype.beartype +def atleast_2d(g: jit_utils.GraphContext, self: torch._C.Value): + # NOTE: If it's 0D, reshape to 2D + # If it's 1D, unsqueeze to 2D + + # NOTE: self could be a packed list or a tensor + if symbolic_helper._is_value(self) and symbolic_helper._is_packed_list(self): + tensor_list = symbolic_helper._unpack_list(self) + new_tensor_list = [] + for tensor in tensor_list: + new_tensor = tensor + tensor_rank = symbolic_helper._get_tensor_rank(tensor) + if tensor_rank == 0: + new_tensor = symbolic_helper._reshape_helper( + g, new_tensor, g.op("Constant", value_t=torch.tensor([1, 1])) + ) + elif tensor_rank == 1: + new_tensor = 
symbolic_helper._unsqueeze_helper( + g, new_tensor, axes_i=[0] + ) + new_tensor_list.append(new_tensor) + return g.op("SequenceConstruct", *new_tensor_list) + + tensor_rank = symbolic_helper._get_tensor_rank(self) + if tensor_rank == 0: + self = symbolic_helper._reshape_helper( + g, self, g.op("Constant", value_t=torch.tensor([1, 1])) + ) + elif tensor_rank == 1: + self = symbolic_helper._unsqueeze_helper(g, self, axes_i=[0]) + return self + + +@_onnx_symbolic("aten::atleast_3d") +@_beartype.beartype +def atleast_3d(g: jit_utils.GraphContext, self: torch._C.Value): + # NOTE: If it's 0D, reshape to 3D + # If it's 1D, unsqueeze to 3D + # If it's 2D, unsqueeze to 3D + + # NOTE: self could be a packed list or a tensor + if symbolic_helper._is_value(self) and symbolic_helper._is_packed_list(self): + tensor_list = symbolic_helper._unpack_list(self) + new_tensor_list = [] + for tensor in tensor_list: + new_tensor = tensor + tensor_rank = symbolic_helper._get_tensor_rank(tensor) + if tensor_rank == 0: + new_tensor = symbolic_helper._reshape_helper( + g, new_tensor, g.op("Constant", value_t=torch.tensor([1, 1, 1])) + ) + elif tensor_rank == 1: + new_tensor = symbolic_helper._unsqueeze_helper( + g, new_tensor, axes_i=[0] + ) + new_tensor = symbolic_helper._unsqueeze_helper( + g, new_tensor, axes_i=[-1] + ) + elif tensor_rank == 2: + new_tensor = symbolic_helper._unsqueeze_helper( + g, new_tensor, axes_i=[-1] + ) + new_tensor_list.append(new_tensor) + return g.op("SequenceConstruct", *new_tensor_list) + + tensor_rank = symbolic_helper._get_tensor_rank(self) + if tensor_rank == 0: + self = symbolic_helper._reshape_helper( + g, self, g.op("Constant", value_t=torch.tensor([1, 1, 1])) + ) + elif tensor_rank == 1: + self = symbolic_helper._unsqueeze_helper(g, self, axes_i=[0]) + self = symbolic_helper._unsqueeze_helper(g, self, axes_i=[-1]) + elif tensor_rank == 2: + self = symbolic_helper._unsqueeze_helper(g, self, axes_i=[-1]) + return self + + +@_onnx_symbolic("prim::ConstantChunk") +@_beartype.beartype +def prim_constant_chunk(g: jit_utils.GraphContext, self, chunks, dim): + input_shape = g.op("Shape", self) + axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long)) + input_shape_dim = g.op("Gather", input_shape, axis, axis_i=0) + start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long)) + chunk_size = g.op("Constant", value_t=torch.tensor([chunks], dtype=torch.long)) + chunk_size_minus_1 = g.op( + "Constant", value_t=torch.tensor([chunks - 1], dtype=torch.long) + ) + input_shape_dim_shift = g.op("Add", input_shape_dim, chunk_size_minus_1) + chunk_dim = g.op("Div", input_shape_dim_shift, chunk_size) + res = [] + for i in range(chunks): + index = g.op("Constant", value_t=torch.tensor([i + 1], dtype=torch.long)) + end = g.op("Mul", chunk_dim, index) + res.append(g.op("Slice", self, start, end, axis)) + start = end + return res + + +@_onnx_symbolic("aten::hstack") +@_beartype.beartype +def hstack(g: jit_utils.GraphContext, tensor_list: _C.Value): + tensor_list = atleast_1d(g, tensor_list) + first_tensor = g.op( + "SequenceAt", + tensor_list, + g.op("Constant", value_t=torch.tensor(0, dtype=torch.long)), + ) + first_tensor_shape = g.op("Shape", first_tensor) + first_tensor_dim = g.op("Size", first_tensor_shape) + + const_one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)) + equal_to_one = g.op("Equal", first_tensor_dim, const_one) + + ( + if_op_greater, + (if_context_equal, else_context_equal), + _, + ) = jit_utils.add_op_with_blocks(g, "If", equal_to_one, 
n_blocks=2, outputs=1) + result_if = if_context_equal.op( + "ConcatFromSequence", tensor_list, axis_i=0, new_axis_i=0 + ) + utils._add_output_to_block(if_context_equal.block, result_if) + result_else = else_context_equal.op( + "ConcatFromSequence", tensor_list, axis_i=1, new_axis_i=0 + ) + utils._add_output_to_block(else_context_equal.block, result_else) + result = if_op_greater.node().output() + + return result + + +@_onnx_symbolic("aten::vstack") +@_beartype.beartype +def vstack(g: jit_utils.GraphContext, tensor_list: _C.Value): + tensor_list = atleast_2d(g, tensor_list) + return g.op("ConcatFromSequence", tensor_list, axis_i=0, new_axis_i=0) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset12.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset12.py new file mode 100644 index 0000000000000000000000000000000000000000..130b02a889b04e75ad01577245e264cb755ed868 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset12.py @@ -0,0 +1,485 @@ +from __future__ import annotations + +import functools +import sys +from typing import Optional, Tuple + +import torch +from torch._C import _onnx as _C_onnx +from torch.onnx import ( + _type_utils, + errors, + symbolic_helper, + symbolic_opset9 as opset9, + utils, +) +from torch.onnx._internal import _beartype, jit_utils, registration + + +# EDITING THIS FILE? READ THIS FIRST! +# see Note [Edit Symbolic Files] in README.md + +# This file exports ONNX ops for opset 12 + +__all__ = [ + "argmax", + "argmin", + "binary_cross_entropy_with_logits", + "celu", + "cross_entropy_loss", + "dropout", + "einsum", + "ge", + "le", + "native_dropout", + "nll_loss", + "nll_loss2d", + "nll_loss_nd", + "outer", + "pow", + "tensordot", + "unfold", +] + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=12) + + +@_beartype.beartype +def _einsum_helper(g: jit_utils.GraphContext, equation, tensors): + if not tensors: + raise RuntimeError("Einsum inputs are empty.") + # ONNX does not support bool for Einsum inputs. + if symbolic_helper._is_bool(tensors[0]): + tensors = [ + g.op("Cast", tensor, to_i=_C_onnx.TensorProtoDataType.INT64) + for tensor in tensors + ] + return g.op( + "Cast", + g.op("Einsum", *tensors, equation_s=equation), + to_i=_C_onnx.TensorProtoDataType.BOOL, + ) + else: + return g.op("Einsum", *tensors, equation_s=equation) + + +@_onnx_symbolic("aten::einsum") +@symbolic_helper.parse_args("s", "v", "is") +@_beartype.beartype +def einsum(g: jit_utils.GraphContext, equation, tensor_list, path=None): + tensors = symbolic_helper._unpack_list(tensor_list) + return _einsum_helper(g, equation, tensors) + + +@_onnx_symbolic("aten::outer") +@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def outer(g: jit_utils.GraphContext, input, other): + # make sure to cast other to self's type + if _type_utils.JitScalarType.from_value( + other, _type_utils.JitScalarType.UNDEFINED + ) != _type_utils.JitScalarType.from_value(input): + other = g.op( + "Cast", + other, + to_i=_type_utils.JitScalarType.from_value(input).onnx_type(), + ) + return _einsum_helper(g, "i,j->ij", [input, other]) + + +@_beartype.beartype +def _dropout_returns_masked_input_and_mask( + g: jit_utils.GraphContext, input: torch._C.Value, p: float, train: bool +) -> Tuple[torch._C.Value, Optional[torch._C.Value]]: + symbolic_helper.check_training_mode(train, "dropout") + # In eval mode, dropout is non-op. That is, if the node's + # train param is set to False, dropout just returns its inputs. 
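+    # From opset 12 onwards, ONNX Dropout takes the ratio and training_mode as
+    # optional inputs (rather than attributes) and returns the masked output
+    # together with a Boolean mask, which is what the training branch below emits.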
+ if not train: + return input, None + p = g.op("Constant", value_t=torch.tensor(p)) + t = g.op("Constant", value_t=torch.tensor(train, dtype=torch.bool)) + r, mask = g.op("Dropout", input, p, t, outputs=2) + return r, mask + + +@_onnx_symbolic("aten::dropout") +@symbolic_helper.parse_args("v", "f", "b") +@_beartype.beartype +def dropout(g: jit_utils.GraphContext, input, p, train): + masked, _ = _dropout_returns_masked_input_and_mask(g, input, p, train) + return masked + + +@_onnx_symbolic("aten::native_dropout") +@symbolic_helper.parse_args("v", "f", "b") +@_beartype.beartype +def native_dropout(g: jit_utils.GraphContext, input, p, train): + return _dropout_returns_masked_input_and_mask(g, input, p, train) + + +@_onnx_symbolic("aten::nll_loss") +@_beartype.beartype +def nll_loss(g: jit_utils.GraphContext, self, target, weight, reduction, ignore_index): + # none reduction : onnx::Constant[value={0}] + # mean reduction : onnx::Constant[value={1}] + # sum reduction : onnx::Constant[value={2}] + reduction = symbolic_helper._maybe_get_const(reduction, "i") + reduction_vals = ["none", "mean", "sum"] + reduction = reduction_vals[reduction] + + # in onnx NegativeLogLikelihoodLoss specification, ignore_index is optional without default value. + # therefore we need to set ignore_index attribute even if it is not specified (e.g. ignore_index=-100). + ignore_index = symbolic_helper._maybe_get_const(ignore_index, "i") + if weight.node().mustBeNone(): + nllloss = g.op( + "NegativeLogLikelihoodLoss", + self, + target, + reduction_s=reduction, + ignore_index_i=ignore_index, + ) + else: + nllloss = g.op( + "NegativeLogLikelihoodLoss", + self, + target, + weight, + reduction_s=reduction, + ignore_index_i=ignore_index, + ) + + return nllloss + + +@_onnx_symbolic("aten::nll_loss2d") +@_beartype.beartype +def nll_loss2d( + g: jit_utils.GraphContext, self, target, weight, reduction, ignore_index +): + return nll_loss(g, self, target, weight, reduction, ignore_index) + + +@_onnx_symbolic("aten::nll_loss_nd") +@_beartype.beartype +def nll_loss_nd( + g: jit_utils.GraphContext, self, target, weight, reduction, ignore_index +): + return nll_loss(g, self, target, weight, reduction, ignore_index) + + +@_onnx_symbolic("aten::cross_entropy_loss") +@_beartype.beartype +def cross_entropy_loss( + g: jit_utils.GraphContext, + self, + target, + weight, + reduction, + ignore_index, + label_smoothing, +): + # none reduction : onnx::Constant[value={0}] + # mean reduction : onnx::Constant[value={1}] + # sum reduction : onnx::Constant[value={2}] + reduction = symbolic_helper._maybe_get_const(reduction, "i") + reduction_vals = ["none", "mean", "sum"] + reduction = reduction_vals[reduction] + + label_smoothing = symbolic_helper._maybe_get_const(label_smoothing, "f") + if label_smoothing is not None and label_smoothing > 0.0: + raise errors.SymbolicValueError( + "Unsupported: ONNX does not support label_smoothing", self + ) + + # in onnx SoftmaxCrossEntropyLoss specification, ignore_index is optional without default value. + # therefore we need to set ignore_index attribute even if it is not specified (e.g. ignore_index=-100). 
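+    # SoftmaxCrossEntropyLoss operates on raw (unnormalized) scores and applies
+    # the log-softmax internally, matching aten::cross_entropy_loss, so no
+    # separate LogSoftmax node is inserted here.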
+ ignore_index = symbolic_helper._maybe_get_const(ignore_index, "i") + if weight.node().mustBeNone(): + celoss = g.op( + "SoftmaxCrossEntropyLoss", + self, + target, + reduction_s=reduction, + ignore_index_i=ignore_index, + ) + else: + celoss = g.op( + "SoftmaxCrossEntropyLoss", + self, + target, + weight, + reduction_s=reduction, + ignore_index_i=ignore_index, + ) + + return celoss + + +@_onnx_symbolic("aten::binary_cross_entropy_with_logits") +@symbolic_helper.parse_args("v", "v", "v", "v", "i") +@_beartype.beartype +def binary_cross_entropy_with_logits( + g: jit_utils.GraphContext, input, target, weight, pos_weight, reduction +): + p = g.op("Constant", value_t=torch.tensor([1])) + sig_x = opset9.sigmoid(g, input) + log_sig_x = opset9.log(g, sig_x) + sub_1_x = opset9.sub(g, p, sig_x) + sub_1_y = opset9.sub(g, p, target) + log_1_x = opset9.log(g, sub_1_x) + if pos_weight is None or symbolic_helper._is_none(pos_weight): + output = opset9.neg( + g, + opset9.add( + g, opset9.mul(g, target, log_sig_x), opset9.mul(g, sub_1_y, log_1_x) + ), + ) + else: + output = opset9.neg( + g, + opset9.add( + g, + opset9.mul(g, opset9.mul(g, target, log_sig_x), pos_weight), + opset9.mul(g, sub_1_y, log_1_x), + ), + ) + + if weight is not None and not symbolic_helper._is_none(weight): + output = opset9.mul(g, weight, output) + + reduction = symbolic_helper._maybe_get_const(reduction, "i") + if reduction == 0: + return output + elif reduction == 1: + return g.op("ReduceMean", output, keepdims_i=0) + elif reduction == 2: + return g.op("ReduceSum", output, keepdims_i=0) + else: + return symbolic_helper._onnx_unsupported( + "binary_cross_entropy_with_logits with reduction other than none, mean, or sum", + input, + ) + + +@_onnx_symbolic("aten::celu") +@_beartype.beartype +def celu(g: jit_utils.GraphContext, self, alpha): + alpha = symbolic_helper._maybe_get_const(alpha, "f") + # if the input is of type double cast it to float + if ( + _type_utils.JitScalarType.from_value(self, _type_utils.JitScalarType.UNDEFINED) + == _type_utils.JitScalarType.DOUBLE + ): + self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.FLOAT) + out = g.op("Celu", self, alpha_f=alpha) + return g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.DOUBLE) + + return g.op("Celu", self, alpha_f=alpha) + + +@_onnx_symbolic("aten::argmax") +@symbolic_helper.parse_args("v", "v", "b") +@_beartype.beartype +def argmax( + g: jit_utils.GraphContext, + input: torch._C.Value, + dim: torch._C.Value, + keepdim: bool, +): + return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMax") + + +@_onnx_symbolic("aten::argmin") +@symbolic_helper.parse_args("v", "v", "b") +@_beartype.beartype +def argmin( + g: jit_utils.GraphContext, + input: torch._C.Value, + dim: torch._C.Value, + keepdim: bool, +): + return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMin") + + +@_onnx_symbolic("aten::pow") +@_beartype.beartype +def pow(g: jit_utils.GraphContext, self, exponent): + return g.op("Pow", self, exponent) + + +@_onnx_symbolic("aten::ge") +@_beartype.beartype +def ge(g: jit_utils.GraphContext, input, other): + return g.op("GreaterOrEqual", input, other) + + +@_onnx_symbolic("aten::le") +@_beartype.beartype +def le(g: jit_utils.GraphContext, input, other): + return g.op("LessOrEqual", input, other) + + +@_onnx_symbolic("aten::unfold") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def unfold(g: jit_utils.GraphContext, input, dimension, size, step): + const_size = symbolic_helper._maybe_get_const(size, 
"i") + const_step = symbolic_helper._maybe_get_const(step, "i") + if not symbolic_helper._is_value(const_size) and not symbolic_helper._is_value( + const_step + ): + return opset9.unfold(g, input, dimension, const_size, const_step) + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("unfold", input, dimension_i=dimension, size_i=size, step_i=step) + + sizedim = symbolic_helper._get_tensor_dim_size(input, dimension) + if sizedim is not None: + low_start = g.op("Constant", value_t=torch.tensor(0)) + low_end = g.op("Constant", value_t=torch.tensor(sizedim)) + hi_end = g.op("Constant", value_t=torch.tensor(sizedim + 1)) + low_indices = g.op("Range", low_start, low_end, step) + hi_indices = g.op("Range", size, hi_end, step) + + low_size = symbolic_helper._size_helper( + g, low_indices, g.op("Constant", value_t=torch.tensor(0)) + ) + hi_size = symbolic_helper._size_helper( + g, hi_indices, g.op("Constant", value_t=torch.tensor(0)) + ) + + ndim = symbolic_helper._get_tensor_rank(input) + assert ndim is not None + perm = list(range(0, ndim)) + perm.append(perm.pop(dimension)) + + unsqueeze_list = [] + loop_condition = g.op("Constant", value_t=torch.tensor(1)) + loop_condition = g.op( + "Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL + ) + loop_len = g.op("Min", low_size, hi_size) + + loop, (loop_context,), _ = jit_utils.add_op_with_blocks( + g, "Loop", loop_len, loop_condition, n_blocks=1 + ) + + loop_block = loop_context.block + block_input_iter = utils._add_input_to_block(loop_block) + # FIXME(justinchuby): cond is unused? + cond = utils._add_input_to_block(loop_block) + + starts = loop_context.op("Gather", low_indices, block_input_iter) + ends = loop_context.op("Gather", hi_indices, block_input_iter) + axes = loop_context.op("Constant", value_t=torch.tensor([2])) + starts = symbolic_helper._unsqueeze_helper(loop_context, starts, [0]) + ends = symbolic_helper._unsqueeze_helper(loop_context, ends, [0]) + stack = loop_context.op("Slice", input, starts, ends, axes) + + unsqueeze = symbolic_helper._unsqueeze_helper( + loop_context, loop_context.op("Transpose", stack, perm_i=perm), [dimension] + ) + unsqueeze_list.append(unsqueeze) + concat = loop_context.op("Concat", *unsqueeze_list, axis_i=0) + + cond_out = loop_context.op( + "Cast", loop_condition, _C_onnx.TensorProtoDataType.BOOL + ) + utils._add_output_to_block(loop_block, cond_out) + utils._add_output_to_block(loop_block, concat) + + loop_output = loop.node().output() + perm = [0, 1, 2, 3, 4] + perm[0], perm[dimension + 1] = perm[dimension + 1], perm[0] + transpose = g.op("Transpose", loop_output, perm_i=perm) + squeeze = symbolic_helper._squeeze_helper(g, transpose, [0]) + + return squeeze + + return symbolic_helper._unimplemented("Unfold", "input size not accessible") + + +@_onnx_symbolic("aten::tensordot") +@symbolic_helper.parse_args("v", "v", "is", "is", "v") +@_beartype.beartype +def tensordot(g: jit_utils.GraphContext, input_a, input_b, dims_a, dims_b, out=None): + if out is not None: + symbolic_helper._unimplemented( + "Tensordot", "Out parameter is not supported for tensordot." 
+ ) + + dim_count_a = symbolic_helper._get_tensor_rank(input_a) + if dim_count_a is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of tensordot for tensor(input_a) of unknown rank.", + input_a, + ) + + dim_count_b = symbolic_helper._get_tensor_rank(input_b) + if dim_count_b is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of tensordot for tensor(input_b) of unknown rank.", + input_b, + ) + + dims_a = [ + (dims_a[i] + dim_count_a) if (dims_a[i] < 0) else dims_a[i] + for i in range(len(dims_a)) + ] + dims_b = [ + (dims_b[i] + dim_count_b) if (dims_b[i] < 0) else dims_b[i] + for i in range(len(dims_b)) + ] + + left_dims_a = [i for i in range(dim_count_a) if (i not in dims_a)] + left_dims_b = [i for i in range(dim_count_b) if (i not in dims_b)] + + new_input_a = opset9.permute(g, input_a, left_dims_a + dims_a) + new_input_b = opset9.permute(g, input_b, dims_b + left_dims_b) + + input_shape = g.op("Shape", new_input_a) + left_sizes_a = symbolic_helper._slice_helper( + g, input_shape, axes=[0], starts=[0], ends=[len(left_dims_a)] + ) + shape_sizes = [ + left_sizes_a, + g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)), + ] + output_a = opset9._reshape_from_tensor(g, new_input_a, shape_sizes) + + input_shape = g.op("Shape", output_a) + slices = symbolic_helper._slice_helper( + g, input_shape, axes=[0], starts=[-1], ends=[sys.maxsize] + ) + shape_sizes = [ + g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)), + slices, + ] + output_a = opset9._reshape_from_tensor(g, new_input_a, shape_sizes) + + input_shape = g.op("Shape", new_input_b) + left_sizes_b = symbolic_helper._slice_helper( + g, input_shape, axes=[0], starts=[len(dims_b)], ends=[sys.maxsize] + ) + slices = symbolic_helper._slice_helper( + g, input_shape, axes=[0], starts=[0], ends=[len(dims_b)] + ) + shape_sizes = [ + slices, + g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)), + ] + output_b = opset9._reshape_from_tensor(g, new_input_b, shape_sizes) + + input_shape = g.op("Shape", output_b) + slices = symbolic_helper._slice_helper( + g, input_shape, axes=[0], starts=[-1], ends=[sys.maxsize] + ) + shape_sizes = [ + g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)), + slices, + ] + output_b = opset9._reshape_from_tensor(g, new_input_b, shape_sizes) + + output = einsum(g, "ij,jk->ik", g.op("prim::ListConstruct", *[output_a, output_b])) + + shape_sizes = [left_sizes_a, left_sizes_b] + return opset9._reshape_from_tensor(g, output, shape_sizes) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset13.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset13.py new file mode 100644 index 0000000000000000000000000000000000000000..866984921c2e7f32a117e9a7e82cd568e650e604 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset13.py @@ -0,0 +1,1156 @@ +# EDITING THIS FILE? READ THIS FIRST! 
+# see Note [Edit Symbolic Files] in README.md + +# This file exports ONNX ops for opset 13 +import functools + +import torch +import torch._C._onnx as _C_onnx +from torch.onnx import ( + _constants, + _type_utils, + errors, + symbolic_helper, + symbolic_opset11 as opset11, + symbolic_opset9 as opset9, + utils, +) +from torch.onnx._internal import _beartype, jit_utils, registration + + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=13) + + +def _apply_params(*args, **kwargs): + """Returns a decorator that calls the decorated (higher-order) function with the given parameters.""" + + def _apply(fn): + return fn(*args, **kwargs) + + return _apply + + +@_onnx_symbolic("aten::softmax") +@symbolic_helper.parse_args("v", "i", "none") +@_beartype.beartype +def softmax(g: jit_utils.GraphContext, input, dim, dtype=None): + softmax = g.op("Softmax", input, axis_i=dim) + if dtype and dtype.node().kind() != "prim::Constant": + parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype") + softmax = g.op( + "Cast", softmax, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type() + ) + + return softmax + + +@_onnx_symbolic("aten::log_softmax") +@symbolic_helper.parse_args("v", "i", "none") +@_beartype.beartype +def log_softmax(g: jit_utils.GraphContext, input, dim, dtype=None): + return_op = g.op("LogSoftmax", input, axis_i=dim) + if dtype and dtype.node().kind() != "prim::Constant": + parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype") + return_op = g.op( + "Cast", return_op, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type() + ) + return return_op + + +@_onnx_symbolic("aten::frobenius_norm") +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def frobenius_norm(g: jit_utils.GraphContext, self, dim=None, keepdim=False): + dim_val = symbolic_helper._maybe_get_const(dim, "is") + if not symbolic_helper._is_value(dim_val) and len(dim_val) == 0: + return g.op("ReduceL2", self, keepdims_i=0) + sqr = g.op("Mul", self, self) + sumsqr = symbolic_helper._reducesum_helper(g, sqr, dim, keepdims_i=keepdim) + return g.op("Sqrt", sumsqr) + + +@_onnx_symbolic("aten::split") +@symbolic_helper.parse_args("v", "v", "i", "i") +@_beartype.beartype +def split(g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None): + if not symbolic_helper._is_split_static(split_size_or_sizes, _outputs): + split_out = g.op("SplitToSequence", self, split_size_or_sizes, axis_i=dim) + if _outputs is None: + return split_out + # Convert to multiple slice nodes iff number of splits and number of outputs are statically known. 
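+        # e.g. statically known sizes [2, 3] with _outputs == 2 are lowered to two Slice nodes over
+        # [0, 2) and [2, 5) along `dim`; otherwise each output is fetched back with SequenceAt below.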
+ if ( + symbolic_helper._is_packed_list(split_size_or_sizes) + and len(symbolic_helper._unpack_list(split_size_or_sizes)) == _outputs + ): + split_sizes = [ + symbolic_helper._unsqueeze_helper(g, v, [0]) + for v in symbolic_helper._unpack_list(split_size_or_sizes) + ] + + start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long)) + axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long)) + res = [] + for i in range(_outputs): + end = g.op( + "Add", start, split_sizes[i] + ) # split_sizes is a list of same length as _outputs + res.append(g.op("Slice", self, start, end, axis)) + start = end + return res + return [ + g.op( + "SequenceAt", + split_out, + g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)), + ) + for i in range(_outputs) + ] + + split_val = symbolic_helper._node_get(split_size_or_sizes.node(), "value") + if split_val.dim() > 0: + return g.op("Split", self, split_size_or_sizes, axis_i=dim, outputs=_outputs) + split_size = symbolic_helper._get_const(split_size_or_sizes, "i", "split_size") + + size = symbolic_helper._get_tensor_dim_size(self, dim) + if size is None: + if _outputs is not None: + size = split_size * _outputs + else: + raise errors.SymbolicValueError( + "Unknown dimension size not supported", self + ) + splits = [split_size] * (size // split_size) + leftover = size % split_size + if leftover: + splits.append(leftover) + splits = g.op("Constant", value_t=torch.tensor(splits)) + return g.op("Split", self, splits, axis_i=dim, outputs=_outputs) + + +@_onnx_symbolic("aten::split_with_sizes") +@_beartype.beartype +def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None): + return split(g, self, split_sizes, dim, _outputs) + + +@_onnx_symbolic("aten::unsafe_split") +@_beartype.beartype +def unsafe_split( + g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None +): + return split(g, self, split_size_or_sizes, dim, _outputs) + + +@_onnx_symbolic("aten::unsafe_split_with_sizes") +@_beartype.beartype +def unsafe_split_with_sizes( + g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None +): + return split_with_sizes(g, self, split_sizes, dim, _outputs) + + +@_onnx_symbolic("aten::tensor_split") +@symbolic_helper.parse_args("v", "v", "i", "i") +@_beartype.beartype +def tensor_split( + g: jit_utils.GraphContext, self, indices_or_sections, dim, _outputs=None +): + axis = g.op("Constant", value_t=torch.tensor(dim, dtype=torch.long)) + axis = opset11.unsqueeze(g, axis, 0) + const_1 = g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)) + + if symbolic_helper._is_split_static(indices_or_sections, _outputs): + split_val = symbolic_helper._node_get(indices_or_sections.node(), "value") + + if split_val.dim() > 0: + start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long)) + res = [] + assert _outputs is not None + for i in range(_outputs - 1): + end = g.op( + "Gather", + indices_or_sections, + g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)), + axis_i=0, + ) + res.append(g.op("Slice", self, start, end, axis)) + start = end + + end = symbolic_helper._size_helper(g, self, axis) + res.append(g.op("Slice", self, start, end, axis)) + return res + + split_size = symbolic_helper._get_const( + indices_or_sections, "i", "indices_or_sections" + ) + + size = symbolic_helper._get_tensor_dim_size(self, dim) + if size is None: + if _outputs is not None: + size = split_size * _outputs + else: + raise errors.SymbolicValueError( + "Unknown dimension size not supported", self + ) + 
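+    # Distribute `size` as evenly as possible over `split_size` sections, matching torch.tensor_split,
+    # e.g. torch.tensor_split(torch.arange(10), 3) yields chunks of sizes [4, 3, 3].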
+ min_split_size = size // split_size + num_splits_one_extra = size % split_size + + splits = num_splits_one_extra * [min_split_size + 1] + leftover = (split_size - num_splits_one_extra) * [min_split_size] + + splits = g.op( + "Constant", value_t=torch.tensor(splits + leftover, dtype=torch.long) + ) + return g.op("Split", self, splits, axis_i=dim, outputs=_outputs) + + if ( + symbolic_helper._is_tensor(indices_or_sections) + and symbolic_helper._get_tensor_rank(indices_or_sections) == 1 + ): + loop_len = symbolic_helper._size_helper( + g, indices_or_sections, g.op("Constant", value_t=torch.tensor(0)) + ) + loop_len = opset11.unsqueeze(g, loop_len, 0) + loop_condition = g.op("Cast", const_1, to_i=_C_onnx.TensorProtoDataType.BOOL) + + # To make the first slice in the below loop work, + # we pad a zero to the first position so that it will be the initial start of slice. + padding_0 = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long)) + indices_or_sections = g.op("Concat", padding_0, indices_or_sections, axis_i=0) + + final_splits = g.op("SequenceEmpty") + # Loop inputs + loop, (loop_context,), _ = jit_utils.add_op_with_blocks( + g, "Loop", loop_len, loop_condition, final_splits, outputs=1, n_blocks=1 + ) + + loop_block = loop_context.block + block_input_iter = utils._add_input_to_block(loop_block) + cond = utils._add_input_to_block(loop_block) + final_splits = utils._add_input_to_block(loop_block) + + start = loop_context.op( + "Gather", indices_or_sections, block_input_iter, axis_i=0 + ) + end = loop_context.op( + "Gather", + indices_or_sections, + loop_context.op("Add", block_input_iter, const_1), + axis_i=0, + ) + + slice = loop_context.op("Slice", self, start, end, axis) + final_splits = loop_context.op("SequenceInsert", final_splits, slice) + + # Loop outputs + cond_out = loop_context.op("Identity", loop_condition) + utils._add_output_to_block(loop_block, cond_out) + utils._add_output_to_block(loop_block, final_splits) + + loop_out = loop.node().output() + start = g.op( + "Gather", + indices_or_sections, + g.op("Constant", value_t=torch.tensor(-1, dtype=torch.long)), + axis_i=0, + ) + start = opset11.unsqueeze(g, start, 0) + end = symbolic_helper._size_helper(g, self, axis) + + last_slice = g.op("Slice", self, start, end, axis) + + return g.op("SequenceInsert", loop_out, last_slice) + + else: # scalar tensor + dim_size = symbolic_helper._size_helper(g, self, axis) + min_split_size = g.op("Div", dim_size, indices_or_sections) + min_split_size_plus_1 = g.op( + "Add", + min_split_size, + const_1, + ) + num_splits_one_extra = g.op("Mod", dim_size, indices_or_sections) + splits = g.op("Tile", min_split_size_plus_1, num_splits_one_extra) + leftover = g.op( + "Tile", + min_split_size, + g.op( + "Sub", + opset11.unsqueeze(g, indices_or_sections, 0), + num_splits_one_extra, + ), + ) + + splits = g.op("Concat", splits, leftover, axis_i=0) + if _outputs is None: + return g.op("SplitToSequence", self, splits, axis_i=dim) + return g.op("Split", self, splits, axis_i=dim, outputs=_outputs) + + +@_onnx_symbolic("aten::unbind") +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def unbind(g: jit_utils.GraphContext, self, dim=0, _outputs=None): + if _outputs is None: + return g.op( + "SplitToSequence", + self, + g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)), + axis_i=dim, + keepdims_i=0, + ) + + splits = g.op("Constant", value_t=torch.tensor([1] * _outputs)) + outputs = g.op("Split", self, splits, axis_i=dim, outputs=_outputs) + outputs = [outputs] if _outputs == 1 
else outputs + squeezed_outputs = [ + g.op("Squeeze", out, g.op("Constant", value_t=torch.tensor([dim]))) + for out in outputs + ] + return squeezed_outputs + + +@_onnx_symbolic("aten::nonzero_numpy") +# Emitted from `torch.nonzero(x, as_tuple=True)` +@_beartype.beartype +def nonzero_numpy(g: jit_utils.GraphContext, input, _outputs=None): + return unbind(g, opset9.nonzero(g, input), 1, _outputs=_outputs) + + +@_onnx_symbolic("aten::where") +@symbolic_helper.parse_args("v", "v", "v", "i") +@_beartype.beartype +def where(g: jit_utils.GraphContext, condition, self=None, other=None, _outputs=None): + # Assumes that torch.where's first argument takes only Bool and Byte tensors. + if not symbolic_helper._is_bool(condition): + condition = g.op("Cast", condition, to_i=_C_onnx.TensorProtoDataType.BOOL) + if self is None: + condition = opset9.nonzero(g, condition) + return symbolic_helper._unbind_helper( + g, condition, g.op("Constant", value_t=torch.tensor(1)), _outputs + ) + return g.op("Where", condition, self, other) + + +@_onnx_symbolic("aten::fake_quantize_per_channel_affine") +@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i") +@_beartype.beartype +def fake_quantize_per_channel_affine( + g: jit_utils.GraphContext, + inputs, + scale, + zero_point, + axis, + quant_min=-128, + quant_max=127, +): + # NOTE: (0, 127) is allowed as special case. PyTorch restricts activations to be in the range (0, 127). + # https://github.com/pytorch/pytorch/blob/b34b192d6b97325c9f78e5995c48c8498ede34bd/torch/ao/quantization/observer.py#L1422 + if (quant_min, quant_max) not in [(0, 255), (-128, 127), (0, 127)]: + raise errors.SymbolicValueError( + "For (quant_min, quant_max), ONNX allows only (0, 127), (0, 255) and (-128, 127). " + f"Got ({quant_min}, {quant_max})", + inputs, + ) + # ONNX defines zero_point to be int8 or uint8 + if quant_min == 0: + zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8) + else: + zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.INT8) + quantized = g.op("QuantizeLinear", inputs, scale, zero_point, axis_i=axis) + if (quant_min, quant_max) == (0, 127): + quantized = g.op( + "Clip", + quantized, + opset9.unused(g), + g.op("Constant", value_t=torch.tensor(127, dtype=torch.uint8)), + ) + return g.op("DequantizeLinear", quantized, scale, zero_point, axis_i=axis) + + +@_onnx_symbolic("aten::fake_quantize_per_tensor_affine") +@symbolic_helper.parse_args("v", "v", "v", "i", "i") +@_beartype.beartype +def fake_quantize_per_tensor_affine( + g: jit_utils.GraphContext, + inputs, + scale, + zero_point, + quant_min=-128, + quant_max=127, +): + # NOTE: (0, 127) is allowed as special case. PyTorch restricts activations to be in the range (0, 127). + # https://github.com/pytorch/pytorch/blob/b34b192d6b97325c9f78e5995c48c8498ede34bd/torch/ao/quantization/observer.py#L1422 + if (quant_min, quant_max) not in [(0, 255), (-128, 127), (0, 127)]: + raise errors.SymbolicValueError( + "For (quant_min, quant_max), ONNX allows only (0, 127), (0, 255) and (-128, 127). 
" + f"Got ({quant_min}, {quant_max})", + inputs, + ) + if quant_min == 0: + zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8) + else: + zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.INT8) + if ( + _type_utils.JitScalarType.from_value(scale, _type_utils.JitScalarType.UNDEFINED) + != _type_utils.JitScalarType.FLOAT + ): + scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT) + quantized = g.op("QuantizeLinear", inputs, scale, zero_point) + if (quant_min, quant_max) == (0, 127): + quantized = g.op( + "Clip", + quantized, + opset9.unused(g), + g.op("Constant", value_t=torch.tensor(127, dtype=torch.uint8)), + ) + return g.op("DequantizeLinear", quantized, scale, zero_point) + + +@_beartype.beartype +def _reduce_op_symbolic(onnx_op_name): + @_beartype.beartype + def symbolic(g, self, dim=None, keepdim=None): + self = opset9._maybe_cast_reduce_op_input(g, self) + if dim is None: + # all-reduce path + return symbolic_helper._handle_reduce_dim_none(g, self, onnx_op_name) + else: + keepdim = symbolic_helper._get_const(keepdim, "i", "keepdim") + return g.op(onnx_op_name, self, dim, keepdims_i=keepdim) + + return symbolic + + +@_onnx_symbolic( + "aten::sum", + decorate=[_apply_params("ReduceSum", "sum")], +) +@_beartype.beartype +def _reduce_with_dtype(onnx_op, name): + symbolic = _reduce_op_symbolic(onnx_op) + + @opset9.overload_by_arg_count + @_beartype.beartype + def reduce(g, *args, **kwargs): + @symbolic_helper.parse_args("v", "none") + @_beartype.beartype + def reduce_nodim(g, self, dtype): + dtype_onnx = None + if dtype.node().kind() == "onnx::Constant": + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + dtype_onnx = _type_utils.JitScalarType(dtype).onnx_type() + self = g.op("Cast", self, to_i=dtype_onnx) + elif dtype.node().kind() != "prim::Constant": + return symbolic_helper._unimplemented(name, "dtype", dtype) + result = symbolic(g, self) + if dtype_onnx is not None: + result_dtype_onnx = _type_utils.JitScalarType.from_value( + result + ).onnx_type() + if result_dtype_onnx != dtype_onnx: + result = g.op("Cast", result, to_i=dtype_onnx) + return result + + @symbolic_helper.parse_args("v", "v", "i", "none") + @_beartype.beartype + def reduce_dim(g, self, dim, keepdim, dtype): + dtype_onnx = None + if dtype.node().kind() == "onnx::Constant": + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + dtype_onnx = _type_utils.JitScalarType(dtype).onnx_type() + self = g.op("Cast", self, to_i=dtype_onnx) + elif dtype.node().kind() != "prim::Constant": + return symbolic_helper._unimplemented(name, "dtype", dtype) + result = symbolic(g, self, dim, keepdim) + if dtype_onnx is not None: + result_dtype_onnx = _type_utils.JitScalarType.from_value( + result + ).onnx_type() + if result_dtype_onnx != dtype_onnx: + result = g.op("Cast", result, to_i=dtype_onnx) + return result + + return reduce_nodim, reduce_dim + + return reduce + + +# Ported from +# https://github.com/microsoft/onnxscript/blob/6b1b81700b4523f31d8c6d3321e5d8ef5d42b764/onnxscript/function_libs/torch_aten/ops/core.py#L6097 +# NOTE: Supporting aten::unflatten before opset13 needs helper function to adjust ONNX op changes in Concat, Slice, ... +@_onnx_symbolic("aten::unflatten") +@_beartype.beartype +def unflatten(g: jit_utils.GraphContext, input, dim, unflattened_size): + input_dim = symbolic_helper._get_tensor_rank(input) + if input_dim is None: + return symbolic_helper._unimplemented( + "dim", + "ONNX and PyTorch use different strategies to split the input. 
" + "Input rank must be known at export time.", + ) + + # dim could be negative + input_dim = g.op("Constant", value_t=torch.tensor([input_dim], dtype=torch.int64)) + dim = g.op("Add", input_dim, dim) + dim = g.op("Mod", dim, input_dim) + + input_size = g.op("Shape", input) + + head_start_idx = g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)) + head_end_idx = g.op( + "Reshape", dim, g.op("Constant", value_t=torch.tensor([1], dtype=torch.int64)) + ) + head_part_rank = g.op("Slice", input_size, head_start_idx, head_end_idx) + + dim_plus_one = g.op( + "Add", dim, g.op("Constant", value_t=torch.tensor([1], dtype=torch.int64)) + ) + tail_start_idx = g.op( + "Reshape", + dim_plus_one, + g.op("Constant", value_t=torch.tensor([1], dtype=torch.int64)), + ) + tail_end_idx = g.op( + "Constant", value_t=torch.tensor([_constants.INT64_MAX], dtype=torch.int64) + ) + tail_part_rank = g.op("Slice", input_size, tail_start_idx, tail_end_idx) + + final_shape = g.op( + "Concat", head_part_rank, unflattened_size, tail_part_rank, axis_i=0 + ) + + return symbolic_helper._reshape_helper(g, input, final_shape) + + +@_onnx_symbolic("aten::unsafe_chunk") +@symbolic_helper.parse_args("v", "i", "i", "i") +@_beartype.beartype +def unsafe_chunk(g: jit_utils.GraphContext, self, chunks, dim, _outputs=None): + if _outputs is None: + return g.op( + "SplitToSequence", + self, + g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)), + axis_i=dim, + keepdims_i=0, + ) + + size = symbolic_helper._get_tensor_dim_size(self, dim) + if size is None: + return symbolic_helper._unimplemented("unsafe_chunk", "unknown dimension size") + split_size = (size + chunks - 1) // chunks + splits = [split_size] * (size // split_size) + leftover = size % split_size + if leftover: + splits.append(leftover) + + # TODO: So far we don"t have a module using this method. We"ll keep + # this as a constant unless we see a request of dynamics in any + # user's modules. + splits = g.op("Constant", value_t=torch.tensor(splits, dtype=torch.long)) + return g.op("Split", self, splits, axis_i=dim, outputs=_outputs) + + +@_onnx_symbolic("aten::tile") +@_beartype.beartype +def tile(g: jit_utils.GraphContext, self, dims): + self_shape = g.op("Shape", self) + self_rank = g.op("Size", self_shape) + dims_rank = g.op("Size", dims) + diff = g.op("Sub", self_rank, dims_rank) + const_zero = g.op("Constant", value_t=torch.tensor([0])) + + # 1. If dims is shorter than self.shape pad dims with 1 + dims_shorter_than_self_shape = g.op("Greater", diff, const_zero) + ( + if_op_greater, + (if_context_greater, else_context_greater), + _, + ) = jit_utils.add_op_with_blocks( + g, "If", dims_shorter_than_self_shape, n_blocks=2, outputs=1 + ) + const_one = if_context_greater.op("Constant", value_t=torch.LongTensor([1])) + diff_1d_greater = if_context_greater.op("Reshape", diff, const_one) + exapnd_ones_greater = if_context_greater.op("Expand", const_one, diff_1d_greater) + dims_ = if_context_greater.op("Concat", exapnd_ones_greater, dims, axis_i=0) + utils._add_output_to_block(if_context_greater.block, dims_) + identity_dim = else_context_greater.op("Identity", dims) + utils._add_output_to_block(else_context_greater.block, identity_dim) + dims_final = if_op_greater.node().output() + + # 2. 
If dims is longer than self.shape pad self.shape with 1 + dims_longer_than_self_shape = g.op("Less", diff, const_zero) + ( + if_op_less, + (if_context_less, else_context_less), + _, + ) = jit_utils.add_op_with_blocks( + g, "If", dims_longer_than_self_shape, n_blocks=2, outputs=1 + ) + const_one = if_context_less.op("Constant", value_t=torch.LongTensor([1])) + diff_1d_less = if_context_less.op( + "Reshape", + if_context_less.op("Abs", diff), + const_one, + ) + exapnd_ones_less = if_context_less.op("Expand", const_one, diff_1d_less) + self_final_shape = if_context_less.op( + "Concat", exapnd_ones_less, self_shape, axis_i=0 + ) + self_ = if_context_less.op("Reshape", self, self_final_shape) + utils._add_output_to_block(if_context_less.block, self_) + identity_self = else_context_less.op("Identity", self) + utils._add_output_to_block(else_context_less.block, identity_self) + self_final = if_op_less.node().output() + + dims_final = g.op("Cast", dims_final, to_i=_C_onnx.TensorProtoDataType.INT64) + return g.op("Tile", self_final, dims_final) + + +@_onnx_symbolic("aten::repeat_interleave") +@_beartype.beartype +def repeat_interleave( + g: jit_utils.GraphContext, self, repeats, dim=None, output_size=None +): + input = self + final_dim = dim + # if dim is None flatten + # By default, use the flattened input array, and return a flat output array + if symbolic_helper._is_none(dim): + input = symbolic_helper._reshape_helper( + g, self, g.op("Constant", value_t=torch.tensor([-1])) + ) + dim = torch.tensor(0, dtype=torch.int64) + else: + dim = symbolic_helper._maybe_get_scalar(dim) + + repeats_dim = symbolic_helper._get_tensor_rank(repeats) + repeats_sizes = symbolic_helper._get_tensor_sizes(repeats) + input_sizes = symbolic_helper._get_tensor_sizes(input) + if repeats_dim is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of repeat_interleave for unknown repeats rank.", + self, + ) + if repeats_sizes is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of repeat_interleave for unknown repeats size.", + self, + ) + if input_sizes is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of repeat_interleave for unknown input size.", + self, + ) + # Handle cases where dim is negative + if dim < 0: + dim += len(input_sizes) + + output_sizes = input_sizes.copy() + for idx, input_size in enumerate(input_sizes): + if input_size is None: + output_sizes[idx], input_sizes[idx] = 0, -1 + + # Check if all indices should be repeated the same number of times. 
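+    # e.g. repeats given as a 0-d tensor (torch.tensor(2)) or a single-element vector
+    # (torch.tensor([2])) repeats every index the same number of times, so the simpler
+    # single-value helper below is used instead of the per-index loop.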
+ if repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1): + return symbolic_helper._repeat_interleave_single_value_repeat_helper( + g, self, repeats, dim + ) + + cond_dynamic_repeats = repeats_dim == 1 and repeats_sizes[0] is None + # If input size is dynamic or repeats vector is dynamic + if output_sizes[dim] == 0 or cond_dynamic_repeats: + reps = symbolic_helper._size_helper(g, input, dim) + reps = opset11.unsqueeze(g, reps, 0) + + # Check if repeats is dynamic + # As repeats is dynamic, we use a where node as a substitute for the if statement + # If repests_dim = 1, expand repeats otherwise use original tensor + if cond_dynamic_repeats: + repeat_dim = symbolic_helper._size_helper( + g, repeats, g.op("Constant", value_t=torch.LongTensor([0])) + ) + repeat_cond = g.op( + "Equal", repeat_dim, g.op("Constant", value_t=torch.LongTensor([1])) + ) + repeats = where(g, repeat_cond, g.op("Expand", repeats, reps), repeats) + # There are cases when the repeats are 1-d tensor with multiple repeats, but dim + # provided along one of the dynamic axes provided. A simple example would be + # input.shape -> [1, 1, *] where * represents the dynamic axes, and dim = 2 + # Now, repeat interleaving can be performed in pytorch when the value of * matches + # with the number of elements in repeat, for example if * -> 2, number of repeats + # should be 2 as well. + else: + return opset9.repeat_interleave(g, self, repeats, final_dim) + + reps_like = g.op( + "ConstantOfShape", + g.op("Shape", repeats), + value_t=torch.tensor([1], dtype=torch.long), + ) + r_splits = split(g, repeats, reps_like, 0) + i_splits = split(g, input, reps_like, dim) + + output_sizes[dim], input_sizes[dim] = -1, 1 + + # Create a loop to iterate over each value along the dimension + # and perform individual interleaving using the repeats tensor + # Loop is of the following pattern + # input (trip_count, cond) + # int trip_count = ...; + # bool cond = ...; + # for (int i=0; i < trip_count && cond; ++i) { + # cond = ...; + # } + + # Loop conditions + loop_condition = g.op("Constant", value_t=torch.tensor(1)) + loop_condition = g.op("Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL) + loop_len = reps + + # Create an empty sequence to store final expansions + final_splits = g.op("SequenceEmpty") + + # Loop inputs + loop, (loop_context,), _ = jit_utils.add_op_with_blocks( + g, "Loop", loop_len, loop_condition, final_splits, n_blocks=1 + ) + + loop_block = loop_context.block + block_input_iter = utils._add_input_to_block(loop_block) + cond = utils._add_input_to_block(loop_block) + final_splits = utils._add_input_to_block(loop_block) + + r_split = loop_context.op("SequenceAt", r_splits, block_input_iter) + i_split = loop_context.op("SequenceAt", i_splits, block_input_iter) + + i_split = opset11.unsqueeze(loop_context, i_split, dim + 1) + r_concat = [ + loop_context.op("Constant", value_t=torch.LongTensor(input_sizes[: dim + 1])), + r_split, + loop_context.op("Constant", value_t=torch.LongTensor(input_sizes[dim + 1 :])), + ] + r_concat = loop_context.op("Concat", *r_concat, axis_i=0) + i_split = opset9.expand(loop_context, i_split, r_concat, None) + i_split = symbolic_helper._reshape_helper( + loop_context, i_split, g.op("Constant", value_t=torch.LongTensor(output_sizes)) + ) + final_splits = loop_context.op("SequenceInsert", final_splits, i_split) + + # Loop outputs + cond_out = loop_context.op( + "Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL + ) + utils._add_output_to_block(loop_block, cond_out) + 
utils._add_output_to_block(loop_block, final_splits) + + loop_out = loop.node().output() + loop_out = g.op("ConcatFromSequence", loop_out, axis_i=dim) + return loop_out + + +@_onnx_symbolic("aten::diagonal") +@symbolic_helper.parse_args("v", "i", "i", "i") +@_beartype.beartype +def diagonal(g: jit_utils.GraphContext, self, offset, dim1, dim2): + rank = symbolic_helper._get_tensor_rank(self) + # Replace negative indexing when rank is known + if rank is not None: + dim1 = dim1 if dim1 >= 0 else dim1 + rank + dim2 = dim2 if dim2 >= 0 else dim2 + rank + + dim1_size = opset9.size( + g, self, dim=g.op("Constant", value_t=torch.LongTensor([dim1])) + ) + dim2_size = opset9.size( + g, self, dim=g.op("Constant", value_t=torch.LongTensor([dim2])) + ) + # Create appropriate mask + mask_shape = g.op("Concat", dim1_size, dim2_size, axis_i=0) + mask = opset9.zeros(g, mask_shape, None, None, None) + mask = g.op("EyeLike", mask, k_i=offset) + # dim1 and dim2 appended as a dimension at the end of the shape + + if rank is not None: + axes = list(range(rank)) + axes.remove(dim1) + axes.remove(dim2) + self = g.op("Transpose", self, perm_i=axes + [dim1, dim2]) + else: + return symbolic_helper._unimplemented("diagonal", "unknown input rank") + + # Multiply input and mask to calculate values along diagonal + # The mask consists of one values where diagonal values are to be calculated + # For example: + # [[1.1, 1.2, 1.3], * [[1, 0, 0] = [[1.1, 0, 0], + # [2.1, 2.2, 2.3], [0, 1, 0] [0, 2.2, 0], + # [3.1, 3.2, 3.3]] [0, 0, 1]] [0, 0, 3.3]] + result = g.op("Mul", self, mask) + result = symbolic_helper._reducesum_helper(g, result, axes_i=[-1], keepdims_i=0) + + # Calculate gather indices based on offset and dims + # If offset is greater than zero, set offset to zero as this aids in + # calculation of selection window + offset_op = g.op("Constant", value_t=torch.LongTensor([offset])) + if offset >= 0: + diag_size = g.op( + "Max", + g.op("Min", dim1_size, g.op("Sub", dim2_size, offset_op)), + g.op("Constant", value_t=torch.LongTensor([0])), + ) + offset = 0 + else: + diag_size = g.op( + "Max", + g.op("Min", g.op("Add", dim1_size, offset_op), dim2_size), + g.op("Constant", value_t=torch.LongTensor([0])), + ) + diag_size = g.op("Concat", diag_size, axis_i=0) + + # Calculate which diagonal values to select + # For example, in cases with offsets: + # [[0, 1.1, 0] + # [0, 0, 2.2]] + # we need to select the last two columns, so we create a tensor + # with all columns that are to be selected + # So in this example, it is [1, 2] + select_window_ones_fill = opset9.ones(g, diag_size, 4, None, None) + select_window = g.op( + "CumSum", + select_window_ones_fill, + g.op("Constant", value_t=torch.LongTensor([0])), + ) + select_window = g.op( + "Add", + select_window, + g.op("Constant", value_t=torch.LongTensor([abs(offset) - 1])), + ) + + gather_shape = [ + opset9.size(g, result, dim=g.op("Constant", value_t=torch.LongTensor([axis]))) + for axis in list(range(rank))[:-2] + ] + gather_shape.append(diag_size) + gather_shape = g.op("Concat", *gather_shape, axis_i=0) + gather_indices = opset9.zeros(g, gather_shape, 4, None, None) + + # There might be cases where offset value is greater than number of rows/columns + # and might cause the diagonal to overrun and as a result of this, diag_size would be zero. 
+ # For example, if + # offset = 9, dim1_size = 2 (columns), dim2_size = 4 (rows) + # diag_size = max(min(2, (4-9)), 0) = 0, based on calculation above + # Cases with diagonal overrun always result in diag_size = max(0, -ve value) = 0 + # In cases without diagonal overrun, we select the appropriate rows/columns along which we + # are calculating diagonal values. In cases with diagonal overrun, we return a tensor which has + # the dimension of the row/column where overrun occurred as 0-dim, as we are essentially + # returning an empty tensor + overrun_cond = g.op( + "Not", + g.op( + "Equal", + diag_size, + g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64)), + ), + ) + + if_op, (if_context, else_context), _ = jit_utils.add_op_with_blocks( + g, "If", overrun_cond, n_blocks=2 + ) + + gather_indices_if_block = if_context.op("Add", gather_indices, select_window) + gather_indices_if_block = symbolic_helper._unsqueeze_helper( + if_context, gather_indices_if_block, [rank - 1] + ) + final_non_overrun = if_context.op( + "GatherND", result, gather_indices_if_block, batch_dims_i=rank - 2 + ) + final_overrun = opset9.zeros(else_context, gather_shape, 6, None, None) + utils._add_output_to_block(if_context.block, final_non_overrun) + utils._add_output_to_block(else_context.block, final_overrun) + return if_op + + +# Quantized ops + + +@_onnx_symbolic("quantized::linear") +@_beartype.beartype +def quantized_linear( + g: jit_utils.GraphContext, q_input, q_weight, bias, op_scale, op_zero_point +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper( + g, bias, input_scale, weight_scale, axis + ) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.linear(g, input, weight, bias) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::linear_relu") +@_beartype.beartype +def quantized_linear_relu( + g: jit_utils.GraphContext, q_input, q_weight, bias, op_scale, op_zero_point +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper( + g, bias, input_scale, weight_scale, axis + ) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.linear(g, input, weight, bias) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv1d_relu") +@_beartype.beartype +def quantized_conv1d_relu( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper( + g, bias, input_scale, weight_scale, axis + ) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv1d(g, input, weight, bias, stride, padding, dilation, groups) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv2d_relu") +@_beartype.beartype +def quantized_conv2d_relu( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + 
dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper( + g, bias, input_scale, weight_scale, axis + ) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv2d(g, input, weight, bias, stride, padding, dilation, groups) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv3d_relu") +@_beartype.beartype +def quantized_conv3d_relu( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper( + g, bias, input_scale, weight_scale, axis + ) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv3d(g, input, weight, bias, stride, padding, dilation, groups) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv1d") +@_beartype.beartype +def quantized_conv1d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper( + g, bias, input_scale, weight_scale, axis + ) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv1d(g, input, weight, bias, stride, padding, dilation, groups) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv2d") +@_beartype.beartype +def quantized_conv2d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper( + g, bias, input_scale, weight_scale, axis + ) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv2d(g, input, weight, bias, stride, padding, dilation, groups) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv3d") +@_beartype.beartype +def quantized_conv3d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper( + g, bias, input_scale, weight_scale, axis + ) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv3d(g, input, weight, bias, stride, padding, dilation, groups) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv_transpose1d") +@_beartype.beartype +def quantized_conv_transpose1d( + g: 
jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + output_padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper( + g, bias, input_scale, weight_scale, axis + ) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv_transpose2d( + g, input, weight, bias, stride, padding, output_padding, groups, dilation + ) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv_transpose2d") +@_beartype.beartype +def quantized_conv_transpose2d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + output_padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper( + g, bias, input_scale, weight_scale, axis + ) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv_transpose2d( + g, input, weight, bias, stride, padding, output_padding, groups, dilation + ) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv_transpose3d") +@_beartype.beartype +def quantized_conv_transpose3d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + output_padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper( + g, bias, input_scale, weight_scale, axis + ) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv_transpose3d( + g, input, weight, bias, stride, padding, output_padding, groups, dilation + ) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset16.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset16.py new file mode 100644 index 0000000000000000000000000000000000000000..24306b4753665489a9c8ae170fa39aef3168e08a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset16.py @@ -0,0 +1,187 @@ +"""This file exports ONNX ops for opset 16. + +Note [ONNX Operators that are added/updated in opset 16] + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +https://github.com/onnx/onnx/blob/main/docs/Changelog.md#version-16-of-the-default-onnx-operator-set +New operators: + GridSample https://github.com/onnx/onnx/pull/3557 + +Updated operators: + Identity + If + LeakyRelu + Loop + PRelu + RoiAlign + Scan + ScatterElements + ScatterND + Where + GreaterOrEqual + LessOrEqual +""" + +# EDITING THIS FILE? READ THIS FIRST! 
+# see Note [Edit Symbolic Files] in README.md + +import functools + +import torch +from torch.nn.functional import ( + GRID_SAMPLE_INTERPOLATION_MODES, + GRID_SAMPLE_PADDING_MODES, +) +from torch.onnx import _type_utils, errors, symbolic_helper, utils +from torch.onnx._internal import _beartype, jit_utils, registration + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=16) + + +# note (mkozuki): Why `grid_sampler` instead of `grid_sample`? +# Because `torch.nn.functional.grid_sample` calls `torch.grid_sampler`. +@_onnx_symbolic("aten::grid_sampler") +@symbolic_helper.parse_args("v", "v", "i", "i", "b") +@_beartype.beartype +def grid_sampler( + g: jit_utils.GraphContext, + input, + grid, + mode_enum, + padding_mode_enum, + align_corners, +): + # Check the input and grid tensor rank beforehand. + if symbolic_helper._get_tensor_rank(input) == 5: + return symbolic_helper._onnx_unsupported("GridSample with 5D volumetric input") + mode_s = {v: k for k, v in GRID_SAMPLE_INTERPOLATION_MODES.items()}[mode_enum] # type: ignore[call-arg] + padding_mode_s = {v: k for k, v in GRID_SAMPLE_PADDING_MODES.items()}[padding_mode_enum] # type: ignore[call-arg] + return g.op( + "GridSample", + input, + grid, + align_corners_i=int(align_corners), + mode_s=mode_s, + padding_mode_s=padding_mode_s, + ) + + +@_onnx_symbolic("aten::scatter_add") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def scatter_add(g: jit_utils.GraphContext, self, dim, index, src): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("scatter", self, dim, index, src, overload_name="src") + + src_type = _type_utils.JitScalarType.from_value( + src, _type_utils.JitScalarType.UNDEFINED + ) + src_sizes = symbolic_helper._get_tensor_sizes(src) + index_sizes = symbolic_helper._get_tensor_sizes(index) + + if len(src_sizes) != len(index_sizes): + return symbolic_helper._unimplemented( + "scatter_add", + f"`index` ({index_sizes}) should have the same dimensionality as `src` ({src_sizes})", + ) + + # PyTorch only allows index shape <= src shape, so we can only consider + # taking index as subset size to src, like PyTorch does. When sizes for src + # and index are not matched or there are dynamic axes, we take index shape to + # slice src to accommodate. + if src_sizes != index_sizes or None in index_sizes: + adjusted_shape = g.op("Shape", index) + starts = g.op("Constant", value_t=torch.tensor([0] * len(index_sizes))) + src = g.op("Slice", src, starts, adjusted_shape) + + src = symbolic_helper._maybe_get_scalar(src) + if symbolic_helper._is_value(src): + return g.op("ScatterElements", self, index, src, axis_i=dim, reduction_s="add") + else: + # Check if scalar "src" has same type as self (PyTorch allows different + # type for scalar src (but not when src is tensor)). If not, insert Cast node. 
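+        # e.g. self is float32 while src is a Python int scalar: src is cast to FLOAT below
+        # so that ScatterElements receives matching input types.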
+ if _type_utils.JitScalarType.from_value(self) != src_type: + src = g.op( + "Cast", + src, + to_i=_type_utils.JitScalarType.from_value(self).onnx_type(), + ) + + return g.op( + "ScatterElements", + self, + index, + src, + axis_i=dim, + reduction_s="add", + ) + + +@_onnx_symbolic("aten::scatter_reduce") +@symbolic_helper.parse_args("v", "i", "v", "v", "s", "b") +@_beartype.beartype +def scatter_reduce( + g: jit_utils.GraphContext, + self: torch._C.Value, + dim: int, + index: torch._C.Value, + src: torch._C.Value, + reduce: str, + include_self: bool, +): + if reduce == "mean": + raise errors.OnnxExporterError( + "ONNX does not support mean reduction for scatter_reduce" + ) + if not include_self: + raise errors.OnnxExporterError( + "ONNX does not support include_self=False for scatter_reduce" + ) + + reduce_mode = { # convert torch string name to onnx string name + "mean": "none", # 'mean' doesn't support in ONNX 1.14 definition + "sum": "add", + "prod": "mul", + "amin": "min", + "amax": "max", + } + onnx_reduce = reduce_mode[reduce] + + self_rank = g.op("Size", g.op("Shape", self)) + + # if self_rank == 0: # assert (index_rank == 0 and rank_src == 0) + self_rank_is_zero = g.op( + "Equal", self_rank, g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64)) + ) + if_op, (if_context, else_context), _ = jit_utils.add_op_with_blocks( + g, "If", self_rank_is_zero, n_blocks=2, outputs=3 + ) + neg_1 = if_context.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)) + + self_reshape = if_context.op("Reshape", self, neg_1) + utils._add_output_to_block(if_context.block, self_reshape) + index_reshape = if_context.op("Reshape", index, neg_1) + utils._add_output_to_block(if_context.block, index_reshape) + src_reshape = if_context.op("Reshape", src, neg_1) + utils._add_output_to_block(if_context.block, src_reshape) + + self_identity = else_context.op("Identity", self) + utils._add_output_to_block(else_context.block, self_identity) + index_identitye = else_context.op("Identity", index) + utils._add_output_to_block(else_context.block, index_identitye) + src_identity = else_context.op("Identity", src) + utils._add_output_to_block(else_context.block, src_identity) + + result = g.op("ScatterElements", *if_op, axis_i=dim, reduction_s=onnx_reduce) + + # if self_rank == 0: + if_op, (if_context, else_context), _ = jit_utils.add_op_with_blocks( + g, "If", self_rank_is_zero, n_blocks=2, outputs=1 + ) + result_squeezed = if_context.op("Squeeze", result) + utils._add_output_to_block(if_context.block, result_squeezed) + result_identity = else_context.op("Identity", result) + utils._add_output_to_block(else_context.block, result_identity) + result_final = if_op.node().output() + + return result_final diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset9.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset9.py new file mode 100644 index 0000000000000000000000000000000000000000..7d1867d9317a35294874862e02e6f4babbc04221 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset9.py @@ -0,0 +1,7208 @@ +"""This file exports ONNX ops for opset 9. 
+ +Opset 9 is supported by ONNX release 1.4.1 +release on 01/23/19 +""" +from __future__ import annotations + +import builtins +import functools +import math +import sys +import warnings +from typing import Callable, List, Optional, Sequence, Tuple, Union + +import torch +import torch._C._onnx as _C_onnx +import torch.nn.modules.utils +import torch.onnx +from torch import _C + +# Monkey-patch graph manipulation methods on Graph, used for the ONNX symbolics +from torch.onnx import _constants, _deprecation, _type_utils, errors, symbolic_helper +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import _beartype, jit_utils, registration +from torch.types import Number + +# EDITING THIS FILE? READ THIS FIRST! +# see Note [Edit Symbolic Files] in README.md + +__all__ = [ + "abs", + "acos", + "add", + "addcmul", + "addmm", + "alias", + "amax", + "amin", + "aminmax", + "arange", + "argmax", + "argmin", + "as_strided", + "as_tensor", + "asin", + "atan", + "atan2", + "baddbmm", + "batch_norm", + "bernoulli", + "bitwise_not", + "bitwise_or", + "bmm", + "broadcast_tensors", + "broadcast_to", + "bucketize", + "cat", + "cdist", + "ceil", + "clamp_max", + "clamp_min", + "clamp", + "clone", + "constant_pad_nd", + "contiguous", + "conv_tbc", + "conv_transpose1d", + "conv_transpose2d", + "conv_transpose3d", + "conv1d", + "conv2d", + "conv3d", + "convert_element_type", + "convolution", + "cos", + "cosine_similarity", + "cross", + "cumsum", + "detach", + "dim", + "div", + "dot", + "dropout", + "elu", + "embedding_bag", + "embedding", + "empty_like", + "empty", + "eq", + "erf", + "exp", + "expand_as", + "expand", + "eye", + "fill", + "flatten", + "floor_divide", + "floor", + "floordiv", + "frobenius_norm", + "full_like", + "full", + "gather", + "ge", + "gelu", + "get_pool_ceil_padding", + "glu", + "group_norm", + "gt", + "hann_window", + "hardshrink", + "hardsigmoid", + "hardswish", + "hardtanh", + "index_add", + "index_copy", + "index_fill", + "index_put", + "index_select", + "index", + "instance_norm", + "is_floating_point", + "is_pinned", + "isnan", + "item", + "kl_div", + "layer_norm", + "le", + "leaky_relu", + "lerp", + "lift", + "linalg_cross", + "linalg_matrix_norm", + "linalg_norm", + "linalg_vector_norm", + "linear", + "linspace", + "log_sigmoid", + "log_softmax", + "log", + "log10", + "log1p", + "log2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logit", + "logsumexp", + "lstm_cell", + "lstm", + "lt", + "masked_fill", + "masked_fill_", + "matmul", + "max_pool1d_with_indices", + "max_pool2d_with_indices", + "max_pool3d_with_indices", + "max", + "maximum", + "meshgrid", + "min", + "minimum", + "mish", + "mm", + "movedim", + "mse_loss", + "mul", + "multinomial", + "mv", + "narrow", + "native_layer_norm", + "ne", + "neg", + "new_empty", + "new_full", + "new_ones", + "new_zeros", + "nonzero_numpy", + "nonzero", + "norm", + "numel", + "numpy_T", + "one_hot", + "ones_like", + "ones", + "onnx_placeholder", + "overload_by_arg_count", + "pad", + "pairwise_distance", + "permute", + "pixel_shuffle", + "pixel_unshuffle", + "pow", + "prelu", + "prim_constant_chunk", + "prim_constant_split", + "prim_constant", + "prim_data", + "prim_device", + "prim_dtype", + "prim_if", + "prim_layout", + "prim_list_construct", + "prim_list_unpack", + "prim_loop", + "prim_max", + "prim_min", + "prim_shape", + "prim_tolist", + "prim_tuple_construct", + "prim_type", + "prim_unchecked_cast", + "prim_uninitialized", + "rand_like", + "rand", + "randint_like", + "randint", + "randn_like", + 
"randn", + "reciprocal", + "reflection_pad", + "relu", + "relu6", + "remainder", + "repeat_interleave", + "repeat", + "replication_pad", + "reshape_as", + "reshape", + "roll", + "rrelu", + "rsqrt", + "rsub", + "scalar_tensor", + "scatter_add", + "scatter", + "select", + "selu", + "sigmoid", + "sign", + "silu", + "sin", + "size", + "slice", + "softmax", + "softplus", + "softshrink", + "sort", + "split_with_sizes", + "split", + "sqrt", + "square", + "squeeze", + "stack", + "std_mean", + "std", + "sub", + "t", + "take", + "tan", + "tanh", + "tanhshrink", + "tensor", + "threshold", + "to", + "topk", + "transpose", + "true_divide", + "type_as", + "unbind", + "unfold", + "unsafe_chunk", + "unsafe_split_with_sizes", + "unsafe_split", + "unsqueeze", + "unsupported_complex_operators", + "noop_complex_operators", + "unused", + "var_mean", + "var", + "view_as", + "view", + "where", + "wrap_logical_op_with_cast_to", + "wrap_logical_op_with_negation", + "zeros_like", + "zeros", + "zero", +] + + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=9) + + +def _apply_params(*args, **kwargs): + """Returns a decorator that calls the decorated (higher-order) function with the given parameters.""" + + def _apply(fn): + return fn(*args, **kwargs) + + return _apply + + +def _export(name: str): + """Exports the function in the current global namespace.""" + + def wrapper(func): + globals()[name] = func + __all__.append(name) + return func + + return wrapper + + +@_beartype.beartype +def unused(g): + """Represents "missing" optional inputs.""" + n = g.op("prim::Constant") + n.setType(_C.OptionalType.ofTensor()) + return n + + +@_onnx_symbolic("aten::_shape_as_tensor") +@_beartype.beartype +def _shape_as_tensor(g: jit_utils.GraphContext, input): + return g.op("Shape", input) + + +@_onnx_symbolic("aten::_reshape_from_tensor") +@_beartype.beartype +def _reshape_from_tensor(g: jit_utils.GraphContext, input, shape): + if isinstance(shape, list): + shape = g.op("Concat", *shape, axis_i=0) + return reshape(g, input, shape) + + +@_onnx_symbolic("aten::reshape") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def reshape(g: jit_utils.GraphContext, self, shape): + return symbolic_helper._reshape_helper(g, self, shape) + + +@_onnx_symbolic("aten::reshape_as") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def reshape_as(g: jit_utils.GraphContext, self, other): + shape = g.op("Shape", other) + return reshape(g, self, shape) + + +@_onnx_symbolic("aten::add") +@_beartype.beartype +def add(g: jit_utils.GraphContext, self, other, alpha=None): + if symbolic_helper._is_value(self) and symbolic_helper._is_tensor_list(self): + return symbolic_helper._onnx_opset_unsupported_detailed( + "Add", 9, 11, "Add between list of tensors not supported", self + ) + if alpha and symbolic_helper._scalar(symbolic_helper._maybe_get_scalar(alpha)) != 1: + other = g.op("Mul", other, alpha) + return g.op("Add", self, other) + + +@_onnx_symbolic("aten::sub") +@_beartype.beartype +def sub(g: jit_utils.GraphContext, self, other, alpha=None): + if alpha and symbolic_helper._scalar(symbolic_helper._maybe_get_scalar(alpha)) != 1: + other = g.op("Mul", other, alpha) + return g.op("Sub", self, other) + + +@_onnx_symbolic("aten::rsub") +@_beartype.beartype +def rsub(g: jit_utils.GraphContext, self, other, alpha=None): + return sub(g, other, self, alpha=alpha) + + +@_onnx_symbolic("aten::mul") +@_beartype.beartype +def mul(g: jit_utils.GraphContext, self, other): + if symbolic_helper._is_bool(self) and 
symbolic_helper._is_bool(other): + # ONNX Mul doesn't support Boolean, so use And as an equivalent operator. + return g.op("And", self, other) + else: + return g.op("Mul", self, other) + + +@_onnx_symbolic("aten::div") +@_beartype.beartype +def div(g: jit_utils.GraphContext, self, other, *args): + if len(args) == 0: + return true_divide(g, self, other) + else: + return _div_rounding_mode(g, self, other, *args) + + +@_onnx_symbolic("aten::addcmul") +@symbolic_helper.parse_args("v", "v", "v", "f") +@_beartype.beartype +def addcmul(g: jit_utils.GraphContext, self, tensor1, tensor2, value=1.0): + value_tens = g.op("Constant", value_t=torch.tensor([value])) + return add(g, self, mul(g, mul(g, tensor1, tensor2), value_tens)) + + +@symbolic_helper.parse_args("v", "v", "s") +@_beartype.beartype +def _div_rounding_mode(g: jit_utils.GraphContext, self, other, rounding_mode): + if rounding_mode is None: + return true_divide(g, self, other) + elif rounding_mode == "floor": + return _floor_divide(g, self, other) + elif rounding_mode == "trunc": + return _trunc_divide(g, self, other) + else: + raise errors.SymbolicValueError( + f'Unsupported rounding mode: "{rounding_mode}". Expected None, "floor" or "trunc"', + self, + ) + + +@_beartype.beartype +def _trunc_divide(g: jit_utils.GraphContext, self, other): + out = g.op("Div", self, other) + # the correct operation is truncate, which is not supported in ONNX, + # we cannot call floor since it will behave differently for negative numbers + # (eg. -0.1 should become -0 ) + # - if scalar_type information are not available, assume that + # we need to call floor (treat as float) + out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.INT64) + + # Matching PyTorch's behavior: + # - if self is fp the output's type is self's type + # - if self is not fp and other is fp, the output is of type JitScalarType.FLOAT + # - self is not fp and other is not fp, the output's type is self's output type + # - the output type defaults to Float + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.UNDEFINED + ) + if scalar_type != _type_utils.JitScalarType.UNDEFINED: + if not symbolic_helper._is_fp(self) and symbolic_helper._is_fp(other): + out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT) + else: + out = g.op( + "Cast", + out, + to_i=scalar_type.onnx_type(), + ) + else: + out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT) + return out + + +@_beartype.beartype +def _floor_divide(g: jit_utils.GraphContext, self, other): + if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other): + out = true_divide(g, self, other) + return g.op("Floor", out) + else: + # Integer division does trunction rounding + div = g.op("Div", self, other) + # Division is negative if: self < 0 != other < 0 + zero = g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64)) + negative = g.op( + "Xor", + symbolic_helper._lt_helper(g, self, zero), + symbolic_helper._lt_helper(g, other, zero), + ) + + # For negative numbers with self % other != 0, subtract 1 to round down instead of up + mod = g.op("Sub", self, g.op("Mul", div, other)) + fixup_mask = g.op("And", negative, g.op("Not", g.op("Equal", mod, zero))) + + one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) + fixup = g.op("Mul", fixup_mask, one) + return g.op("Sub", div, fixup) + + +@_onnx_symbolic("aten::floor_divide") +@_beartype.beartype +def floor_divide(g: jit_utils.GraphContext, self, other): + # Deprecated behavior, floor_divide actually truncates + return 
_trunc_divide(g, self, other) + + +@_onnx_symbolic("aten::floordiv") +@_beartype.beartype +def floordiv(g: jit_utils.GraphContext, self, other): + return floor_divide(g, self, other) + + +@_onnx_symbolic("aten::true_divide") +@_beartype.beartype +def true_divide(g: jit_utils.GraphContext, self, other): + """Division where both inputs are cast to floating types + + If both inputs are floating, performs div as usual + If only one input is a floating type, the other input is cast to its type + If neither input is a floating type, both inputs are cast to the default scalar type + """ + + # Case 1: either values are floating + # Performs div as usual. + # Implicit casting will be handled in scalar type analysis pass. + if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other): + return g.op("Div", self, other) + + # Case 2: neither is floating + # Casts both inputs to the default scalar type + scalar_type = torch.get_default_dtype() + onnx_scalar_type = _C_onnx.TensorProtoDataType.FLOAT + assert scalar_type is torch.float or scalar_type is torch.double + if torch.get_default_dtype() is torch.double: + onnx_scalar_type = _C_onnx.TensorProtoDataType.DOUBLE + + self = g.op("Cast", self, to_i=onnx_scalar_type) + other = g.op("Cast", other, to_i=onnx_scalar_type) + return g.op("Div", self, other) + + +@_onnx_symbolic("aten::reciprocal") +@_beartype.beartype +def reciprocal(g: jit_utils.GraphContext, self): + # torch.reciprocal implicitly casts to float, so we do the same. + if not symbolic_helper._is_fp(self): + self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.FLOAT) + return g.op("Reciprocal", self) + + +@_onnx_symbolic("aten::cat") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def cat(g: jit_utils.GraphContext, tensor_list, dim): + tensors = symbolic_helper._unpack_list(tensor_list) + # torch.cat ignores empty tensors such as `torch.Tensor([])` + # These needs to be removed as input from ONNX's concat too, otherwise shape inference + # will likely fail due to inputs with different ranks (0 for empty tensor, > 0 for anything else) + nonempty_tensors = [] + for t in tensors: + if symbolic_helper._is_constant(t) and not symbolic_helper._get_tensor_dim_size( + t, 0 + ): + continue + nonempty_tensors.append(t) + assert len(nonempty_tensors) > 0 + assert all( + symbolic_helper._get_tensor_rank(nonempty_tensors[0]) is None + or symbolic_helper._get_tensor_rank(t) is None + or symbolic_helper._get_tensor_rank(t) + == symbolic_helper._get_tensor_rank(nonempty_tensors[0]) + for t in nonempty_tensors + ) + tensor_list.node().removeAllInputs() + for t in nonempty_tensors: + tensor_list.node().addInput(t) + + tensors = symbolic_helper._unpack_list(tensor_list) + return g.op("Concat", *tensors, axis_i=dim) + + +@_onnx_symbolic("aten::stack") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def stack(g: jit_utils.GraphContext, tensor_list, dim): + unsqueezed = [ + symbolic_helper._unsqueeze_helper(g, t, [dim]) + for t in symbolic_helper._unpack_list(tensor_list) + ] + return g.op("Concat", *unsqueezed, axis_i=dim) + + +@_onnx_symbolic("aten::list") +@_beartype.beartype +def _list(g: jit_utils.GraphContext, self): + return self + + +@_onnx_symbolic("aten::mm") +@_beartype.beartype +def mm(g: jit_utils.GraphContext, self, other): + # Create a dummy C tensor. 
Only needed for API purposes, the value is + # since beta = 0 + C = g.op("Constant", value_t=torch.tensor([1])) + return g.op("Gemm", self, other, C, beta_f=0.0, alpha_f=1.0) + + +@_onnx_symbolic("aten::bmm") +@_beartype.beartype +def bmm(g: jit_utils.GraphContext, self, other): + return g.op("MatMul", self, other) + + +@_onnx_symbolic("aten::matmul") +@_beartype.beartype +def matmul(g: jit_utils.GraphContext, self, other): + return g.op("MatMul", self, other) + + +@_onnx_symbolic("aten::addmm") +@symbolic_helper.parse_args("v", "v", "v", "t", "t") +@_beartype.beartype +def addmm(g: jit_utils.GraphContext, self, mat1, mat2, beta, alpha): + scalar_type = None + self_scalar_type = symbolic_helper._try_get_scalar_type(self) + mat1_scalar_type = symbolic_helper._try_get_scalar_type(mat1) + mat2_scalar_type = symbolic_helper._try_get_scalar_type(mat2) + if self_scalar_type is not None: + scalar_type = self_scalar_type + elif mat1_scalar_type is not None: + scalar_type = mat1_scalar_type + elif mat2_scalar_type is not None: + scalar_type = mat2_scalar_type + + mat1_rank = symbolic_helper._get_tensor_rank(mat1) + mat2_rank = symbolic_helper._get_tensor_rank(mat2) + + def is_not_none_nor(v, u): + return v is not None and v != u + + if scalar_type is not None and ( + is_not_none_nor(mat1_rank, 2) or is_not_none_nor(mat2_rank, 2) + ): + res1 = g.op("MatMul", mat1, mat2) + res2 = self + + alpha = symbolic_helper._scalar(alpha) + beta = symbolic_helper._scalar(beta) + + if alpha != 1: + alpha = g.op( + "Constant", value_t=torch.tensor(alpha, dtype=scalar_type.dtype()) + ) + res1 = g.op("Mul", res1, alpha) + if beta != 1: + beta = g.op( + "Constant", + value_t=torch.tensor( + symbolic_helper._scalar(beta), dtype=scalar_type.dtype() + ), + ) + res2 = g.op("Mul", res2, beta) + + return g.op("Add", res1, res2) + + return g.op( + "Gemm", + mat1, + mat2, + self, + beta_f=symbolic_helper._scalar(beta), + alpha_f=symbolic_helper._scalar(alpha), + ) + + +@_onnx_symbolic("aten::neg") +@_beartype.beartype +def neg(g: jit_utils.GraphContext, self): + return g.op("Neg", self) + + +@_onnx_symbolic("aten::sqrt") +@_beartype.beartype +def sqrt(g: jit_utils.GraphContext, self): + if _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.UNDEFINED + ) in { + _type_utils.JitScalarType.UINT8, + _type_utils.JitScalarType.INT8, + _type_utils.JitScalarType.INT16, + _type_utils.JitScalarType.INT, + _type_utils.JitScalarType.INT64, + }: + # torch converts all int inputs to sqrt to float + self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.FLOAT) + + return g.op("Sqrt", self) + + +@_onnx_symbolic("aten::rsqrt") +@_beartype.beartype +def rsqrt(g: jit_utils.GraphContext, self): + return g.op( + "Div", symbolic_helper._if_scalar_type_as(torch.ones(1), self), sqrt(g, self) + ) + + +@_onnx_symbolic("aten::tanh") +# Fixed scale and zero_point, discovered from aten/src/ATen/native/quantized/cpu/qtanh.cpp +@symbolic_helper.quantized_args(True, scale=2.0 / 256.0, zero_point=128) +@_beartype.beartype +def tanh(g: jit_utils.GraphContext, self): + return g.op("Tanh", self) + + +@_onnx_symbolic("aten::sin") +@_beartype.beartype +def sin(g: jit_utils.GraphContext, self): + return g.op("Sin", self) + + +@_onnx_symbolic("aten::cos") +@_beartype.beartype +def cos(g: jit_utils.GraphContext, self): + return g.op("Cos", self) + + +@_onnx_symbolic("aten::tan") +@_beartype.beartype +def tan(g: jit_utils.GraphContext, self): + return g.op("Tan", self) + + +@_onnx_symbolic("aten::asin") +@_beartype.beartype +def asin(g: 
jit_utils.GraphContext, self): + return g.op("Asin", self) + + +@_onnx_symbolic("aten::acos") +@_beartype.beartype +def acos(g: jit_utils.GraphContext, self): + return g.op("Acos", self) + + +@_onnx_symbolic("aten::atan") +@_beartype.beartype +def atan(g: jit_utils.GraphContext, self): + return g.op("Atan", self) + + +@_onnx_symbolic("aten::atan2") +@_beartype.beartype +def atan2(g: jit_utils.GraphContext, self, other): + # self is y, and other is x on coordinate + slope = g.op("Div", self, other) + atan = g.op("Atan", slope) + const_zero = g.op("Constant", value_t=torch.tensor(0)) + const_pi = g.op("Constant", value_t=torch.tensor(math.pi)) + + condition_second_or_third_quadrant = g.op("Greater", self, const_zero) + second_third_quadrant = g.op( + "Where", + condition_second_or_third_quadrant, + g.op("Add", atan, const_pi), + g.op("Sub", atan, const_pi), + ) + + condition_14_or_23_quadrant = g.op("Less", other, const_zero) + result = g.op("Where", condition_14_or_23_quadrant, second_third_quadrant, atan) + + return result + + +@_onnx_symbolic("aten::sigmoid") +# Fixed scale and zero_point, discovered from aten/src/ATen/native/quantized/cpu/qsigmoid.cpp +@symbolic_helper.quantized_args(True, scale=1.0 / 256.0, zero_point=0) +@_beartype.beartype +def sigmoid(g: jit_utils.GraphContext, self): + return g.op("Sigmoid", self) + + +@_onnx_symbolic("aten::sign") +@_beartype.beartype +def sign(g: jit_utils.GraphContext, self): + return g.op("Sign", self) + + +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def _slice(g: jit_utils.GraphContext, input, axes, starts, ends): + assert len(starts) == len(ends) + if len(starts) == 1 and starts[0] == 0 and ends[0] == _constants.INT64_MAX: + return input + return g.op("Slice", input, axes_i=axes, starts_i=starts, ends_i=ends) + + +@_beartype.beartype +def _maybe_cast_reduce_op_input(g: jit_utils.GraphContext, self): + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.UNDEFINED + ) + if scalar_type != _type_utils.JitScalarType.UNDEFINED: + # This check only covers traced modules where dtype is present + # pytorch reduce-ops cast all other integral types to int64 + if ( + not symbolic_helper._is_fp(self) + and scalar_type != _type_utils.JitScalarType.INT64 + ): + self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.INT64) + return self + + +@_beartype.beartype +def _reduce_op_symbolic(onnx_op_name, allow_multi_dim_support=True): + @_beartype.beartype + def symbolic(g, self, dim=None, keepdim=None): + self = _maybe_cast_reduce_op_input(g, self) + if dim is None or dim == tuple(): + # Dim can be 0, which will cause (not dim) == True. 
So we don't want to do + # (not dim) + # all-reduce path + return symbolic_helper._handle_reduce_dim_none(g, self, onnx_op_name) + else: + # dim-reduce path + desc = "is" if allow_multi_dim_support else "i" + dim, keepdim = symbolic_helper._get_const( + dim, desc, "dim" + ), symbolic_helper._get_const(keepdim, "i", "keepdim") + dim_list = dim if allow_multi_dim_support else [dim] + return g.op(onnx_op_name, self, axes_i=dim_list, keepdims_i=keepdim) + + return symbolic + + +@_beartype.beartype +def overload_by_arg_count(fn): + @functools.wraps(fn) + @_beartype.beartype + def wrapper(g, *args): + overloads = fn(g, *args) + for overload in overloads: + arg_descriptors = overload._arg_descriptors + if len(arg_descriptors) == len(args): + return overload(g, *args) + return symbolic_helper._unimplemented( + f"aten::{fn.__name__}", f"with {len(args)} arguments" + ) + + return wrapper + + +@_onnx_symbolic("aten::sum", decorate=[_apply_params("ReduceSum", "sum")]) +@_onnx_symbolic("aten::mean", decorate=[_apply_params("ReduceMean", "mean")]) +# torch.prod does not support multidimensional "dim" +@_onnx_symbolic( + "aten::prod", + decorate=[_apply_params("ReduceProd", "prod", allow_multi_dim_support=False)], +) +@_beartype.beartype +def _reduce_with_dtype(onnx_op: str, name: str, allow_multi_dim_support: bool = True): + symbolic = _reduce_op_symbolic( + onnx_op, allow_multi_dim_support=allow_multi_dim_support + ) + + @overload_by_arg_count + def reduce(g, *args, **kwargs): + @symbolic_helper.quantized_args(True) + @symbolic_helper.parse_args("v", "none") + def reduce_nodim(g, self, dtype): + dtype_onnx = None + if dtype.node().kind() == "onnx::Constant": + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + dtype_onnx = _type_utils.JitScalarType(dtype).onnx_type() + self = g.op("Cast", self, to_i=dtype_onnx) + elif dtype.node().kind() != "prim::Constant": + return symbolic_helper._unimplemented(name, "dtype", dtype) + result = symbolic(g, self) + if dtype_onnx is not None: + result_dtype_onnx = _type_utils.JitScalarType.from_value( + result + ).onnx_type() + if result_dtype_onnx != dtype_onnx: + result = g.op("Cast", result, to_i=dtype_onnx) + return result + + dim_desc = "is" if allow_multi_dim_support else "i" + + @symbolic_helper.quantized_args(True) + @symbolic_helper.parse_args("v", dim_desc, "i", "none") # type: ignore[arg-type] + def reduce_dim(g, self, dim, keepdim, dtype): + dtype_onnx = None + if dtype.node().kind() == "onnx::Constant": + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + dtype_onnx = _type_utils.JitScalarType(dtype).onnx_type() + self = g.op("Cast", self, to_i=dtype_onnx) + elif dtype.node().kind() != "prim::Constant": + return symbolic_helper._unimplemented(name, "dtype", dtype) + result = symbolic(g, self, dim, keepdim) + if dtype_onnx is not None: + result_dtype_onnx = _type_utils.JitScalarType.from_value( + result + ).onnx_type() + if result_dtype_onnx != dtype_onnx: + result = g.op("Cast", result, to_i=dtype_onnx) + return result + + return reduce_nodim, reduce_dim + + return reduce + + +@_onnx_symbolic("aten::cumsum") +@symbolic_helper.parse_args("v", "i", "none") +@_beartype.beartype +def cumsum(g: jit_utils.GraphContext, input, dim, dtype): + if symbolic_helper.is_caffe2_aten_fallback(): + if dtype.node().kind() != "prim::Constant": + return symbolic_helper._unimplemented("cumsum", "dtype", dtype) + return g.at("cumsum", input, dim_i=dim) + + symbolic_helper._onnx_opset_unsupported("cumsum", 9, 11, input) + + 
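+# A minimal, hedged sketch (illustrative only, not part of the exporter): it
+# demonstrates the opset-9 limitation documented in `cumsum` above, where ops
+# with no ONNX-9 equivalent raise during export and require a newer opset.
+# The helper name `_example_cumsum_export` and the tiny `CumSum` wrapper
+# module are assumptions made for illustration; the function is defined here
+# but never called by the exporter.
+def _example_cumsum_export():
+    """Sketch: torch.cumsum fails to export at opset 9 but works at opset 11."""
+    import io
+
+    class CumSum(torch.nn.Module):
+        def forward(self, x):
+            return torch.cumsum(x, dim=0)
+
+    x = torch.arange(4.0)
+    try:
+        # Expected to raise: aten::cumsum has no opset-9 symbolic (see above).
+        torch.onnx.export(CumSum(), (x,), io.BytesIO(), opset_version=9)
+    except Exception:
+        pass
+    # Opset 11 maps aten::cumsum to onnx::CumSum, so this export should succeed.
+    torch.onnx.export(CumSum(), (x,), io.BytesIO(), opset_version=11)
+
+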
+@_onnx_symbolic("aten::_sample_dirichlet") +@_beartype.beartype +def _sample_dirichlet(g: jit_utils.GraphContext, self, generator): + if symbolic_helper.is_caffe2_aten_fallback(): + if not symbolic_helper._is_none(generator): + return symbolic_helper._unimplemented( + "_sample_dirichlet", "We are not able to export generator", self + ) + return g.at("_sample_dirichlet", self) + return symbolic_helper._onnx_unsupported("_sample_dirichlet", self) + + +@_onnx_symbolic("aten::_standard_gamma") +@_beartype.beartype +def _standard_gamma(g: jit_utils.GraphContext, self, generator): + if symbolic_helper.is_caffe2_aten_fallback(): + if not symbolic_helper._is_none(generator): + return symbolic_helper._unimplemented( + "_standard_gamma", "not able to export generator", self + ) + return g.at("_standard_gamma", self) + + return symbolic_helper._onnx_unsupported("_standard_gamma", self) + + +@_onnx_symbolic("aten::t") +@_beartype.beartype +def t(g: jit_utils.GraphContext, self): + rank = symbolic_helper._get_tensor_rank(self) + if rank is None or rank < 2: + # The transpose of a 1d or 0d tensor is itself. ONNX does not define the behavior + # clearly and onnxruntime fails on these cases. So we add an Identity node to + # mirror the behavior of eager mode. + return g.op("Identity", self) + return g.op("Transpose", self, perm_i=(1, 0)) + + +@_onnx_symbolic("aten::numpy_T") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def numpy_T(g: jit_utils.GraphContext, input): + ndim = symbolic_helper._get_tensor_rank(input) + assert ndim is not None + perm = list(reversed(range(0, ndim))) + return g.op("Transpose", input, perm_i=perm) + + +@_onnx_symbolic("aten::expand") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def expand(g: jit_utils.GraphContext, self, size, implicit): + size = symbolic_helper._maybe_get_const(size, "is") + if not symbolic_helper._is_value(size): + size = g.op("Constant", value_t=torch.LongTensor(size)) + elif symbolic_helper._is_packed_list(size): + # Expand with -1 dim value means dim is unchanged. + # Since onnx::expand supports two-way broadcasting, + # -1 dim value can be exported to onnx as 1 + size = symbolic_helper._reshape_helper( + g, stack(g, size, 0), g.op("Constant", value_t=torch.tensor([-1])) + ) + dtype = _type_utils.JitScalarType.INT64 + ones = ones_like(g, size, dtype) + neg_ones = mul(g, ones, g.op("Constant", value_t=torch.tensor(-1))) + size = where(g, g.op("Equal", size, neg_ones), ones, size) + return g.op("Expand", self, size) + + +@_onnx_symbolic("aten::broadcast_to") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def broadcast_to(g: jit_utils.GraphContext, self, size): + size = symbolic_helper._maybe_get_const(size, "is") + if not symbolic_helper._is_value(size): + size = g.op("Constant", value_t=torch.LongTensor(size)) + elif symbolic_helper._is_packed_list(size): + # Expand with -1 dim value means dim is unchanged. 
+ # Since onnx::expand supports two-way broadcasting, + # -1 dim value can be exported to onnx as 1 + size = symbolic_helper._reshape_helper( + g, stack(g, size, 0), g.op("Constant", value_t=torch.tensor([-1])) + ) + dtype = _type_utils.JitScalarType.INT64 + ones = ones_like(g, size, dtype) + neg_ones = mul(g, ones, g.op("Constant", value_t=torch.tensor(-1))) + size = where(g, g.op("Equal", size, neg_ones), ones, size) + return g.op("Expand", self, size) + + +@_onnx_symbolic("aten::expand_as") +@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def expand_as(g: jit_utils.GraphContext, self, other): + self_t = symbolic_helper._maybe_get_const(self, "t") + if isinstance(self_t, torch.Tensor): + orig_type = self_t.dtype + self_t = self_t.to(torch.double) + dims = [] + for d in range(self_t.dim()): + if torch.equal(self_t.mean(d).unsqueeze(d).expand_as(self_t), self_t): + dims.append(d) + self = g.op( + "Constant", value_t=self_t.mean(dims, keepdim=True).to(orig_type) + ) + + shape = g.op("Shape", other) + return g.op("Expand", self, shape) + + +@_onnx_symbolic("aten::embedding") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "v", "i", "b", "v") +@_beartype.beartype +def embedding( + g: jit_utils.GraphContext, + weight, + indices, + padding_idx, + scale_grad_by_freq, + sparse, +): + if scale_grad_by_freq and GLOBALS.export_training: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of embedding with scale_grad_by_freq=True " + "for training mode. ONNX does not support scaling the gradients.", + weight, + ) + if padding_idx >= 0 and GLOBALS.export_training: + warnings.warn( + "Warning: ONNX export of embedding with padding_idx >= 0 " + "for training mode. " + "ONNX does not support not updating the embedding vector at padding_idx during training." 
+ ) + + return g.op("Gather", weight, indices) + + +@_onnx_symbolic("aten::embedding_bag") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i", "v", "i", "i") +@_beartype.beartype +def embedding_bag( + g: jit_utils.GraphContext, + embedding_matrix, + indices, + offsets, + scale_grad_by_freq, + mode, + sparse, + per_sample_weights, + include_last_offset, + padding_idx, +): + if not symbolic_helper._is_none(per_sample_weights): + return symbolic_helper._onnx_unsupported( + "embedding_bag with per_sample_weights" + ) + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "embedding_bag", + embedding_matrix, + indices, + offsets, + outputs=4, + scale_grad_by_freq_i=scale_grad_by_freq, + mode_i=mode, + sparse_i=sparse, + include_last_offset_i=include_last_offset, + padding_idx_i=padding_idx, + ) + + return symbolic_helper._onnx_unsupported("embedding_bag", embedding_matrix) + + +@_onnx_symbolic("aten::size") +@symbolic_helper.quantized_args(True, quantize_output=False) +@_beartype.beartype +def size(g: jit_utils.GraphContext, self, dim=None): + if dim is None: + return g.op("Shape", self) + if symbolic_helper._maybe_get_const(dim, "i") < 0: + rank = symbolic_helper._get_tensor_rank(self) + if rank is not None: + dim = symbolic_helper._maybe_get_const(dim, "i") + rank + dim = g.op("Constant", value_t=torch.tensor(dim)) + return symbolic_helper._size_helper(g, self, dim) + + +@_onnx_symbolic("aten::transpose") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def transpose(g: jit_utils.GraphContext, self, dim0, dim1): + if dim0 == dim1: # micro-optimization + return self + + # NB: Transpose in ONNX is actually a Permute + rank = symbolic_helper._get_tensor_rank(self) + if rank is not None: + axes = list(range(rank)) + axes[dim0], axes[dim1] = axes[dim1], axes[dim0] + return g.op("Transpose", self, perm_i=axes) + elif symbolic_helper.is_caffe2_aten_fallback(): + # if we don't have dim information we cannot + # output a permute so use ATen instead + return g.at("transpose", self, overload_name="int", dim0_i=dim0, dim1_i=dim1) + else: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of transpose for tensor of unknown rank.", + self, + ) + + +@_onnx_symbolic("aten::permute") +@symbolic_helper.parse_args("v", "is") +@_beartype.beartype +def permute(g: jit_utils.GraphContext, self, dims): + if dims == list(range(0, len(dims))): + return self + return g.op("Transpose", self, perm_i=dims) + + +@_onnx_symbolic("aten::view") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def view(g: jit_utils.GraphContext, self, size): + return reshape(g, self, size) + + +@_onnx_symbolic("aten::view_as") +@_beartype.beartype +def view_as(g: jit_utils.GraphContext, self, other): + shape = g.op("Shape", other) + return reshape(g, self, shape) + + +@_onnx_symbolic("aten::unsafe_chunk") +@symbolic_helper.parse_args("v", "i", "i", "i") +@_beartype.beartype +def unsafe_chunk(g: jit_utils.GraphContext, self, chunks, dim, _outputs=None): + if _outputs is None: + return symbolic_helper._onnx_opset_unsupported_detailed( + "unsafe_chunk", 9, 11, "Dynamic number of outputs not supported", self + ) + size = symbolic_helper._get_tensor_dim_size(self, dim) + if size is None: + return symbolic_helper._unimplemented( + "unsafe_chunk", "unknown dimension size", self + ) + split_size = (size + chunks - 1) // chunks + splits = [split_size] * (size // split_size) + leftover = size % split_size + if leftover: 
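+        # e.g. a dimension of size 10 split into 3 chunks gives split_size=4 and
+        # splits=[4, 4] so far; the leftover of 2 is appended here -> [4, 4, 2].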
+ splits.append(leftover) + return g.op("Split", self, split_i=splits, axis_i=dim, outputs=_outputs) + + +@_onnx_symbolic("aten::split") +@symbolic_helper.parse_args("v", "v", "i", "i") +@_beartype.beartype +def split(g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None): + if not symbolic_helper._is_split_static(split_size_or_sizes, _outputs): + return symbolic_helper._onnx_opset_unsupported_detailed( + "split", 9, 11, "Dynamic number of outputs not supported", self + ) + split_val = symbolic_helper._node_get(split_size_or_sizes.node(), "value") + if split_val.dim() > 0: + return split_with_sizes(g, self, split_size_or_sizes, dim, _outputs) + split_size = symbolic_helper._get_const(split_size_or_sizes, "i", "split_size") + + size = symbolic_helper._get_tensor_dim_size(self, dim) + if size is None: + if _outputs is not None: + size = split_size * _outputs + else: + return symbolic_helper._onnx_opset_unsupported_detailed( + "split", 9, 11, "Unknown dimension size not supported", self + ) + splits = [split_size] * (size // split_size) + leftover = size % split_size + if leftover: + splits.append(leftover) + return g.op("Split", self, split_i=splits, axis_i=dim, outputs=_outputs) + + +@_onnx_symbolic("aten::unsafe_split") +@_beartype.beartype +def unsafe_split( + g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None +): + return split(g, self, split_size_or_sizes, dim, _outputs) + + +@_onnx_symbolic("aten::split_with_sizes") +@symbolic_helper.parse_args("v", "is", "i", "i") +@_beartype.beartype +def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None): + if not symbolic_helper._is_split_static(split_sizes, _outputs): + return symbolic_helper._onnx_opset_unsupported_detailed( + "split_with_sizes", 9, 11, "Dynamic number of outputs not supported", self + ) + return g.op("Split", self, split_i=split_sizes, axis_i=dim, outputs=_outputs) + + +@_onnx_symbolic("aten::unsafe_split_with_sizes") +@_beartype.beartype +def unsafe_split_with_sizes( + g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None +): + return split_with_sizes(g, self, split_sizes, dim, _outputs) + + +@_onnx_symbolic("aten::unbind") +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def unbind(g: jit_utils.GraphContext, self, dim=0, _outputs=None): + if _outputs is None: + return symbolic_helper._onnx_opset_unsupported_detailed( + "unbind", 9, 11, "Dynamic number of outputs not supported", self + ) + + outputs = g.op("Split", self, split_i=[1] * _outputs, axis_i=dim, outputs=_outputs) + outputs = [outputs] if _outputs == 1 else outputs + squeezed_outputs = [ + symbolic_helper._squeeze_helper(g, out, [dim]) for out in outputs + ] + return squeezed_outputs + + +@_onnx_symbolic("aten::select") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "i", "v") +@_beartype.beartype +def select(g: jit_utils.GraphContext, self, dim, index): + index = symbolic_helper._maybe_get_scalar(index) + if (not symbolic_helper._is_value(index)) and (index < 0): + if index == -1: + end_index = _constants.INT64_MAX + else: + end_index = index + 1 + slice_node = symbolic_helper._slice_helper( + g, self, axes=[dim], starts=[index], ends=[end_index] + ) + return symbolic_helper._squeeze_helper(g, slice_node, [dim]) + else: + # FIXME(justinchuby): can index be an int and not a value? 
+ return g.op("Gather", self, index, axis_i=dim) + + +@_onnx_symbolic("aten::square") +@_beartype.beartype +def square(g: jit_utils.GraphContext, self): + return g.op("Mul", self, self) + + +@_onnx_symbolic("aten::squeeze") +@_beartype.beartype +def squeeze(g: jit_utils.GraphContext, self, dim=None): + if dim is None: + return g.op("Squeeze", self) + + squeeze_dim = symbolic_helper._get_const(dim, "i", "dim") + # Handle negative dims + if squeeze_dim < 0: + rank = symbolic_helper._get_tensor_rank(self) + if rank is not None: + warnings.warn( + "ONNX export squeeze with negative axis " + + str(squeeze_dim) + + " might cause the onnx model to be incorrect. " + + "Negative axis is not supported in ONNX. " + + "Axis is converted to " + + str(squeeze_dim + rank) + + " based on input shape at export time. " + + "Passing an tensor of different rank in execution will be incorrect." + ) + squeeze_dim += rank + else: + return symbolic_helper._unimplemented( + "squeeze", "negative axis with unknown input rank", self + ) + + dim_size = symbolic_helper._get_tensor_dim_size(self, squeeze_dim) + if dim_size is None: + warnings.warn( + "This model contains a squeeze operation on dimension " + + str(squeeze_dim) + + " on an input " + + "with unknown shape. Note that if the size of dimension " + + str(squeeze_dim) + + " of the input " + + "is not 1, the ONNX model will return an error. Opset version 11 supports squeezing on " + + "non-singleton dimensions, it is recommended to export this model using opset " + + "version 11 or higher." + ) + return symbolic_helper._squeeze_helper(g, self, axes_i=[squeeze_dim]) + if dim_size > 1: + warnings.warn( + "This model contains a squeeze operation on dimension " + + str(squeeze_dim) + + ". The size of " + + "this dimension in the given input is " + + str(dim_size) + + ". The model will " + + "be exported without the squeeze node. If the model is intended to be used with dynamic " + + "input shapes, please use opset version 11 to " + + "export the model." + ) + return self + + warnings.warn( + "This model contains a squeeze operation on dimension " + + str(squeeze_dim) + + ". If the model is " + + "intended to be used with dynamic input shapes, please use opset version 11 to export the model." + ) + return symbolic_helper._squeeze_helper(g, self, axes_i=[squeeze_dim]) + + +@_onnx_symbolic("aten::prelu") +@_beartype.beartype +def prelu(g: jit_utils.GraphContext, self, weight): + self_rank = symbolic_helper._get_tensor_rank(self) + weight_sizes = symbolic_helper._get_tensor_sizes(weight) + weight_rank = len(weight_sizes) + if self_rank is not None: + if self_rank > 2: + # make weight unidirectional broadcastable + weight = symbolic_helper._unsqueeze_helper( + g, weight, list(range(1, self_rank - 1)) + ) + elif self_rank == 0 and weight_sizes == [1]: + # self and weight are both scalar but weight has rank == 1, squeeze weight. 
+ weight = symbolic_helper._squeeze_helper(g, weight, [0]) + weight_rank = 0 + + if self_rank is not None and weight_rank is not None: + assert ( + self_rank >= weight_rank + ), f"rank(x) should be >= rank(slope) but got {self_rank} < {weight_rank}" + return g.op("PRelu", self, weight) + + +@_onnx_symbolic("aten::silu") +@_beartype.beartype +def silu(g: jit_utils.GraphContext, input): + return g.op("Mul", input, g.op("Sigmoid", input)) + + +@_onnx_symbolic("aten::mish") +@_beartype.beartype +def mish(g: jit_utils.GraphContext, input): + return g.op("Mul", input, g.op("Tanh", g.op("Softplus", input))) + + +@_beartype.beartype +def _op_with_optional_float_cast(g: jit_utils.GraphContext, op_name, *args, **kwargs): + """Some PyTorch operators (e.g., Clip/Min/ReLU/Pad) are super set of ONNX in terms of data types. + This function maximizes the exportability of PyTorch-ONNX by allowing ONNX-unsupported PyTorch + operator data type. For example, `Cast(Clip(Cast(INPUT)))` can be used to mimic + `Clip(INPUT)` (opset version < 12). + + Args: + g (torch._C.Graph): graph to write the ONNX representation into. + op_name (str): operator name in ONNX. + *args (tuple): operands to the operator. + **kwargs (dict): attributes to the operator along with "opset_before" (optional, None by default) + indicating the smallest opset version to trigger such casting behavior and "target_float_t" + (optional, torch.onnx.JitScalarType.FLOAT by default) indicating the data type of internal operator. + + Returns: + Optional[torch._C.Value, Tuple[torch._C.Value, ...]]: output(s) of the operator. + """ + opset_before = kwargs.pop("opset_before", None) + target_float_t = kwargs.pop("target_float_t", _type_utils.JitScalarType.FLOAT) + + inputs = list(args) + dtype_0 = _type_utils.JitScalarType.from_value(inputs[0]) + + require_cast = not symbolic_helper._is_fp(inputs[0]) and ( + opset_before is None or GLOBALS.export_onnx_opset_version < opset_before + ) + + if require_cast: + for input in inputs: + if input.isCompleteTensor(): + input_scalar_type = _type_utils.JitScalarType.from_value(input) + if input_scalar_type != dtype_0: + raise errors.SymbolicValueError( + f"Inputs of {op_name} must have same dtype." 
+ f"Got {dtype_0.scalar_name()} and {input_scalar_type.scalar_name()}", + input, + ) + for i, input in enumerate(inputs): + if input.isCompleteTensor() and not symbolic_helper._is_fp(input): + inputs[i] = g.op( + "Cast", + input, + to_i=target_float_t.onnx_type(), + ) + + self = g.op(op_name, *inputs, **kwargs) + + if require_cast: + self = g.op("Cast", self, to_i=dtype_0.onnx_type()) + + return self + + +@_onnx_symbolic("aten::relu") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def relu(g: jit_utils.GraphContext, input): + return _op_with_optional_float_cast(g, "Relu", input, opset_before=14) + + +@_onnx_symbolic("aten::relu6") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def relu6(g: jit_utils.GraphContext, input): + return clamp(g, input, 0, 6) + + +@_onnx_symbolic("aten::ceil") +@_beartype.beartype +def ceil(g: jit_utils.GraphContext, input): + return g.op("Ceil", input) + + +@_onnx_symbolic("aten::floor") +@_beartype.beartype +def floor(g: jit_utils.GraphContext, input): + return g.op("Floor", input) + + +@_onnx_symbolic("aten::len") +@_beartype.beartype +def _len(g: jit_utils.GraphContext, self): + sz_0 = size(g, self, g.op("Constant", value_t=torch.LongTensor([0]))) + return symbolic_helper._squeeze_helper(g, sz_0, [0]) + + +@_onnx_symbolic("aten::threshold") +@symbolic_helper.parse_args("v", "t", "t") +@_beartype.beartype +def threshold(g: jit_utils.GraphContext, self, threshold, value): + # See Note [Export inplace] + if symbolic_helper._scalar(threshold) != 0: + return symbolic_helper._unimplemented("threshold", "non-zero threshold", self) + if symbolic_helper._scalar(value) != 0: + return symbolic_helper._unimplemented("threshold", "non-zero value", self) + return g.op("Relu", self) + + +@_onnx_symbolic("aten::leaky_relu") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "f", "b") +@_beartype.beartype +def leaky_relu( + g: jit_utils.GraphContext, + input: _C.Value, + negative_slope: float, + inplace: bool = False, +): + # See Note [Export inplace] + return g.op("LeakyRelu", input, alpha_f=negative_slope) + + +@_onnx_symbolic("aten::glu") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def glu(g: jit_utils.GraphContext, input, dim): + dim_size = symbolic_helper._get_tensor_dim_size(input, dim) + if dim_size is not None: + assert dim_size % 2 == 0 + + first, second = g.op("Split", input, axis_i=dim, outputs=2) + return g.op("Mul", first, g.op("Sigmoid", second)) + + +@_onnx_symbolic("aten::softmax") +@symbolic_helper.parse_args("v", "i", "none") +@_beartype.beartype +def softmax(g: jit_utils.GraphContext, input, dim, dtype=None): + # Softmax does normalization at vector level. + # PyTorch and ONNX use different strategies to split the input tensor into vectors. + # Thus dim and axis have different meanings. + # PyTorch slices the input tensor into vectors along the `dim`-th dimension. + # ONNX reshapes the input into a 2-D tensor, and `axis` indicates where the input is coerced. + # If input is a 2 x 3 tensor: + # input = [[1.0, 1.0, 1.0], + # [1.0, 1,0, 1,0]] + # with dim = 0, the result is: + # result = [[0.5, 0.5, 0.5], + # [0.5, 0.5, 0.5]] + # with axis = 0, the result is: + # result = [[0.167, 0.167, 0.167], + # [0.167, 0.167, 0.167]] + # So only when dim and axis both equal to ndim - 1 (the last dimension), + # their semantics are equivalent. + # So use softmax when dim and axis both equal to ndim - 1, + # otherwise transpose the input to put the vectors to be normalized to the last dimension. 
+ # When input rank is not known at export time we compute softmax using a subgraph + # with other operators + input_dim = symbolic_helper._get_tensor_rank(input) + if input_dim is not None: + # TODO: remove this as onnx opset 11 spec allows negative axes + if dim < 0: + dim = input_dim + dim + + is_transpose_required = input_dim != dim + 1 + + if is_transpose_required: + axes = list(range(input_dim)) + axes[dim], axes[-1] = axes[-1], axes[dim] + input = g.op("Transpose", input, perm_i=axes) + dim = input_dim - 1 + + softmax = g.op("Softmax", input, axis_i=dim) + if dtype and dtype.node().kind() != "prim::Constant": + parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype") + softmax = g.op( + "Cast", + softmax, + to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type(), + ) + + if is_transpose_required: + softmax = g.op("Transpose", softmax, perm_i=axes) + return softmax + + # Apply max normalization. + input = g.op("Sub", input, g.op("ReduceMax", input, axes_i=[dim], keepdims_i=1)) + + exp = g.op("Exp", input) + sum = symbolic_helper._reducesum_helper(g, exp, axes_i=[dim]) + softmax = g.op("Div", exp, sum) + if dtype and dtype.node().kind() != "prim::Constant": + parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype") + softmax = g.op( + "Cast", softmax, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type() + ) + return softmax + + +@_onnx_symbolic("aten::softplus") +@_beartype.beartype +def softplus(g: jit_utils.GraphContext, self, beta, threshold): + beta_const = symbolic_helper._maybe_get_const(beta, "f") + if beta_const != 1: + return g.op("Div", g.op("Softplus", g.op("Mul", self, beta)), beta) + return g.op("Softplus", self) + + +@_onnx_symbolic("aten::get_pool_ceil_padding") +@_beartype.beartype +def get_pool_ceil_padding(input, kernel_size, stride, padding): + # TODO(justinchuby): Looks like this op is deprecated in torch + sizes = symbolic_helper._get_tensor_sizes(input) + dim = sizes[-len(padding) :] if sizes is not None else None + if dim is None or any(i is None for i in dim): + return symbolic_helper._unimplemented( + "get_pool_ceil_padding", "input size not accessible", input + ) + ceiled_output_dim = [ + int(math.ceil((dim[i] + 2 * padding[i] - kernel_size[i]) / float(stride[i]))) + + 1 + for i in range(0, len(padding)) + ] + # ensure last pooling starts inside + ceiled_output_dim = [ + ceiled_output_dim[i] - 1 + if (((ceiled_output_dim[i] - 1) * stride[i]) >= (dim[i] + padding[i])) + else ceiled_output_dim[i] + for i in range(0, len(ceiled_output_dim)) + ] + padding_ceil = [ + 0 + if (stride[i] == 1) + else ( + kernel_size[i] + - (dim[i] + 2 * padding[i] - ((ceiled_output_dim[i] - 1) * stride[i] + 1)) + ) + for i in range(0, len(padding)) + ] + # ensure padding is not > kernel_size + padding_ceil = [ + ( + int(padding_ceil[i]) + if padding_ceil[i] < kernel_size[i] - 1 + else int(kernel_size[i] - 1) + ) + if ((padding_ceil[i] + 2 * padding[i]) >= (kernel_size[i])) + else int(padding_ceil[i]) + for i in range(0, len(padding_ceil)) + ] + return padding_ceil + + +@_onnx_symbolic( + "aten::max_pool1d", + decorate=[ + _apply_params( + "max_pool1d", torch.nn.modules.utils._single, 1, return_indices=False + ), + _export("max_pool1d"), + ], +) +@_onnx_symbolic( + "aten::max_pool2d", + decorate=[ + _apply_params( + "max_pool2d", torch.nn.modules.utils._pair, 2, return_indices=False + ), + _export("max_pool2d"), + ], +) +@_onnx_symbolic( + "aten::max_pool3d", + decorate=[ + _apply_params( + "max_pool3d", torch.nn.modules.utils._triple, 3, return_indices=False + ), + 
_export("max_pool3d"), + ], +) +@_beartype.beartype +def _max_pool(name, tuple_fn, ndims, return_indices): + @symbolic_helper.quantized_args(True, False, False, False, False, False) + @symbolic_helper.parse_args("v", "is", "is", "is", "is", "i") + @_beartype.beartype + def symbolic_fn(g, input, kernel_size, stride, padding, dilation, ceil_mode): + if set(tuple_fn(dilation)) != {1}: + return symbolic_helper._unimplemented(name, "dilation", input) + if not stride: + stride = kernel_size + padding = tuple(tuple_fn(padding)) + if ceil_mode: + padding_ceil = get_pool_ceil_padding(input, kernel_size, stride, padding) + padding = padding + tuple(a + b for (a, b) in zip(padding_ceil, padding)) + else: + padding = padding * 2 + kwargs = { + "kernel_shape_i": tuple_fn(kernel_size), + "pads_i": padding, + "strides_i": tuple_fn(stride), + } + # easy but hacky way to get flattened indices values + # to be used to convert the indices values to non-flattened. + # In ONNX the indices are computed as a flatten 1-D tensor, + # so the values in indices are in [0, N x C x D1 x ... x Dn). + # To convert the indices to the same format used by Pytorch, + # we first execute a maxpool with a kernel and stride of 1 on the same input. + # This will result in a tensor of indices in which each index will have it's own value. + # Using this tensor as a reference, we extract the first index of each axis and subtract + # it from each index of this axis in the indices to convert. + # This step will result in a tensor were each dimension has values of indices within + # the dimension it is in. + # For more information : + # https://github.com/pytorch/pytorch/pull/16455#issuecomment-460776407 + if return_indices: + r, indices = g.op("MaxPool", input, outputs=2, **kwargs) + _, flattened_indices = g.op( + "MaxPool", + input, + outputs=2, + kernel_shape_i=[1 for _ in range(ndims)], + strides_i=[1 for _ in range(ndims)], + ) + # convert indices to have non-flattened indices values + s = symbolic_helper._slice_helper( + g, + flattened_indices, + axes=[2 + i for i in range(ndims)], + starts=list(tuple_fn(0)), + ends=list(tuple_fn(1)), + ) + indices = sub(g, indices, s) + return r, indices + else: + r = g.op("MaxPool", input, outputs=1, **kwargs) + return r + + return symbolic_fn + + +max_pool1d_with_indices = _onnx_symbolic("aten::max_pool1d_with_indices")( + _max_pool( + "max_pool1d_with_indices", + torch.nn.modules.utils._single, + 1, + return_indices=True, + ) +) +max_pool2d_with_indices = _onnx_symbolic("aten::max_pool2d_with_indices")( + _max_pool( + "max_pool2d_with_indices", + torch.nn.modules.utils._pair, + 2, + return_indices=True, + ) +) +max_pool3d_with_indices = _onnx_symbolic("aten::max_pool3d_with_indices")( + _max_pool( + "max_pool3d_with_indices", + torch.nn.modules.utils._triple, + 3, + return_indices=True, + ) +) + + +@_onnx_symbolic( + "aten::avg_pool1d", + decorate=[ + _apply_params("avg_pool1d", torch.nn.modules.utils._single), + _export("avg_pool1d"), + ], +) +@_onnx_symbolic( + "aten::avg_pool2d", + decorate=[ + _apply_params("avg_pool2d", torch.nn.modules.utils._pair), + _export("avg_pool2d"), + ], +) +@_onnx_symbolic( + "aten::avg_pool3d", + decorate=[ + _apply_params("avg_pool3d", torch.nn.modules.utils._triple), + _export("avg_pool3d"), + ], +) +@_beartype.beartype +def _avg_pool(name, tuple_fn): + @symbolic_helper.quantized_args(True) + @symbolic_helper.parse_args("v", "is", "is", "is", "i", "i", "none") + @_beartype.beartype + def symbolic_fn( + g, + input: _C.Value, + kernel_size: Sequence[int], + 
stride: Sequence[int], + padding: Union[int, Sequence[int]], + ceil_mode: int, + count_include_pad: int, + divisor_override=None, + ): + if not stride: + stride = kernel_size + padding = symbolic_helper._avgpool_helper( + tuple_fn, padding, kernel_size, stride, divisor_override, name + ) + assert isinstance(padding, tuple) + adjusted_padding = padding + # Although onnx::AvgPool provides count_include_pad, + # The corner case of Average Pooling with ceil_mode on + # PyTorch allows sliding window go off bound, which leads to + # this accommodation. + # More detail on https://github.com/pytorch/pytorch/issues/57178 + if count_include_pad: + input = _op_with_optional_float_cast( + g, + "Pad", + input, + pads_i=((0,) * 2 + padding) * 2, + mode_s="constant", + value_f=0.0, + opset_before=11, + ) + adjusted_padding = (0,) * len(padding) + if ceil_mode: + padding_ceil = get_pool_ceil_padding(input, kernel_size, stride, padding) + adjusted_padding = adjusted_padding + tuple( + a + b for (a, b) in zip(padding_ceil, adjusted_padding) + ) + else: + adjusted_padding = adjusted_padding * 2 + output = g.op( + "AveragePool", + input, + kernel_shape_i=tuple_fn(kernel_size), + strides_i=tuple_fn(stride), + pads_i=adjusted_padding, + ) + return output + + return symbolic_fn + + +@_onnx_symbolic( + "aten::adaptive_avg_pool1d", + decorate=[ + _apply_params( + "adaptive_avg_pool1d", "AveragePool", torch.nn.modules.utils._single + ), + _export("adaptive_avg_pool1d"), + ], +) +@_onnx_symbolic( + "aten::adaptive_avg_pool2d", + decorate=[ + _apply_params( + "adaptive_avg_pool2d", "AveragePool", torch.nn.modules.utils._pair + ), + _export("adaptive_avg_pool2d"), + ], +) +@_onnx_symbolic( + "aten::adaptive_avg_pool3d", + decorate=[ + _apply_params( + "adaptive_avg_pool3d", "AveragePool", torch.nn.modules.utils._triple + ), + _export("adaptive_avg_pool3d"), + ], +) +@_onnx_symbolic( + "aten::adaptive_max_pool1d", + decorate=[ + _apply_params( + "adaptive_max_pool1d", + "MaxPool", + torch.nn.modules.utils._single, + max_pool1d_with_indices, + ), + _export("adaptive_max_pool1d"), + ], +) +@_onnx_symbolic( + "aten::adaptive_max_pool2d", + decorate=[ + _apply_params( + "adaptive_max_pool2d", + "MaxPool", + torch.nn.modules.utils._pair, + max_pool2d_with_indices, + ), + _export("adaptive_max_pool2d"), + ], +) +@_onnx_symbolic( + "aten::adaptive_max_pool3d", + decorate=[ + _apply_params( + "adaptive_max_pool3d", + "MaxPool", + torch.nn.modules.utils._triple, + max_pool3d_with_indices, + ), + _export("adaptive_max_pool3d"), + ], +) +@_beartype.beartype +def _adaptive_pool(name, type, tuple_fn, fn=None): + @symbolic_helper.quantized_args(True, False) + @_beartype.beartype + def symbolic_fn(g, input, output_size): + # _adaptive_pool is supported for cases where output_size is 1 for all dimensions, + # by executing a GlobalPool. + # It is also supported for cases where the output size is a factor of the input size. + # For these cases the stride and kernel size are uniform along all the indices of + # the same dimension, which makes it possible to export it to ONNX. + # for MaxPool, GlobalMaxPool does not return indices, + # so we try using max_poolxd_with_indices, and if it is not possible + # (input is not a complete tensor or output size not factor of input size) + # then we call GlobalAveragePool and return None for the indices + output_size_value = output_size + try: + output_size = symbolic_helper._parse_arg(output_size, "is") + except Exception: + # FIXME(justinchuby): Avoid catching Exception. 
+ # Catch a more specific exception instead. + return symbolic_helper._onnx_unsupported( + "adaptive pooling, since output_size is not constant.", input + ) + if output_size == [1] * len(output_size) and type == "AveragePool": + return g.op("GlobalAveragePool", input) + sizes = symbolic_helper._get_tensor_sizes(input) + try: + dim = sizes[2:] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. + dim = None + if dim is None or any(i is None for i in dim): + if output_size == [1] * len(output_size): + return g.op("GlobalMaxPool", input), None + return symbolic_helper._unimplemented( + name, "input size not accessible", input + ) + # verify if output size % input size = 0 for all dim + mod = [dim[i] % output_size[i] for i in range(0, len(dim))] + if mod != [0] * len(mod): + if output_size == [1] * len(output_size): + return g.op("GlobalMaxPool", input), None + return symbolic_helper._unimplemented( + name, "output size that are not factor of input size", output_size_value + ) + k = [int(dim[i] / output_size[i]) for i in range(0, len(dim))] + # call max_poolxd_with_indices to get indices in the output + if type == "MaxPool": + return fn(g, input, k, k, (0,) * len(dim), (1,) * len(dim), False) + output = g.op(type, input, kernel_shape_i=tuple_fn(k), strides_i=tuple_fn(k)) + return output + + return symbolic_fn + + +@_beartype.beartype +def _prepare_onnx_paddings(dim: int, pad): + """Generate paddings in ONNX order based on pad in pytorch. + Args: + dim: the dimension of the tensor. + pad: the paddings in pytorch. + The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ... + """ + # The desired order of paddings is + # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end. + # n is the dimension of input. + # assume zero-dimensions in the beginning + paddings = list(pad[:]) + [0] * (dim * 2 - len(pad)) + # reverse order and collate first beginnings and then ends + paddings = paddings[-2::-2] + paddings[-1::-2] + return paddings + + +@_beartype.beartype +def _convert_padding_node(input): + padding = symbolic_helper._maybe_get_const(input, "is") + if symbolic_helper._is_value(padding) and symbolic_helper._is_packed_list(padding): + input_list = symbolic_helper._unpack_list(padding) + try: + padding = [ + symbolic_helper._get_const(v, "i", "padding") for v in input_list + ] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. + return symbolic_helper._onnx_opset_unsupported_detailed( + "Pad", 9, 11, "The sizes of the padding must be constant", input + ) + return padding + + +@_onnx_symbolic("aten::constant_pad_nd") +@_beartype.beartype +def constant_pad_nd(g: jit_utils.GraphContext, input, padding, value): + mode = "constant" + try: + value = symbolic_helper._get_const(value, "f", "value") + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. 
+ return symbolic_helper._onnx_opset_unsupported_detailed( + "Pad", 9, 11, "The value for the padding must be constant", value + ) + + padding = _convert_padding_node(padding) + paddings = _prepare_onnx_paddings(symbolic_helper._get_tensor_rank(input), padding) + return _op_with_optional_float_cast( + g, "Pad", input, pads_i=paddings, mode_s=mode, value_f=value, opset_before=11 + ) + + +@_beartype.beartype +def _pad_circular(g: jit_utils.GraphContext, input: _C.Value, pad: _C.Value): + padding = _convert_padding_node(pad) + assert len(padding) % 2 == 0 + ndim = len(padding) // 2 + + cur = input + for idx in range(ndim): + pad_r = padding[-(2 * idx + 1)] + pad_l = padding[-(2 * idx + 2)] + tensors = [] + if pad_l > 0: + left = symbolic_helper._slice_helper( + g, cur, axes=[2 + idx], starts=[-(pad_l)], ends=[_constants.INT64_MAX] + ) + tensors.append(left) + + if pad_l < 0 or pad_r < 0: + start = builtins.max(0, -pad_l) + end = -(builtins.max(0, -pad_r)) + middle = symbolic_helper._slice_helper( + g, + cur, + axes=[2 + idx], + starts=[start], + ends=[end], + ) + tensors.append(middle) + else: + tensors.append(cur) + + if pad_r > 0: + right = symbolic_helper._slice_helper( + g, cur, axes=[2 + idx], starts=[0], ends=[pad_r] + ) + tensors.append(right) + + cur = g.op("Concat", *tensors, axis_i=(2 + idx)) + + return cur + + +@_onnx_symbolic("aten::reflection_pad1d") +@_onnx_symbolic("aten::reflection_pad2d") +@_onnx_symbolic("aten::reflection_pad3d") +@_beartype.beartype +def reflection_pad(g: jit_utils.GraphContext, input, padding): + mode = "reflect" + padding = _convert_padding_node(padding) + paddings = _prepare_onnx_paddings(symbolic_helper._get_tensor_rank(input), padding) + return _op_with_optional_float_cast( + g, "Pad", input, pads_i=paddings, mode_s=mode, opset_before=11 + ) + + +@_onnx_symbolic("aten::replication_pad1d") +@_onnx_symbolic("aten::replication_pad2d") +@_onnx_symbolic("aten::replication_pad3d") +@_beartype.beartype +def replication_pad(g: jit_utils.GraphContext, input, padding): + mode = "edge" + padding = _convert_padding_node(padding) + paddings = _prepare_onnx_paddings(symbolic_helper._get_tensor_rank(input), padding) + return _op_with_optional_float_cast( + g, "Pad", input, pads_i=paddings, mode_s=mode, opset_before=11 + ) + + +@_onnx_symbolic("aten::pad") +@_beartype.beartype +def pad( + g: jit_utils.GraphContext, + input: _C.Value, + pad: _C.Value, + mode: _C.Value, + value: _C.Value, +): + mode = symbolic_helper._parse_arg(mode, "s") + if mode == "replicate": + return replication_pad(g, input, pad) + elif mode == "reflect": + return reflection_pad(g, input, pad) + elif mode == "constant": + return constant_pad_nd(g, input, pad, value) + elif mode == "circular": + return _pad_circular(g, input, pad) + else: + raise errors.SymbolicValueError(f"Unrecognized padding mode {mode}", input) + + +@_onnx_symbolic( + "aten::upsample_nearest1d", + decorate=[ + _apply_params("upsample_nearest1d", 3, "nearest"), + _export("upsample_nearest1d"), + ], +) +@_onnx_symbolic( + "aten::upsample_nearest2d", + decorate=[ + _apply_params("upsample_nearest2d", 4, "nearest"), + _export("upsample_nearest2d"), + ], +) +@_onnx_symbolic( + "aten::upsample_nearest3d", + decorate=[ + _apply_params("upsample_nearest3d", 5, "nearest"), + _export("upsample_nearest3d"), + ], +) +@_onnx_symbolic( + "aten::upsample_linear1d", + decorate=[ + _apply_params("upsample_linear1d", 3, "linear"), + _export("upsample_linear1d"), + ], +) +@_onnx_symbolic( + "aten::upsample_bilinear2d", + decorate=[ + 
_apply_params("upsample_bilinear2d", 4, "linear"), + _export("upsample_bilinear2d"), + ], +) +@_onnx_symbolic( + "aten::upsample_trilinear3d", + decorate=[ + _apply_params("upsample_trilinear3d", 5, "linear"), + _export("upsample_trilinear3d"), + ], +) +@_beartype.beartype +def _interpolate(name: str, dim: int, interpolate_mode: str): + def symbolic_fn(g, input, output_size, *args): + scales, align_corners = symbolic_helper._get_interpolate_attributes( + g, interpolate_mode, args + ) + symbolic_helper._interpolate_warning(interpolate_mode) + align_corners = symbolic_helper._maybe_get_scalar(align_corners) + if align_corners: + return symbolic_helper._unimplemented(name, "align_corners == True", input) + if scales is None: + scales = symbolic_helper._interpolate_size_to_scales( + g, input, output_size, dim + ) + return g.op("Upsample", input, scales, mode_s=interpolate_mode) + + return symbolic_fn + + +@_onnx_symbolic("aten::__interpolate") +@_beartype.beartype +def __interpolate( + g: jit_utils.GraphContext, + input, + size, + scale_factor, + mode, + align_corners, + recompute_scale_factor, + antialias, +): + scales, mode = symbolic_helper._interpolate_get_scales_and_mode( + g, input, size, scale_factor, mode, align_corners + ) + return g.op("Upsample", input, scales, mode_s=mode) + + +@_onnx_symbolic("aten::bitwise_not") +@_beartype.beartype +def bitwise_not(g: jit_utils.GraphContext, input): + if not symbolic_helper._is_bool(input): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise Not " + "for non-boolean input values", + input, + ) + return g.op("Not", input) + + +@_onnx_symbolic("aten::bitwise_or") +@_beartype.beartype +def bitwise_or(g, self, other): + if not symbolic_helper._is_bool(self): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise OR " + "for non-boolean input values. self: ", + self, + ) + if not symbolic_helper._is_bool(other): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise OR " + "for non-boolean input values. other: ", + other, + ) + return g.op("Or", self, other) + + +@_beartype.beartype +def wrap_logical_op_with_cast_to(to_type): + def decorator(fn): + @functools.wraps(fn) + def wrap_with_cast(g, input, other): + to_cast_func = globals()[f"_cast_{to_type}"] + return fn(g, to_cast_func(g, input, False), to_cast_func(g, other, False)) + + return wrap_with_cast + + return decorator + + +@_beartype.beartype +def wrap_logical_op_with_negation(func: Callable) -> Callable: + @functools.wraps(func) + def wrap_with_not(g, input, other): + return g.op("Not", func(g, input, other)) + + return wrap_with_not + + +@_onnx_symbolic("aten::__not_") +@_beartype.beartype +def __not_(g: jit_utils.GraphContext, self): + if not symbolic_helper._is_bool(self): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise Not " + "for non-boolean input values", + self, + ) + return g.op("Not", self) + + +@_onnx_symbolic("aten::eq") +@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def eq(g: jit_utils.GraphContext, self, other): + if isinstance(self.type(), _C.DeviceObjType) and isinstance( + other.type(), _C.DeviceObjType + ): + # ONNX doesn't have devices, so consider them all to be equal. + # The no-op check for equality will get constant-folded. 
+ return g.op("Constant", value_t=torch.tensor(True, dtype=torch.bool)) + self_node = self.node() + other_node = other.node() + if self_node.kind() == other_node.kind() == "onnx::Constant": + if self_node.kindOf("value") == other_node.kindOf("value") == "s": + # Exporting strings to ONNX is not supported. + # If both strings are constant, we can compare them directly. + # The no-op check for equality will get constant-folded. + return g.op( + "Constant", + value_t=torch.tensor( + self_node.s("value") == other_node.s("value"), + dtype=torch.bool, + ), + ) + + return g.op("Equal", self, other) + + +@_onnx_symbolic("aten::ne") +@symbolic_helper.quantized_args(True, True) +@wrap_logical_op_with_negation +@_beartype.beartype +def ne(g: jit_utils.GraphContext, self, other): + return eq(g, self, other) + + +@_onnx_symbolic("aten::gt") +@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def gt(g: jit_utils.GraphContext, input, other): + return _gt_impl(g, input, other) + + +@_beartype.beartype +def _gt_impl(g: jit_utils.GraphContext, input, other): + if symbolic_helper._is_bool(input) and symbolic_helper._is_bool(other): + input = g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT32) + other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.INT32) + return g.op("Greater", input, other) + + +@_onnx_symbolic("aten::lt") +@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def lt(g: jit_utils.GraphContext, input, other): + return _lt_impl(g, input, other) + + +@_beartype.beartype +def _lt_impl(g: jit_utils.GraphContext, input, other): + if symbolic_helper._is_bool(input) and symbolic_helper._is_bool(other): + input = g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT32) + other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.INT32) + return g.op("Less", input, other) + + +@_onnx_symbolic("aten::ge") +@symbolic_helper.quantized_args(True, True) +@wrap_logical_op_with_negation +@_beartype.beartype +def ge(g: jit_utils.GraphContext, input, other): + return _lt_impl(g, input, other) + + +@_onnx_symbolic("aten::le") +@symbolic_helper.quantized_args(True, True) +@wrap_logical_op_with_negation +@_beartype.beartype +def le(g: jit_utils.GraphContext, input, other): + return _gt_impl(g, input, other) + + +@_onnx_symbolic("aten::__and_") +@_beartype.beartype +def __and_(g: jit_utils.GraphContext, input, other): + if not symbolic_helper._is_bool(input): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise AND " + "for non-boolean input values", + input, + ) + if not symbolic_helper._is_bool(other): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise AND " + "for non-boolean input values", + other, + ) + return g.op("And", input, other) + + +@_onnx_symbolic("aten::__or_") +@_beartype.beartype +def __or_(g: jit_utils.GraphContext, input, other): + if not symbolic_helper._is_bool(input): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise OR " + "for non-boolean input values", + input, + ) + if not symbolic_helper._is_bool(other): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise OR " + "for non-boolean input values", + other, + ) + return g.op("Or", input, other) + + +@_onnx_symbolic("aten::__xor_") +@_beartype.beartype +def __xor_(g: jit_utils.GraphContext, input, other): + if not symbolic_helper._is_bool(input): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise XOR " + "for non-boolean input 
values", + input, + ) + if not symbolic_helper._is_bool(other): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise XOR " + "for non-boolean input values", + other, + ) + return g.op("Xor", input, other) + + +@_onnx_symbolic("aten::logical_and") +@wrap_logical_op_with_cast_to("Bool") +@_beartype.beartype +def logical_and(g: jit_utils.GraphContext, input, other): + return g.op("And", input, other) + + +@_onnx_symbolic("aten::logical_or") +@wrap_logical_op_with_cast_to("Bool") +@_beartype.beartype +def logical_or(g: jit_utils.GraphContext, input, other): + return g.op("Or", input, other) + + +@_onnx_symbolic("aten::logical_xor") +@wrap_logical_op_with_cast_to("Bool") +@_beartype.beartype +def logical_xor(g: jit_utils.GraphContext, input, other): + return g.op("Xor", input, other) + + +@_onnx_symbolic("aten::logical_not") +@_beartype.beartype +def logical_not(g: jit_utils.GraphContext, input): + return g.op("Not", g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.BOOL)) + + +@_onnx_symbolic("aten::__rshift_") +@_beartype.beartype +def __rshift_(g: jit_utils.GraphContext, self, other): + # make sure to cast other to self's type + # (when self is long, make sure that other is not float) + self_scalar_type = _type_utils.JitScalarType.from_value(self) + if ( + _type_utils.JitScalarType.from_value(other, _type_utils.JitScalarType.UNDEFINED) + != self_scalar_type + ): + other = g.op( + "Cast", + other, + to_i=self_scalar_type.onnx_type(), + ) + + two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32)) + # exponent (same type as self) has to be float or double in onnx::Pow + if not symbolic_helper._is_fp(self): + other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT) + two_pow = g.op("Pow", two, other) + two_pow = g.op( + "Cast", + two_pow, + to_i=self_scalar_type.onnx_type(), + ) + rshift = g.op("Div", self, two_pow) + return rshift + + +@_onnx_symbolic("aten::__lshift_") +@_beartype.beartype +def __lshift_(g: jit_utils.GraphContext, self, other): + # make sure to cast other to self's type + # (when self is long, make sure that other is not float) + self_scalar_type = _type_utils.JitScalarType.from_value(self) + if ( + _type_utils.JitScalarType.from_value(other, _type_utils.JitScalarType.UNDEFINED) + != self_scalar_type + ): + other = g.op( + "Cast", + other, + to_i=self_scalar_type.onnx_type(), + ) + + two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32)) + # exponent (same type as self) has to be float or double in onnx::Pow + if not symbolic_helper._is_fp(self): + other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT) + two_pow = g.op("Pow", two, other) + two_pow = g.op( + "Cast", + two_pow, + to_i=self_scalar_type.onnx_type(), + ) + lshift = g.op("Mul", self, two_pow) + return lshift + + +@_onnx_symbolic("aten::where") +@symbolic_helper.parse_args("v", "v", "v", "i") +@_beartype.beartype +def where(g: jit_utils.GraphContext, condition, self=None, other=None, _outputs=None): + # Assumes that torch.where's first argument takes only Bool and Byte tensors. 
+ if not symbolic_helper._is_bool(condition): + condition = g.op("Cast", condition, to_i=_C_onnx.TensorProtoDataType.BOOL) + if self is None: + condition = nonzero(g, condition) + return symbolic_helper._unbind_helper( + g, condition, g.op("Constant", value_t=torch.tensor(1)), _outputs + ) + return g.op("Where", condition, self, other) + + +@_onnx_symbolic("aten::log_softmax") +@symbolic_helper.parse_args("v", "i", "none") +@_beartype.beartype +def log_softmax(g: jit_utils.GraphContext, input, dim, dtype=None): + # PyTorch dim and ONNX axis have different meanings. + # See Softmax comment for details. + # TODO: remove this as onnx opset 11 spec allows negative axes + input_dim = symbolic_helper._get_tensor_rank(input) + if input_dim is None: + return symbolic_helper._unimplemented( + "dim", + "ONNX and PyTorch use different strategies to split the input. " + "Input rank must be known at export time.", + ) + if dim < 0: + dim = input_dim + dim + is_transpose_required = input_dim != dim + 1 + # ONNX only supports log_softmax with dim = -1. Transpose must be added before and after log_softmax to support other cases. + if is_transpose_required: + axes = list(range(input_dim)) + axes[dim], axes[-1] = axes[-1], axes[dim] + input = g.op("Transpose", input, perm_i=axes) + dim = input_dim - 1 + return_op = g.op("LogSoftmax", input, axis_i=dim) + if dtype and dtype.node().kind() != "prim::Constant": + parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype") + return_op = g.op( + "Cast", return_op, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type() + ) + if is_transpose_required: + return_op = g.op("Transpose", return_op, perm_i=axes) + return return_op + + +@_onnx_symbolic("aten::_log_softmax") +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def _log_softmax(g: jit_utils.GraphContext, input, dim, half_to_float): + if ( + half_to_float + and _type_utils.JitScalarType.from_value( + input, _type_utils.JitScalarType.UNDEFINED + ) + == _type_utils.JitScalarType.HALF + ): + input = g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.FLOAT) + return log_softmax(g, input, dim) + + +@_onnx_symbolic("aten::_convolution") +@symbolic_helper.parse_args( + "v", "v", "v", "is", "is", "is", "i", "is", "i", "i", "i", "i", "i" +) +@_beartype.beartype +def _convolution( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + benchmark, + deterministic, + cudnn_enabled, + allow_tf32=None, +): + weight_size = symbolic_helper._get_tensor_sizes(weight) + try: + kernel_shape = weight_size[2:] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. + kernel_shape = None + + if kernel_shape is None or any(i is None for i in kernel_shape): + raise errors.SymbolicValueError( + "Unsupported: ONNX export of convolution for kernel of unknown shape.", + input, + ) + + args = [input, weight] + # ONNX only supports 1D bias + if ( + not symbolic_helper._is_none(bias) + and symbolic_helper._get_tensor_rank(bias) == 1 + ): + args.append(bias) + + kwargs = { + "kernel_shape_i": weight_size[2:], + "strides_i": stride, + # NB: ONNX supports asymmetric padding, whereas PyTorch supports only + # symmetric padding + "pads_i": padding + padding, + "dilations_i": dilation, + "group_i": groups, + } + + if any(o != 0 for o in output_padding): + # ONNX supports both output_shape and output_padding. they are equivalent expressive. 
+ # output_padding is more straightforward, so we use it here. + # output_shape = stride * (input_shape - 1) + output_padding + kernel_shape - padding * 2 + assert transposed + assert len(stride) == len(output_padding) + kwargs["output_padding_i"] = output_padding + + n = g.op("ConvTranspose" if transposed else "Conv", *args, **kwargs) + + if ( + not symbolic_helper._is_none(bias) + and symbolic_helper._get_tensor_rank(bias) != 1 + ): + return g.op("Add", n, bias) + else: + return n + + +@_onnx_symbolic("aten::_convolution_mode") +@symbolic_helper.parse_args( + "v", + "v", + "v", + "is", + "s", + "is", + "i", +) +@_beartype.beartype +def _convolution_mode( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + dilation, + groups, +): + weight_size = symbolic_helper._get_tensor_sizes(weight) + try: + kernel_shape = weight_size[2:] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. + kernel_shape = None + + if kernel_shape is None or any(i is None for i in kernel_shape): + raise errors.SymbolicValueError( + "Unsupported: ONNX export of convolution for kernel of unknown shape.", + input, + ) + + args = [input, weight] + # ONNX only supports 1D bias + if ( + not symbolic_helper._is_none(bias) + and symbolic_helper._get_tensor_rank(bias) == 1 + ): + args.append(bias) + + if padding == "valid": + padding = "VALID" + elif padding == "same": + padding = "SAME_UPPER" + kwargs = { + "kernel_shape_i": weight_size[2:], + "strides_i": stride, + "auto_pad_s": padding, + "dilations_i": dilation, + "group_i": groups, + } + + n = g.op("Conv", *args, **kwargs) + + if ( + not symbolic_helper._is_none(bias) + and symbolic_helper._get_tensor_rank(bias) != 1 + ): + return g.op("Add", n, bias) + else: + return n + + +@_onnx_symbolic("aten::convolution") +@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is", "i") +@_beartype.beartype +def convolution( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + dilation, + transposed, + output_padding, + groups, +): + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::conv1d") +@symbolic_helper.parse_args("v", "v", "v", "is", "v", "is", "i") +@_beartype.beartype +def conv1d( + g: jit_utils.GraphContext, input, weight, bias, stride, padding, dilation, groups +): + str_padding = symbolic_helper._parse_arg(padding, "s") + if str_padding in ["valid", "same"]: + return _convolution_mode( + g, + input, + weight, + bias, + stride, + str_padding, + dilation, + groups, + ) + else: + padding = symbolic_helper._parse_arg(padding, "is") + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + False, + (), + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::conv2d") +@symbolic_helper.parse_args("v", "v", "v", "is", "v", "is", "i") +@_beartype.beartype +def conv2d( + g: jit_utils.GraphContext, input, weight, bias, stride, padding, dilation, groups +): + str_padding = symbolic_helper._parse_arg(padding, "s") + if str_padding in ["valid", "same"]: + return _convolution_mode( + g, + input, + weight, + bias, + stride, + str_padding, + dilation, + groups, + ) + else: + padding = symbolic_helper._parse_arg(padding, "is") + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + False, + (), + groups, + None, + None, + None, + None, + ) + 
+ +@_onnx_symbolic("aten::conv3d") +@symbolic_helper.parse_args("v", "v", "v", "is", "v", "is", "i") +@_beartype.beartype +def conv3d( + g: jit_utils.GraphContext, input, weight, bias, stride, padding, dilation, groups +): + str_padding = symbolic_helper._parse_arg(padding, "s") + if str_padding in ["valid", "same"]: + return _convolution_mode( + g, + input, + weight, + bias, + stride, + str_padding, + dilation, + groups, + ) + else: + padding = symbolic_helper._parse_arg(padding, "is") + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + False, + (), + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::conv_transpose1d") +@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is") +@_beartype.beartype +def conv_transpose1d( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + output_padding, + groups, + dilation, +): + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + True, + output_padding, + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::conv_transpose2d") +@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is") +@_beartype.beartype +def conv_transpose2d( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + output_padding, + groups, + dilation, +): + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + True, + output_padding, + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::conv_transpose3d") +@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is") +@_beartype.beartype +def conv_transpose3d( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + output_padding, + groups, + dilation, +): + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + True, + output_padding, + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::batch_norm") +@symbolic_helper.parse_args("v", "v", "v", "v", "v", "i", "f", "f", "i") +@_beartype.beartype +def batch_norm( + g: jit_utils.GraphContext, + input, + weight, + bias, + running_mean, + running_var, + training, + momentum, + eps, + cudnn_enabled, +): + symbolic_helper.check_training_mode(training, "batch_norm") + + if ( + torch.is_autocast_enabled() + and not symbolic_helper.args_have_same_dtype( + [input, weight, bias, running_mean, running_var] + ) + and GLOBALS.export_onnx_opset_version < 15 + ): + return symbolic_helper._onnx_opset_unsupported_detailed( + "BatchNormalization", + 9, + 15, + "All input tensors must have the same `dtype`." 
+ " Turn off Autocast or export using opset version 15.", + input, + ) + + weight, bias, running_mean, running_var = symbolic_helper._batchnorm_helper( + g, input, weight, bias, running_mean, running_var + ) + out = g.op( + "BatchNormalization", + input, + weight, + bias, + running_mean, + running_var, + epsilon_f=eps, + momentum_f=1 - momentum, + outputs=1 if not training else 5, + ) + if not training: + return out + else: + res, new_running_mean, new_running_var, saved_mean, saved_var = out + new_running_mean.setType(running_mean.type()) + new_running_var.setType(running_var.type()) + saved_mean.setDebugName("batch_norm_dead_output-" + saved_mean.debugName()) + saved_var.setDebugName("batch_norm_dead_output-" + saved_var.debugName()) + return res + + +@_onnx_symbolic("aten::native_layer_norm") +@symbolic_helper.quantized_args(True, False, False, False) +@symbolic_helper.parse_args("v", "is", "v", "v", "f") +@_beartype.beartype +def native_layer_norm( + g: jit_utils.GraphContext, + input: _C.Value, + normalized_shape: Sequence[int], + weight: _C.Value, + bias: _C.Value, + eps: float, +) -> Tuple[_C.Value, _C.Value, _C.Value]: + axes = [-i for i in range(len(normalized_shape), 0, -1)] + + two_cst = symbolic_helper._generate_wrapped_number(g, 2.0) + eps_cst = symbolic_helper._generate_wrapped_number(g, eps) + + mean = g.op("ReduceMean", input, axes_i=axes) + numerator = sub(g, input, mean) + + # Cast it to eps dtype to avoid precision loss + is_type_half = ( + _type_utils.JitScalarType.from_value(numerator) + == _type_utils.JitScalarType.HALF + ) + if is_type_half: + eps_dtype = _type_utils.JitScalarType.from_value(eps_cst) + numerator = g.op( + "Cast", numerator, to_i=_type_utils.JitScalarType(eps_dtype).onnx_type() + ) + + # variance = e((x - e(x))^2), and (x - e(x)) is the numerator in the layer_norm formula + variance = g.op("ReduceMean", pow(g, numerator, two_cst), axes_i=axes) + denominator = sqrt(g, g.op("Add", variance, eps_cst)) + normalized = g.op("Div", numerator, denominator) + + # Cast back to input type as eps related ops are all done + if is_type_half: + input_dtype = _type_utils.JitScalarType.from_value(input) + normalized = g.op( + "Cast", normalized, to_i=_type_utils.JitScalarType(input_dtype).onnx_type() + ) + + if not (weight is None or symbolic_helper._is_none(weight)): + normalized = mul(g, normalized, weight) + if not (bias is None or symbolic_helper._is_none(bias)): + normalized = add(g, normalized, bias) + + # rdenominator := 1 / sqrt(variance + eps) + # According to aten::native_layer_norm, rdenominator should have the same dtype as input, + # mean and normalized, so we need to Cast it back + if is_type_half: + denominator = g.op( + "Cast", denominator, to_i=_type_utils.JitScalarType(input_dtype).onnx_type() + ) + rdenominator = g.op("Reciprocal", denominator) + else: + rdenominator = reciprocal(g, denominator) + + return normalized, mean, rdenominator + + +@_onnx_symbolic("aten::layer_norm") +@symbolic_helper.quantized_args(True, False, False, False) +@symbolic_helper.parse_args("v", "is", "v", "v", "f", "b") +@_beartype.beartype +def layer_norm( + g: jit_utils.GraphContext, + input: _C.Value, + normalized_shape: Sequence[int], + weight: _C.Value, + bias: _C.Value, + eps: float, + cudnn_enable: bool, +) -> _C.Value: + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "layer_norm", + input, + weight, + bias, + normalized_shape_i=normalized_shape, + eps_f=eps, + cudnn_enable_i=cudnn_enable, + ) + normalized, _, _ = native_layer_norm(g, input, 
normalized_shape, weight, bias, eps) + return normalized + + +@_onnx_symbolic("aten::instance_norm") +@symbolic_helper.parse_args("v", "v", "v", "v", "v", "b", "f", "f", "b") +@_beartype.beartype +def instance_norm( + g: jit_utils.GraphContext, + input, + weight, + bias, + running_mean, + running_var, + use_input_stats: bool, + momentum: Number, + eps: Number, + cudnn_enabled: bool, +): + symbolic_helper.check_training_mode(use_input_stats, "instance_norm") + channel_size = symbolic_helper._get_tensor_dim_size(input, 1) + if weight is None or symbolic_helper._is_none(weight): + if channel_size is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of instance_norm for unknown channel size.", + input, + ) + weight_value = torch.tensor( + [1.0] * channel_size, + dtype=_type_utils.JitScalarType.from_value(input).dtype(), + ) + weight = g.op("Constant", value_t=weight_value) + if bias is None or symbolic_helper._is_none(bias): + if channel_size is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of instance_norm for unknown channel size.", + input, + ) + bias_value = torch.tensor( + [0.0] * channel_size, + dtype=_type_utils.JitScalarType.from_value(input).dtype(), + ) + bias = g.op("Constant", value_t=bias_value) + if ( + running_mean is None + or symbolic_helper._is_none(running_mean) + or running_var is None + or symbolic_helper._is_none(running_var) + ): + return g.op("InstanceNormalization", input, weight, bias, epsilon_f=eps) + else: + input_size = symbolic_helper._get_tensor_sizes(input) + # If input shape is [N, C, H, W], reshape to [1, N * C, H, W] and call batch_norm. + # For more information instance_norm(): + # https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Normalization.cpp#L542 + input_size_reshape = input_size.copy() + n = input_size[0] + if n is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of instance_norm training for unknown " + "batch size.", + input, + ) + c = input_size[1] + input_size_reshape[0] = 1 + input_size_reshape[1] = n * c + weight_ = repeat( + g, weight, g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64)) + ) + bias_ = repeat( + g, bias, g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64)) + ) + running_mean_ = repeat( + g, + running_mean, + g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64)), + ) + running_var_ = repeat( + g, + running_var, + g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64)), + ) + input_reshaped = g.op( + "Reshape", + input, + g.op("Constant", value_t=torch.LongTensor(input_size_reshape)), + ) + out = batch_norm( + g, + input_reshaped, + weight_, + bias_, + running_mean_, + running_var_, + use_input_stats, + momentum, + eps, + cudnn_enabled, + ) + return view(g, out, g.op("Constant", value_t=torch.tensor(input_size))) + + +@_onnx_symbolic("aten::unfold") +@symbolic_helper.parse_args("v", "i", "i", "i") +@_beartype.beartype +def unfold(g: jit_utils.GraphContext, input, dimension, size, step): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("unfold", input, dimension_i=dimension, size_i=size, step_i=step) + sizes = symbolic_helper._get_tensor_sizes(input) + # FIXME(justinchuby): Get rid of the try catch here to improve readability + try: + sizedim = sizes[dimension] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. 
+ sizedim = None + if sizedim is not None: + low_indices = range(0, sizedim, step) + hi_indices = range(size, sizedim + 1, step) + stack = [ + symbolic_helper._slice_helper( + g, input, axes=[dimension], starts=[low], ends=[hi] + ) + for low, hi in zip(low_indices, hi_indices) + ] + ndim = len(sizes) + perm = list(range(0, ndim)) + perm.append(perm.pop(dimension)) + unsqueeze = [ + symbolic_helper._unsqueeze_helper( + g, g.op("Transpose", t, perm_i=perm), [dimension] + ) + for t in stack + ] + return g.op("Concat", *unsqueeze, axis_i=dimension) + else: + return symbolic_helper._unimplemented( + "Unfold", "input size not accessible", input + ) + + +@_onnx_symbolic("aten::elu") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "t", "t", "t") +@_beartype.beartype +def elu(g: jit_utils.GraphContext, input, alpha, scale, input_scale): + if scale and scale != 1.0: + return symbolic_helper._unimplemented( + "scale", "does not support scale in Elu", scale + ) + if input_scale and input_scale != 1.0: + return symbolic_helper._unimplemented( + "input_scale", "does not support input_scale in Elu", input_scale + ) + # See Note [Export inplace] + return g.op("Elu", input, alpha_f=symbolic_helper._scalar(alpha)) + + +@_onnx_symbolic("aten::selu") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def selu(g: jit_utils.GraphContext, input): + return g.op("Selu", input) + + +@_onnx_symbolic("aten::index_select") +@symbolic_helper.parse_args("v", "i", "v") +@_beartype.beartype +def index_select(g: jit_utils.GraphContext, self, dim, index): + # In case of a scalar index, index_select returns a tensor with the same rank as the input. + # To match this behavior in ONNX, we make index a 1D tensor so that the following gather + # also produces a tensor with the same rank as the input. 
+ return symbolic_helper._select_helper(g, self, dim, index) + + +@_onnx_symbolic("aten::index_put") +@_beartype.beartype +def index_put(g: jit_utils.GraphContext, self, indices_list_value, values, accumulate): + if symbolic_helper._is_packed_list(indices_list_value): + indices_list = symbolic_helper._unpack_list(indices_list_value) + else: + indices_list = [indices_list_value] + if symbolic_helper.is_caffe2_aten_fallback(): + args = [self] + indices_list + [values, accumulate] + return g.at("index_put", *args) + + accumulate = symbolic_helper._parse_arg(accumulate, "b") + + if len(indices_list) == 0: + if accumulate: + return add(g, self, values) + return values + symbolic_helper._onnx_opset_unsupported("index_put", 9, 11, self) + + +@_onnx_symbolic("aten::index_fill") +@_beartype.beartype +def index_fill(g: jit_utils.GraphContext, self, dim, index, value): + dim_value = symbolic_helper._parse_arg(dim, "i") + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "index_fill", + self, + index, + value, + overload_name="int_Scalar", + dim_i=dim_value, + ) + + expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper( + g, self, dim, index + ) + value = symbolic_helper._maybe_get_scalar(value) + value = symbolic_helper._if_scalar_type_as(value, self) + expanded_value = expand(g, value, expanded_index_shape, None) + + return scatter(g, self, dim, expanded_index, expanded_value) + + +@_onnx_symbolic("aten::index_copy") +@_beartype.beartype +def index_copy(g: jit_utils.GraphContext, self, dim, index, source): + dim_value = symbolic_helper._parse_arg(dim, "i") + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("index_copy", self, index, source, dim_i=dim_value) + expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper( + g, self, dim, index + ) + return scatter(g, self, dim, expanded_index, source) + + +@_onnx_symbolic("aten::bucketize") +@symbolic_helper.parse_args("v", "v", "b", "b") +@_beartype.beartype +def bucketize( + g: jit_utils.GraphContext, self, boundaries, out_int32=False, right=False +): + out_type = _C_onnx.TensorProtoDataType.INT64 + if out_int32: + out_type = _C_onnx.TensorProtoDataType.INT32 + # A tensor expanded_boundaries is created such that it + # contains a copy of boundaries for each element of self. + new_shape = g.op("Concat", g.op("Shape", boundaries), g.op("Shape", self), axis_i=0) + # Unsqueeze step is performed to respect ONNX's numpy style broadcasting for comparison ops + # https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md + tensor_rank = symbolic_helper._get_tensor_rank(self) + assert tensor_rank is not None + unsqueeze_axes = list(range(1, tensor_rank + 1)) + expanded_boundaries = expand( + g, + symbolic_helper._unsqueeze_helper(g, boundaries, unsqueeze_axes), + new_shape, + None, + ) + # Compare each element of self to boundaries to get a tensor + # with leading 1s and trailing 0s. + # e.g., 4 > [1, 3, 4] = [1, 1, 0] + # The index of the last 1 is the bucket where the element should go. + if right: + cond = ge(g, self, expanded_boundaries) + else: + cond = gt(g, self, expanded_boundaries) + cond_out = g.op("Cast", cond, to_i=out_type) + # Sum to get the number of 1s corresponding to each element, + # which is the same as the bucket index. 
+ # e.g., sum(4 > [1, 3, 4]) = sum([1, 1, 0]) = 2 + return symbolic_helper._reducesum_helper(g, cond_out, axes_i=[0], keepdims_i=0) + + +@_onnx_symbolic("aten::type_as") +@_beartype.beartype +def type_as(g: jit_utils.GraphContext, self, other): + self_dtype = symbolic_helper._try_get_scalar_type(self) + other_dtype = symbolic_helper._try_get_scalar_type(other) + if self_dtype == other_dtype and self_dtype is not None: + return self + if other_dtype is not None: + return g.op( + "Cast", + self, + to_i=other_dtype.onnx_type(), + ) + + if symbolic_helper.is_caffe2_aten_fallback(): + # We don't know the type of other, bail by emitting ATen + return g.at("type_as", self, other) + + raise errors.SymbolicValueError( + "Unsupported: ONNX export of type_as for tensor " + "of unknown dtype. Please check if the dtype of the " + "parameter passed to the type_as function is correct.", + other, + ) + + +@_onnx_symbolic("aten::cosine_similarity") +@symbolic_helper.parse_args("v", "v", "i", "f") +@_beartype.beartype +def cosine_similarity(g: jit_utils.GraphContext, x1, x2, dim, eps): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("cosine_similarity", x1, x2, dim_i=dim, eps_f=eps) + cross = symbolic_helper._reducesum_helper( + g, mul(g, x1, x2), axes_i=[dim], keepdims_i=0 + ) + x1_l2 = symbolic_helper._reducesum_helper( + g, mul(g, x1, x1), axes_i=[dim], keepdims_i=0 + ) + x2_l2 = symbolic_helper._reducesum_helper( + g, mul(g, x2, x2), axes_i=[dim], keepdims_i=0 + ) + div_tens = max( + g, sqrt(g, mul(g, x1_l2, x2_l2)), g.op("Constant", value_t=torch.tensor([eps])) + ) + return div(g, cross, div_tens) + + +@_onnx_symbolic("aten::pairwise_distance") +@_beartype.beartype +def pairwise_distance(g: jit_utils.GraphContext, input1, input2, p, eps, keepdim): + if not symbolic_helper._is_value(eps): + eps = g.op("Constant", value_t=torch.tensor([eps])) + inv_p = div( + g, + g.op("Constant", value_t=torch.tensor([1], dtype=torch.float)), + add(g, p, eps), + ) + summation = symbolic_helper._reducesum_helper( + g, + pow(g, sub(g, input1, input2), p), + axes_i=[-1], + keepdims_i=symbolic_helper._parse_arg(keepdim, "i"), + ) + return pow(g, summation, inv_p) + + +@_onnx_symbolic("aten::clone") +# ignore clone operators that are inserted by PyTorch autograd +@_beartype.beartype +def clone(g: jit_utils.GraphContext, input, unused_memory_format): + return input + + +@_onnx_symbolic("aten::abs") +@_beartype.beartype +def abs(g: jit_utils.GraphContext, self): + return g.op("Abs", self) + + +@_onnx_symbolic("aten::log") +@_beartype.beartype +def log(g: jit_utils.GraphContext, self): + return g.op("Log", self) + + +@_onnx_symbolic("aten::log1p") +@_beartype.beartype +def log1p(g: jit_utils.GraphContext, self): + return log(g, add(g, symbolic_helper._if_scalar_type_as(torch.ones(1), self), self)) + + +@_onnx_symbolic("aten::log10") +@_beartype.beartype +def log10(g: jit_utils.GraphContext, self): + _ln10 = 2.30258509299404568401 + return g.op("Div", log(g, self), g.op("Constant", value_t=torch.tensor([_ln10]))) + + +@_onnx_symbolic("aten::pow") +@_beartype.beartype +def pow(g: jit_utils.GraphContext, self, exponent): + f_dtype = _type_utils.JitScalarType.from_value(self) + if not symbolic_helper._is_fp(self): + f_dtype = _type_utils.JitScalarType.FLOAT + self = g.op("Cast", self, to_i=f_dtype.onnx_type()) + if not symbolic_helper._is_fp(exponent): + exponent = g.op( + "Cast", + exponent, + to_i=f_dtype.onnx_type(), + ) + pow = g.op("Pow", self, exponent) + return pow + + +@_onnx_symbolic("aten::clamp") 
+@_beartype.beartype +def clamp(g: jit_utils.GraphContext, self, min, max): + # min or max may be None that we need to dispatch to + # Clip separately, as ONNX does not have None syntax + if symbolic_helper._is_none(min): + return clamp_max(g, self, max) + elif symbolic_helper._is_none(max): + return clamp_min(g, self, min) + else: + if symbolic_helper._is_constant(min) and symbolic_helper._is_constant(max): + return _op_with_optional_float_cast( + g, + "Clip", + self, + min_f=symbolic_helper._parse_arg(min, "f"), + max_f=symbolic_helper._parse_arg(max, "f"), + opset_before=12, + ) + else: + return clamp_max(g, clamp_min(g, self, min), max) + + +@_onnx_symbolic("aten::clamp_min") +@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def clamp_min(g: jit_utils.GraphContext, self, min): + if symbolic_helper._is_constant(min): + return _op_with_optional_float_cast( + g, "Clip", self, min_f=symbolic_helper._parse_arg(min, "f"), opset_before=12 + ) + else: + dtype = _type_utils.JitScalarType.from_value(self) + min = g.op("Cast", min, to_i=dtype.onnx_type()) + return _op_with_optional_float_cast(g, "Max", self, min, opset_before=12) + + +@_onnx_symbolic("aten::clamp_max") +@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def clamp_max(g: jit_utils.GraphContext, self, max): + if symbolic_helper._is_constant(max): + return _op_with_optional_float_cast( + g, "Clip", self, max_f=symbolic_helper._parse_arg(max, "f"), opset_before=12 + ) + else: + dtype = _type_utils.JitScalarType.from_value(self) + max = g.op("Cast", max, to_i=dtype.onnx_type()) + return _op_with_optional_float_cast(g, "Min", self, max, opset_before=12) + + +@_onnx_symbolic("aten::max") +# torch.max (same for torch.min) actually has two interfaces smashed together: +# torch.max(x, dim, keepdim) and torch.max(x, y) +# TODO(justinchuby): Support multiple quantized args in output +@_beartype.beartype +def max(g: jit_utils.GraphContext, self, dim_or_y=None, keepdim=None): + # torch.max(input) + if dim_or_y is None and keepdim is None: + return g.op("ReduceMax", self, keepdims_i=0) + # torch.max(input, other) + if keepdim is None: + return _op_with_optional_float_cast(g, "Max", self, dim_or_y, opset_before=12) + # torch.max(input, dim, keepdim) + else: + dim = symbolic_helper._get_const(dim_or_y, "i", "dim") + keepdim = symbolic_helper._get_const(keepdim, "i", "keepdim") + max = g.op("ReduceMax", self, axes_i=[dim], keepdims_i=keepdim) + indices = g.op("ArgMax", self, axis_i=dim, keepdims_i=keepdim) + return max, indices + + +@_onnx_symbolic("aten::maximum") +@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def maximum(g: jit_utils.GraphContext, input, other): + return max(g, input, dim_or_y=other) + + +@_onnx_symbolic("aten::min") +# TODO(justinchuby): Support multiple quantized args in output +@_beartype.beartype +def min(g: jit_utils.GraphContext, self, dim_or_y=None, keepdim=None): + # torch.min(input) + if dim_or_y is None and keepdim is None: + return g.op("ReduceMin", self, keepdims_i=0) + # torch.min(input, other) + if keepdim is None: + return _op_with_optional_float_cast(g, "Min", self, dim_or_y, opset_before=12) + # torch.min(input, dim, keepdim) + else: + dim = symbolic_helper._get_const(dim_or_y, "i", "dim") + keepdim = symbolic_helper._get_const(keepdim, "i", "keepdim") + min = g.op("ReduceMin", self, axes_i=[dim], keepdims_i=keepdim) + indices = g.op("ArgMin", self, axis_i=dim, keepdims_i=keepdim) + return min, indices + + +@_onnx_symbolic("aten::minimum") 
+@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def minimum(g: jit_utils.GraphContext, input, other): + return min(g, input, dim_or_y=other) + + +@_onnx_symbolic("aten::amax") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "is", "i") +@_beartype.beartype +def amax(g: jit_utils.GraphContext, self, dim, keepdim): + return g.op("ReduceMax", self, axes_i=dim, keepdims_i=keepdim) + + +@_onnx_symbolic("aten::amin") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "is", "i") +@_beartype.beartype +def amin(g: jit_utils.GraphContext, self, dim, keepdim): + return g.op("ReduceMin", self, axes_i=dim, keepdims_i=keepdim) + + +@_onnx_symbolic("aten::aminmax") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def aminmax(g: jit_utils.GraphContext, self, dim, keepdim): + reduce_kwargs = {"keepdims_i": keepdim} + if not symbolic_helper._is_none(dim): + dim = symbolic_helper._get_const(dim, "i", "dim") + reduce_kwargs["axes_i"] = [dim] + + return g.op("ReduceMin", self, **reduce_kwargs), g.op( + "ReduceMax", self, **reduce_kwargs + ) + + +@_onnx_symbolic("aten::exp") +@_beartype.beartype +def exp(g: jit_utils.GraphContext, self): + return g.op("Exp", self) + + +@_onnx_symbolic("aten::dropout_") +@_onnx_symbolic("aten::dropout") +@symbolic_helper.parse_args("v", "f", "i") +@_beartype.beartype +def dropout(g: jit_utils.GraphContext, input, p, train): + symbolic_helper.check_training_mode(train, "dropout") + # if train is False, dropout is no-op + if not train: + return input + r, _ = g.op("Dropout", input, ratio_f=p, outputs=2) + return r + + +@_onnx_symbolic( + "aten::alpha_dropout_", decorate=[_apply_params("aten::alpha_dropout_")] +) # See Note [Export inplace] +@_onnx_symbolic( + "aten::feature_alpha_dropout_", + decorate=[_apply_params("aten::feature_alpha_dropout_")], +) +@_onnx_symbolic( + "aten::feature_dropout_", decorate=[_apply_params("aten::feature_dropout_")] +) +@_onnx_symbolic( + "aten::feature_alpha_dropout", + decorate=[_apply_params("aten::feature_alpha_dropout")], +) +@_onnx_symbolic("aten::alpha_dropout", decorate=[_apply_params("aten::alpha_dropout")]) +@_onnx_symbolic( + "aten::feature_dropout", decorate=[_apply_params("aten::feature_dropout")] +) +@_beartype.beartype +def _unsupported_dropout(name: str): + @symbolic_helper.parse_args("v", "none", "b") + @_beartype.beartype + def feature_dropout(g, input, p, train): + # NB: In inference mode, FeatureDropout is exported as an identity op. 
+ if train: + return symbolic_helper._unimplemented(name, "training mode", input) + return input + + return feature_dropout + + +@_onnx_symbolic("aten::norm") +@symbolic_helper.parse_args("v", "t", "is", "i", "v") +@_beartype.beartype +def norm(g: jit_utils.GraphContext, self, p, dim, keepdim, dtype=None): + if p == 1: + f = _reduce_op_symbolic("ReduceL1") + elif p == 2: + f = _reduce_op_symbolic("ReduceL2") + else: + raise errors.SymbolicValueError( + "ONNX export only p-norms with p of 1 or 2", self + ) + result = f(g, self, dim=dim, keepdim=keepdim) + if dtype is not None: + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + result = g.op("Cast", result, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + return result + + +@_onnx_symbolic("aten::conv_tbc") +@symbolic_helper.parse_args("v", "v", "v", "i") +@_beartype.beartype +def conv_tbc(g: jit_utils.GraphContext, input, weight, bias, pad): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("conv_tbc", input, weight, bias, pad_i=pad) + else: + # input must have 3 dimensions, see: + # https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/ConvolutionTBC.cpp#L8-L10 + # input = (time, batch, in_channels) + # weight = (kernel_width, in_channels, out_channels) + # bias = (out_channels,) + input = g.op("Transpose", input, perm_i=[1, 2, 0]) + weight = g.op("Transpose", weight, perm_i=[2, 1, 0]) + conv = conv1d(g, input, weight, bias, [1], [pad], [1], 1) + return g.op("Transpose", conv, perm_i=[2, 0, 1]) + + +@_onnx_symbolic("aten::_unique") +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def _unique(g: jit_utils.GraphContext, input, sorted, return_inverse): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "_unique", + input, + sorted_i=sorted, + return_inverse_i=return_inverse, + outputs=2, + ) + else: + return symbolic_helper._onnx_unsupported("_unique", input) + + +@_onnx_symbolic("aten::_unique2") +@symbolic_helper.parse_args("v", "i", "i", "i") +@_beartype.beartype +def _unique2(g: jit_utils.GraphContext, input, sorted, return_inverse, return_counts): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "_unique2", + input, + sorted_i=sorted, + return_inverse_i=return_inverse, + return_counts_i=return_counts, + outputs=3, + ) + + symbolic_helper._onnx_opset_unsupported("_unique2", 9, 11, input) + + +@_onnx_symbolic("aten::_cast_Byte") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Byte(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.UINT8) + + +@_onnx_symbolic("aten::_cast_Char") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Char(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT8) + + +@_onnx_symbolic("aten::_cast_Short") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Short(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT16) + + +@_onnx_symbolic("aten::_cast_Int") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Int(g: jit_utils.GraphContext, input, non_blocking): + 
return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT32) + + +@_onnx_symbolic("aten::_cast_Long") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Long(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT64) + + +@_onnx_symbolic("aten::_cast_Half") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Half(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.FLOAT16) + + +@_onnx_symbolic("aten::_cast_Float") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Float(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.FLOAT) + + +@_onnx_symbolic("aten::_cast_Double") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Double(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.DOUBLE) + + +@_onnx_symbolic("aten::_cast_Bool") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Bool(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.BOOL) + + +@_onnx_symbolic("aten::empty") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +@_beartype.beartype +def empty( + g: jit_utils.GraphContext, + sizes, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + return zeros(g, sizes, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::empty_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +@_beartype.beartype +def empty_like( + g: jit_utils.GraphContext, + input, + dtype=None, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + return zeros_like(g, input, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::new_empty") +@_beartype.beartype +def new_empty( + g: jit_utils.GraphContext, self, sizes, dtype, layout, device, pin_memory=False +): + self_dtype = symbolic_helper._try_get_scalar_type(self) + if symbolic_helper._is_none(dtype) and self_dtype is not None: + dtype = self_dtype + return empty(g, sizes, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::scalar_tensor") +@_beartype.beartype +def scalar_tensor(g: jit_utils.GraphContext, scalar, dtype, *options): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + dtype = _type_utils.JitScalarType.FLOAT + scalar = g.op("Cast", scalar, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + return scalar + + +@_onnx_symbolic("aten::tensor") +@_beartype.beartype +def tensor( + g: jit_utils.GraphContext, data, dtype=None, device=None, requires_grad=False +): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if symbolic_helper._is_packed_list(data): + if dtype is None: + dtype = _type_utils.JitScalarType.from_value( + symbolic_helper._unpack_list(data)[0] + ) + input_list = list() + for t in symbolic_helper._unpack_list(data): + shape_reference = g.op("Constant", value_t=torch.LongTensor([1])) + t = symbolic_helper._reshape_helper(g, t, 
shape_reference) + t = g.op("Cast", t, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + input_list.append(t) + return g.op("Concat", *input_list, axis_i=0) + else: + if dtype is None: + dtype = _type_utils.JitScalarType.from_value(data) + if symbolic_helper._is_list(data) and ( + symbolic_helper._is_tensor_list(data) + or symbolic_helper._is_scalar_list(data) + ): + data = g.op("ConcatFromSequence", data, axis_i=0, new_axis_i=1) + return g.op("Cast", data, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + + +@_onnx_symbolic("aten::as_tensor") +@_beartype.beartype +def as_tensor(g: jit_utils.GraphContext, data, dtype=None, device=None): + return tensor(g, data, dtype, device) + + +@_onnx_symbolic("aten::zeros") +@symbolic_helper.parse_args("v", "i", "v", "v", "v") +@_beartype.beartype +def zeros(g: jit_utils.GraphContext, sizes, dtype, layout, device, pin_memory=False): + # NOTE: no way to set device, layout and pin_memory in ONNX, so we ignore it + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + sizes_ = symbolic_helper._maybe_get_const(sizes, "is") + if isinstance(sizes_, list) and len(sizes_) == 0: + sizes = g.op("Constant", value_t=torch.tensor([]).to(torch.int64)) + return g.op( + "ConstantOfShape", + sizes, + value_t=torch.tensor([0], dtype=scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::zeros_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +@_beartype.beartype +def zeros_like( + g: jit_utils.GraphContext, + input, + dtype=None, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + shape = g.op("Shape", input) + if symbolic_helper._is_none(dtype): + scalar_type = _type_utils.JitScalarType.from_value( + input, _type_utils.JitScalarType.FLOAT + ) + else: + scalar_type = _type_utils.JitScalarType(dtype) + return g.op( + "ConstantOfShape", + shape, + value_t=torch.tensor([0], dtype=scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::new_zeros") +@_beartype.beartype +def new_zeros( + g: jit_utils.GraphContext, self, sizes, dtype, layout, device, pin_memory=False +): + self_dtype = symbolic_helper._try_get_scalar_type(self) + + if symbolic_helper._is_none(dtype) and self_dtype is not None: + dtype = self_dtype + return zeros(g, sizes, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::zero") +@_beartype.beartype +def zero(g: jit_utils.GraphContext, self): + self_dtype = symbolic_helper._try_get_scalar_type(self) + return zeros_like(g, self, self_dtype) + + +@_onnx_symbolic("aten::ones") +@symbolic_helper.parse_args("v", "i", "v", "v", "v") +@_beartype.beartype +def ones(g: jit_utils.GraphContext, sizes, dtype, layout, device, pin_memory=False): + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + sizes_ = symbolic_helper._maybe_get_const(sizes, "is") + if isinstance(sizes_, list) and len(sizes_) == 0: + sizes = g.op("Constant", value_t=torch.tensor([]).to(torch.int64)) + return g.op( + "ConstantOfShape", + sizes, + value_t=torch.tensor([1], dtype=scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::ones_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +@_beartype.beartype +def ones_like( + g: jit_utils.GraphContext, + input, + dtype=None, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + shape = g.op("Shape", input) + if symbolic_helper._is_none(dtype): + scalar_type = _type_utils.JitScalarType.from_value( + input, 
_type_utils.JitScalarType.FLOAT + ) + else: + scalar_type = _type_utils.JitScalarType(dtype) + return g.op( + "ConstantOfShape", + shape, + value_t=torch.tensor([1], dtype=scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::new_ones") +@_beartype.beartype +def new_ones( + g: jit_utils.GraphContext, self, sizes, dtype, layout, device, pin_memory=False +): + self_dtype = symbolic_helper._try_get_scalar_type(self) + if symbolic_helper._is_none(dtype) and self_dtype is not None: + dtype = self_dtype + return ones(g, sizes, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::full") +@_beartype.beartype +def full( + g: jit_utils.GraphContext, sizes, value, dtype, layout, device, pin_memory=False +): + const_value = symbolic_helper._maybe_get_const(value, "t") + if symbolic_helper._is_value(const_value): + dtype = _type_utils.JitScalarType.FLOAT if dtype is None else dtype + tmp = zeros(g, sizes, dtype, layout, device) + return add(g, tmp, value, g.op("Constant", value_t=torch.tensor(1))) + else: + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + sizes_ = symbolic_helper._maybe_get_const(sizes, "is") + if isinstance(sizes_, list) and len(sizes_) == 0: + sizes = g.op("Constant", value_t=torch.tensor([]).to(torch.int64)) + return g.op( + "ConstantOfShape", + sizes, + value_t=const_value.view(1).to(scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::full_like") +@_beartype.beartype +def full_like( + g: jit_utils.GraphContext, + input, + fill_value, + dtype=None, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + fill_value = symbolic_helper._maybe_get_const(fill_value, "f") + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + scalar_type = _type_utils.JitScalarType.from_value( + input, _type_utils.JitScalarType.FLOAT + ) + else: + scalar_type = _type_utils.JitScalarType(dtype) + if symbolic_helper._is_value(fill_value): + tmp = zeros_like(g, input, dtype, layout, device) + fill_value = g.op("Cast", fill_value, to_i=scalar_type.onnx_type()) + return add(g, tmp, fill_value, g.op("Constant", value_t=torch.tensor(1))) + else: + shape = g.op("Shape", input) + return g.op( + "ConstantOfShape", + shape, + value_t=torch.tensor([fill_value], dtype=scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::new_full") +@_beartype.beartype +def new_full( + g: jit_utils.GraphContext, + self, + size, + fill_value, + dtype, + layout, + device, + pin_memory=False, +): + self_dtype = symbolic_helper._try_get_scalar_type(self) + if symbolic_helper._is_none(dtype) and self_dtype is not None: + dtype = self_dtype + return full(g, size, fill_value, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::eye") +@_beartype.beartype +def eye(g: jit_utils.GraphContext, *args): + if len(args) == 5: + # aten::eye(n, dtype, layout, device, pin_memory) + n, dtype, layout, device, pin_memory = args + dim_size = symbolic_helper._unsqueeze_helper(g, n, [0]) + shape = g.op("Concat", dim_size, dim_size, axis_i=0) + tensor = zeros(g, shape, dtype, layout, device) + return g.op("EyeLike", tensor) + if len(args) == 6: + # aten::eye(n, m, dtype, layout, device, pin_memory) + n, m, dtype, layout, device, pin_memory = args + shape = g.op( + "Concat", + symbolic_helper._unsqueeze_helper(g, n, [0]), + symbolic_helper._unsqueeze_helper(g, m, [0]), + axis_i=0, + ) + tensor = zeros(g, shape, dtype, layout, device) + return g.op("EyeLike", 
tensor) + + return symbolic_helper._unimplemented("aten::eye", f"with {len(args)} arguments") + + +@_onnx_symbolic("aten::slice") +@_beartype.beartype +def slice(g: jit_utils.GraphContext, self, *args): + if len(args) == 4: + # aten::slice(Tensor self, int dim, int start, int end, int step) -> Tensor + dim, start, end, step = args + step = symbolic_helper._parse_arg(step, "i") + if step != 1: + raise errors.SymbolicValueError("step!=1 is currently not supported", self) + is_start_none = start.node().kind() == "prim::Constant" and isinstance( + start.type(), _C.NoneType + ) + is_end_none = end.node().kind() == "prim::Constant" and isinstance( + end.type(), _C.NoneType + ) + is_start_onnx_const = start.node().kind() == "onnx::Constant" + is_end_onnx_const = end.node().kind() == "onnx::Constant" + if ( + ((not is_start_none) and (not is_start_onnx_const)) + or ((not is_end_none) and (not is_end_onnx_const)) + or dim.node().kind() != "onnx::Constant" + ): + if GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of Slice with dynamic inputs. DynamicSlice " + "is a deprecated experimental op. Please use statically allocated " + "variables or export to a higher opset version.", + self, + ) + else: + start_unsqueezed = symbolic_helper._unsqueeze_helper(g, start, [0]) + end_unsqueezed = symbolic_helper._unsqueeze_helper(g, end, [0]) + dim_unsqueezed = symbolic_helper._unsqueeze_helper(g, dim, [0]) + return g.op( + "DynamicSlice", + self, + start_unsqueezed, + end_unsqueezed, + dim_unsqueezed, + ) + else: + start = 0 if is_start_none else symbolic_helper._parse_arg(start, "i") + end = ( + _constants.INT64_MAX + if is_end_none + else symbolic_helper._parse_arg(end, "i") + ) + dim = symbolic_helper._parse_arg(dim, "i") + return symbolic_helper._slice_helper( + g, self, axes=[dim], starts=[start], ends=[end] + ) + elif len(args) == 3: + # aten::slice(t[] l, int start, int end, int step) -> t[] + start, end, step = args + dim = 0 + is_start_none = start.node().kind() == "prim::Constant" and isinstance( + start.type(), _C.NoneType + ) + is_end_none = end.node().kind() == "prim::Constant" and isinstance( + end.type(), _C.NoneType + ) + start = 0 if is_start_none else symbolic_helper._parse_arg(start, "i") + end = ( + _constants.INT64_MAX + if is_end_none + else symbolic_helper._parse_arg(end, "i") + ) + return symbolic_helper._slice_helper( + g, self, axes=[dim], starts=[start], ends=[end] + ) + + return symbolic_helper._unimplemented("aten::slice", f"with {len(args)} arguments") + + +@_onnx_symbolic("aten::hardtanh") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "f", "f") +@_beartype.beartype +def hardtanh(g: jit_utils.GraphContext, self: _C.Value, min_val: float, max_val: float): + return _op_with_optional_float_cast( + g, "Clip", self, min_f=min_val, max_f=max_val, opset_before=12 + ) + + +@_onnx_symbolic("aten::hardswish") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v") +@_beartype.beartype +def hardswish(g: jit_utils.GraphContext, self): + hs = hardsigmoid(g, self) + return g.op("Mul", self, hs) + + +@_onnx_symbolic("aten::hardsigmoid") +# Fixed scale and zero_point, discovered from aten/src/ATen/native/quantized/cpu/qhardsigmoid.cpp +@symbolic_helper.quantized_args(True, scale=1.0 / 256.0, zero_point=0) +@symbolic_helper.parse_args("v") +@_beartype.beartype +def hardsigmoid(g: jit_utils.GraphContext, self): + # Set alpha_f to 1 / 6 to make op equivalent to PyTorch's 
definition of Hardsigmoid. + # See https://pytorch.org/docs/stable/generated/torch.nn.Hardsigmoid.html + return g.op("HardSigmoid", self, alpha_f=1 / 6) + + +@_onnx_symbolic("aten::tanhshrink") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def tanhshrink(g: jit_utils.GraphContext, self): + return g.op("Sub", self, tanh(g, self)) + + +@_onnx_symbolic("aten::hardshrink") +@symbolic_helper.parse_args("v", "f") +@_beartype.beartype +def hardshrink(g: jit_utils.GraphContext, self, lambd): + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.FLOAT + ) + lambd_op = g.op( + "Constant", + value_t=torch.tensor(lambd, dtype=scalar_type.dtype()), + ) + cond = logical_or(g, gt(g, self, lambd_op), lt(g, self, neg(g, lambd_op))) + return g.op( + "Where", + cond, + self, + g.op( + "Constant", + value_t=torch.tensor(0, dtype=scalar_type.dtype()), + ), + ) + + +@_onnx_symbolic("aten::softshrink") +@symbolic_helper.parse_args("v", "f") +@_beartype.beartype +def softshrink(g: jit_utils.GraphContext, self, lambd): + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.FLOAT + ) + lambd_op = g.op( + "Constant", + value_t=torch.tensor(lambd, dtype=scalar_type.dtype()), + ) + gt_cond = gt(g, self, lambd_op) + gt_out = g.op( + "Where", + gt_cond, + sub(g, self, lambd_op), + g.op( + "Constant", + value_t=torch.tensor(0, dtype=scalar_type.dtype()), + ), + ) + lt_cond = lt(g, self, neg(g, lambd_op)) + lt_out = g.op( + "Where", + lt_cond, + add(g, self, lambd_op), + g.op( + "Constant", + value_t=torch.tensor(0, dtype=scalar_type.dtype()), + ), + ) + return add(g, gt_out, lt_out) + + +@_onnx_symbolic("aten::alias") +@_beartype.beartype +def alias(g: jit_utils.GraphContext, self): + return self + + +@_onnx_symbolic("aten::unsqueeze") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def unsqueeze(g: jit_utils.GraphContext, self, dim): + # Handle negative dim + if dim < 0: + rank = symbolic_helper._get_tensor_rank(self) + if rank is not None: + warnings.warn( + "ONNX export unsqueeze with negative axis " + + str(dim) + + " might cause the onnx model to be incorrect. " + + "Negative axis is not supported in ONNX. " + + "Axis is converted to " + + str(dim + rank + 1) + + " based on input shape at export time. " + + "Passing an tensor of different rank in execution will be incorrect." + ) + dim = dim + rank + 1 + else: + return symbolic_helper._unimplemented( + "unsqueeze", "negative axis with unknown input rank", self + ) + + return symbolic_helper._unsqueeze_helper(g, self, axes_i=[dim]) + + +@_onnx_symbolic("aten::sort") +# TODO(justinchuby): Support multiple quantized args in output +@symbolic_helper.parse_args("v", "i", "i", "none") +@_beartype.beartype +def sort(g: jit_utils.GraphContext, self, dim, decending, out=None): + if out is not None: + symbolic_helper._unimplemented( + "Sort", "Out parameter is not supported for sort", self + ) + self_sizes = symbolic_helper._get_tensor_sizes(self) + try: + dim_size = self_sizes[dim] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. 
+ dim_size = None + + if dim_size is None: + return symbolic_helper._unimplemented("Sort", "input size not accessible", self) + + return g.op("TopK", self, k_i=dim_size, axis_i=dim, outputs=2) + + +@_onnx_symbolic("aten::numel") +@_beartype.beartype +def numel(g: jit_utils.GraphContext, self): + shape = g.op("Shape", self) + return g.op("ReduceProd", shape, keepdims_i=0) + + +@_onnx_symbolic("aten::topk") +# TODO(justinchuby): Support multiple quantized args in output +@symbolic_helper.parse_args("v", "i", "i", "i", "i", "none") +@_beartype.beartype +def topk(g: jit_utils.GraphContext, self, k, dim, largest, sorted, out=None): + if out is not None: + symbolic_helper._unimplemented( + "TopK", "Out parameter is not supported for topk", self + ) + if not largest: + symbolic_helper._unimplemented("TopK", "Ascending TopK is not supported", self) + + return g.op("TopK", self, k_i=k, axis_i=dim, outputs=2) + + +@_onnx_symbolic("prim::convert_element_type") +@_beartype.beartype +def convert_element_type(g: jit_utils.GraphContext, self, *args): + dtype = symbolic_helper._get_const(args[0], "i", "dtype") + return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + + +@_onnx_symbolic("aten::to") +@_beartype.beartype +def to(g: jit_utils.GraphContext, self, *args): + @_beartype.beartype + def is_aten_to_device_only(args): + if len(args) == 4: + # aten::to(Tensor, Device, bool, bool, memory_format) + return ( + args[0].node().kind() == "prim::device" + or args[0].type().isSubtypeOf(_C.ListType.ofInts()) + or isinstance(args[0].type(), _C.DeviceObjType) + ) + elif len(args) == 5: + # aten::to(Tensor, Device, ScalarType, bool, bool, memory_format) + # When dtype is None, this is a aten::to(device) call + dtype = symbolic_helper._get_const(args[1], "i", "dtype") + return dtype is None + elif len(args) in (6, 7): + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, memory_format) -> Tensor + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, bool, memory_format) -> Tensor + # When dtype is None, this is a aten::to(device) call + dtype = symbolic_helper._get_const(args[0], "i", "dtype") + return dtype is None + return False + + # ONNX doesn't have a concept of a device, so we ignore device-only casts + if is_aten_to_device_only(args): + return self + + if len(args) == 4: + # TestONNXRuntime::test_ones_bool shows args[0] of aten::to() can be onnx::Constant[value=]() + # In this case, the constant value is a tensor not int, + # so symbolic_helper._maybe_get_const(args[0], 'i') would not work. 
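+ # Inspect the Constant node directly: a 0-dim tensor value is unwrapped into a
+ # plain int dtype here, while any other value falls through to the
+ # JitScalarType.from_value(args[0]) path below.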
+ dtype = args[0] + if ( + symbolic_helper._is_value(args[0]) + and args[0].node().kind() == "onnx::Constant" + ): + tval = symbolic_helper._node_get(args[0].node(), "value") + if isinstance(tval, torch.Tensor): + if len(tval.shape) == 0: + tval = tval.item() + dtype = int(tval) + else: + dtype = tval + + if symbolic_helper._is_value(dtype) or isinstance(dtype, torch.Tensor): + # aten::to(Tensor, Tensor, bool, bool, memory_format) + dtype = _type_utils.JitScalarType.from_value(args[0]) + return g.op( + "Cast", + self, + to_i=dtype.onnx_type(), + ) + else: + # aten::to(Tensor, ScalarType, bool, bool, memory_format) + # memory_format is ignored + return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + elif len(args) == 5: + # aten::to(Tensor, Device, ScalarType, bool, bool, memory_format) + dtype = symbolic_helper._get_const(args[1], "i", "dtype") + # memory_format is ignored + return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + elif len(args) == 6: + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, memory_format) -> Tensor + dtype = symbolic_helper._get_const(args[0], "i", "dtype") + # Layout, device and memory_format are ignored + return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + elif len(args) == 7: + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, bool, memory_format) -> Tensor + dtype = symbolic_helper._get_const(args[0], "i", "dtype") + # Layout, device and memory_format are ignored + return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + + return symbolic_helper._onnx_unsupported("Unknown aten::to signature", self) + + +@_onnx_symbolic("aten::repeat") +@_beartype.beartype +def repeat(g: jit_utils.GraphContext, self, repeats): + dtype = _type_utils.JitScalarType.INT64 + shape_ = ones_like(g, repeats, dtype) + self = g.op("Expand", self, shape_) + return g.op("Tile", self, repeats) + + +@_onnx_symbolic("aten::repeat_interleave") +@_beartype.beartype +def repeat_interleave( + g: jit_utils.GraphContext, self, repeats, dim=None, output_size=None +): + input = self + # if dim is None flatten + # By default, use the flattened input array, and return a flat output array + if symbolic_helper._is_none(dim): + input = symbolic_helper._reshape_helper( + g, self, g.op("Constant", value_t=torch.tensor([-1])) + ) + dim = torch.tensor(0, dtype=torch.int64) + else: + dim = symbolic_helper._maybe_get_scalar(dim) + + repeats_dim = symbolic_helper._get_tensor_rank(repeats) + repeats_sizes = symbolic_helper._get_tensor_sizes(repeats) + input_sizes = symbolic_helper._get_tensor_sizes(input) + if repeats_dim is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of repeat_interleave for unknown repeats rank.", + input, + ) + if repeats_sizes is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of repeat_interleave for unknown repeats size.", + input, + ) + if input_sizes is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of repeat_interleave for unknown input size.", + input, + ) + + # Handle cases where dim is negative + if dim < 0: + dim += len(input_sizes) + + input_sizes_temp = input_sizes.copy() + for idx, input_size in enumerate(input_sizes): + if input_size is None: + input_sizes[idx], input_sizes_temp[idx] = 0, -1 + + # Cases where repeats is an int or single value tensor + if repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1): + if input_sizes[dim] == 0: + return 
symbolic_helper._onnx_opset_unsupported_detailed( + "repeat_interleave", + 9, + 13, + "Unsupported along dimension with unknown input size", + self, + ) + return symbolic_helper._repeat_interleave_single_value_repeat_helper( + g, self, repeats, dim + ) + + # Cases where repeats is a 1 dim Tensor + elif repeats_dim == 1: + if input_sizes[dim] == 0: + return symbolic_helper._onnx_opset_unsupported_detailed( + "repeat_interleave", + 9, + 13, + "Unsupported along dimension with unknown input size", + self, + ) + if repeats_sizes[0] is None: + return symbolic_helper._onnx_opset_unsupported_detailed( + "repeat_interleave", + 9, + 13, + "Unsupported for cases with dynamic repeats", + self, + ) + assert ( + repeats_sizes[0] == input_sizes[dim] + ), "repeats must have the same size as input along dim" + reps = repeats_sizes[0] + else: + raise errors.SymbolicValueError("repeats must be 0-dim or 1-dim tensor", self) + + final_splits = list() + r_splits = symbolic_helper._repeat_interleave_split_helper(g, repeats, reps, 0) + i_splits = symbolic_helper._repeat_interleave_split_helper(g, input, reps, dim) + input_sizes[dim], input_sizes_temp[dim] = -1, 1 + for idx, r_split in enumerate(r_splits): + i_split = unsqueeze(g, i_splits[idx], dim + 1) + r_concat = [ + g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[: dim + 1])), + r_split, + g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[dim + 1 :])), + ] + r_concat = g.op("Concat", *r_concat, axis_i=0) + i_split = expand(g, i_split, r_concat, None) + i_split = symbolic_helper._reshape_helper( + g, + i_split, + g.op("Constant", value_t=torch.LongTensor(input_sizes)), + allowzero=0, + ) + final_splits.append(i_split) + return g.op("Concat", *final_splits, axis_i=dim) + + +@_onnx_symbolic("aten::pixel_shuffle") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def pixel_shuffle(g: jit_utils.GraphContext, self, upscale_factor): + dims = symbolic_helper._get_tensor_sizes(self) + if len(dims) != 4: + return symbolic_helper._unimplemented( + "pixel_shuffle", "only support 4d input", self + ) + if any(i is None for i in dims[1:]): + after_view = symbolic_helper._reshape_helper( + g, + symbolic_helper._unsqueeze_helper(g, self, [2, 3]), + g.op( + "Constant", + value_t=torch.tensor([0, -1, upscale_factor, upscale_factor, 0, 0]), + ), + allowzero=0, + ) + after_transpose = g.op("Transpose", after_view, perm_i=[0, 1, 4, 2, 5, 3]) + # For dynamic input shapes, two reshapes are performed + reshape_h = symbolic_helper._reshape_helper( + g, + after_transpose, + g.op("Constant", value_t=torch.tensor([0, 0, -1, 1, 0, 0])), + allowzero=0, + ) + reshape_w = symbolic_helper._reshape_helper( + g, + reshape_h, + g.op("Constant", value_t=torch.tensor([0, 0, 0, 0, -1, 1])), + allowzero=0, + ) + return symbolic_helper._squeeze_helper(g, reshape_w, [3, 5]) + else: + output_channel = dims[1] // upscale_factor // upscale_factor + after_view = symbolic_helper._reshape_helper( + g, + self, + g.op( + "Constant", + value_t=torch.tensor( + [ + -1, + output_channel, + upscale_factor, + upscale_factor, + dims[2], + dims[3], + ] + ), + ), + allowzero=0, + ) + after_transpose = g.op("Transpose", after_view, perm_i=[0, 1, 4, 2, 5, 3]) + return symbolic_helper._reshape_helper( + g, + after_transpose, + g.op( + "Constant", + value_t=torch.tensor( + [ + -1, + output_channel, + dims[2] * upscale_factor, + dims[3] * upscale_factor, + ] + ), + ), + allowzero=0, + ) + + +@_onnx_symbolic("aten::pixel_unshuffle") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype 
+def pixel_unshuffle(g: jit_utils.GraphContext, self, downscale_factor): + dims = symbolic_helper._get_tensor_sizes(self) + if len(dims) != 4: + return symbolic_helper._unimplemented( + "pixel_shuffle", "only support 4d input", self + ) + if any(i is None for i in dims[1:]): + # For dynamic input shapes, two reshapes are performed + reshape_h = symbolic_helper._reshape_helper( + g, + symbolic_helper._unsqueeze_helper(g, self, [3]), + g.op("Constant", value_t=torch.tensor([0, 0, -1, downscale_factor, 0])), + allowzero=0, + ) + reshape_w = symbolic_helper._reshape_helper( + g, + reshape_h, + g.op("Constant", value_t=torch.tensor([0, 0, 0, 0, -1, downscale_factor])), + allowzero=0, + ) + after_transpose = g.op("Transpose", reshape_w, perm_i=[0, 1, 3, 5, 2, 4]) + final_reshape = symbolic_helper._reshape_helper( + g, + after_transpose, + g.op("Constant", value_t=torch.tensor([0, -1, 1, 1, 0, 0])), + allowzero=0, + ) + return symbolic_helper._squeeze_helper(g, final_reshape, [2, 3]) + else: + output_channel = dims[1] * downscale_factor * downscale_factor + after_view = symbolic_helper._reshape_helper( + g, + self, + g.op( + "Constant", + value_t=torch.tensor( + [ + -1, + dims[1], + dims[2] // downscale_factor, + downscale_factor, + dims[3] // downscale_factor, + downscale_factor, + ] + ), + ), + allowzero=0, + ) + after_transpose = g.op("Transpose", after_view, perm_i=[0, 1, 3, 5, 2, 4]) + return symbolic_helper._reshape_helper( + g, + after_transpose, + g.op( + "Constant", + value_t=torch.tensor( + [ + -1, + output_channel, + dims[2] // downscale_factor, + dims[3] // downscale_factor, + ] + ), + ), + allowzero=0, + ) + + +@_beartype.beartype +def _generic_rnn( + g: jit_utils.GraphContext, + variant, + input, + initial_states, + all_weights, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first=None, + batch_sizes=None, +): + warnings.warn( + "Exporting a model to ONNX with a batch_size other than 1, " + + "with a variable length with " + + variant + + " can cause an error " + + "when running the ONNX model with a different batch size. " + + "Make sure to save the model with a batch size of 1, " + + "or define the initial states (h0/c0) as inputs of the model. 
" + ) + + onnxActivations = [ + "Relu", + "Tanh", + "Sigmoid", + "Affine", + "LeakyRelu", + "ThresholdedRelu", + "ScaledTanh", + "HardSigmoid", + "Elu", + "Softsign", + "Softplus", + ] + variantToOnnxActivationMap = dict( + zip([act_fun.lower() for act_fun in onnxActivations], onnxActivations) + ) + weights_per_layer = 4 if has_biases else 2 + # this means that projections are used inside LSTM, so need to tell user that it's not supported + if variant == "LSTM" and len(all_weights) != num_layers * weights_per_layer * ( + 1 + bidirectional + ): + return symbolic_helper._unimplemented("LSTM", "LSTMs with projections", input) + assert len(all_weights) == num_layers * weights_per_layer * (1 + bidirectional) + layer_weights = [ + all_weights[i : i + weights_per_layer] + for i in range(0, len(all_weights), weights_per_layer) + ] + if batch_first: + # batch, seq, feat -> seq, batch, feat + input = g.op("Transpose", input, perm_i=[1, 0, 2]) + if dropout and train: + return symbolic_helper._unimplemented( + "RNN/GRU/LSTM", "dropout in training mode", input + ) + + if variant.startswith("RNN"): + nonlinearity = variantToOnnxActivationMap[variant[4:].lower()] + variant = "RNN" + + w_hh = all_weights[1] + hidden_size = symbolic_helper._get_tensor_dim_size(w_hh, 1) + if hidden_size is None: + return symbolic_helper._unimplemented( + "RNN/GRU/LSTM", "unknown hidden size", input + ) + + unidirectional = not bidirectional + + prev_output = input + + h_outs = [] + if variant == "RNN" or variant == "GRU": + h0 = initial_states + elif variant == "LSTM": + h0, c0 = initial_states + c_outs = [] + + sequence_lens = unused(g) if batch_sizes is None else batch_sizes + + if variant == "GRU": + # pytorch is reset, input, hidden + # onnx is input, reset, hidden + reform_permutation = [(1, 2), (0, 1), (2, 3)] + elif variant == "LSTM": + # pytorch is input, forget, cell, output. + # onnx is input, output, forget, cell. 
+ reform_permutation = [(0, 1), (3, 4), (1, 3)] + + @_beartype.beartype + def reform_weights(g, w, n, intervals): + slices = [ + symbolic_helper._slice_helper(g, w, axes=[0], starts=[x * n], ends=[y * n]) + for x, y in intervals + ] + return g.op("Concat", *slices, axis_i=0) + + @_beartype.beartype + def transform_weights_no_bias(layer_index): + weights = layer_weights[layer_index] + if variant == "RNN": + weight_ih, weight_hh = weights + elif variant == "GRU" or variant == "LSTM": + weight_ih, weight_hh = ( + reform_weights(g, w, hidden_size, reform_permutation) for w in weights + ) + return tuple( + symbolic_helper._unsqueeze_helper(g, x, [0]) for x in (weight_ih, weight_hh) + ) + + @_beartype.beartype + def transform_weights(layer_index): + weights = layer_weights[layer_index] + if variant == "RNN": + weight_ih, weight_hh, bias_ih, bias_hh = weights + elif variant == "GRU" or variant == "LSTM": + weight_ih, weight_hh, bias_ih, bias_hh = ( + reform_weights(g, w, hidden_size, reform_permutation) for w in weights + ) + bias_concat = g.op("Concat", bias_ih, bias_hh, axis_i=0) + return tuple( + symbolic_helper._unsqueeze_helper(g, x, [0]) + for x in (weight_ih, weight_hh, bias_concat) + ) + + @_beartype.beartype + def retrieve_state(x, start, end): + return ( + x + if num_layers == 1 + else symbolic_helper._slice_helper( + g, x, axes=[0], starts=[start], ends=[end] + ) + ) + + for i in range(num_layers): + if unidirectional: + if weights_per_layer == 4: + weight_ih, weight_hh, bias_concat = transform_weights(i) + else: + weight_ih, weight_hh = transform_weights_no_bias(i) + bias_concat = unused(g) + + state_indices = i, i + 1 + else: + if weights_per_layer == 4: + weight_ih_f, weight_hh_f, bias_f = transform_weights(2 * i) + weight_ih_b, weight_hh_b, bias_b = transform_weights(2 * i + 1) + bias_concat = g.op("Concat", bias_f, bias_b, axis_i=0) + else: + weight_ih_f, weight_hh_f = transform_weights_no_bias(2 * i) + weight_ih_b, weight_hh_b = transform_weights_no_bias(2 * i + 1) + bias_concat = unused(g) + + weight_ih = g.op("Concat", weight_ih_f, weight_ih_b, axis_i=0) + weight_hh = g.op("Concat", weight_hh_f, weight_hh_b, axis_i=0) + + state_indices = 2 * i, 2 * i + 2 + + inputs = [prev_output, weight_ih, weight_hh, bias_concat, sequence_lens] + + inputs.append(retrieve_state(h0, *state_indices)) + if variant == "LSTM": + inputs.append(retrieve_state(c0, *state_indices)) + + extra_kwargs = {} if unidirectional else {"direction_s": "bidirectional"} + if variant == "RNN": + if bidirectional: + activation = [nonlinearity, nonlinearity] + else: + activation = [nonlinearity] + + prev_output, h_out = g.op( + "RNN", + *inputs, + outputs=2, + hidden_size_i=hidden_size, + activations_s=activation, + **extra_kwargs, + ) + elif variant == "GRU": + prev_output, h_out = g.op( + "GRU", + *inputs, + outputs=2, + hidden_size_i=hidden_size, + linear_before_reset_i=1, + **extra_kwargs, + ) + elif variant == "LSTM": + prev_output, h_out, c_out = g.op( + "LSTM", *inputs, outputs=3, hidden_size_i=hidden_size, **extra_kwargs + ) + + if bidirectional: + # The ONNX RNN/GRU/LSTM produce an output of dimensions + # seq_len, num_directions, batch, hidden_size + # We have to convert to match pytorch's expected + # seq_len, batch, num_directions * hidden_size + # by first moving num_directions before hidden_size with + # Transpose, and then combining it with hidden_size + # with Reshape. 
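+ # e.g. (seq_len, num_directions, batch, hidden) -> Transpose ->
+ # (seq_len, batch, num_directions, hidden) -> Reshape ->
+ # (seq_len, batch, num_directions * hidden)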
+ prev_output = g.op("Transpose", prev_output, perm_i=[0, 2, 1, 3]) + prev_output = symbolic_helper._reshape_helper( + g, + prev_output, + g.op("Constant", value_t=torch.LongTensor([0, 0, -1])), + allowzero=0, + ) + else: + prev_output = symbolic_helper._squeeze_helper(g, prev_output, [1]) + + h_outs.append(h_out) + if variant == "LSTM": + c_outs.append(c_out) + if batch_first: + # seq, batch, num_directions * hidden_size -> batch, seq, num_directions * hidden_size + prev_output = g.op("Transpose", prev_output, perm_i=[1, 0, 2]) + h_outs = h_out if num_layers == 1 else g.op("Concat", *h_outs, axis_i=0) + if variant == "RNN" or variant == "GRU": + return prev_output, h_outs + elif variant == "LSTM": + c_outs = c_out if num_layers == 1 else g.op("Concat", *c_outs, axis_i=0) + return prev_output, h_outs, c_outs + + +@symbolic_helper.parse_args("v", "v", "v", "i", "i", "f", "i", "i", "i") +@_beartype.beartype +def _lstm_full( + g: jit_utils.GraphContext, + input, + hidden_v, + weight_v, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, +): + hidden, weight = symbolic_helper._unpack_list( + hidden_v + ), symbolic_helper._unpack_list(weight_v) + return _generic_rnn( + g, + "LSTM", + input, + hidden, + weight, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, + ) + + +@symbolic_helper.parse_args("v", "v", "v", "v", "i", "i", "f", "i", "i") +@_beartype.beartype +def _lstm_packed( + g: jit_utils.GraphContext, + input, + batch_sizes, + hidden_v, + weight_v, + has_biases, + num_layers, + dropout, + train, + bidirectional, +): + hidden, weight = symbolic_helper._unpack_list( + hidden_v + ), symbolic_helper._unpack_list(weight_v) + return _generic_rnn( + g, + "LSTM", + input, + hidden, + weight, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_sizes=batch_sizes, + ) + + +@_onnx_symbolic("aten::lstm") +@_beartype.beartype +def lstm(g: jit_utils.GraphContext, *args): + if symbolic_helper._is_tensor_list(args[3]): + return _lstm_packed(g, *args) + else: + return _lstm_full(g, *args) + + +@_onnx_symbolic("aten::lstm_cell") +@_beartype.beartype +def lstm_cell(g: jit_utils.GraphContext, self, hidden, w_ih, w_hh, b_ih, b_hh): + input = symbolic_helper._unsqueeze_helper(g, self, [0]) + hidden = symbolic_helper._unpack_list(hidden) + hidden = [symbolic_helper._unsqueeze_helper(g, x, [0]) for x in hidden] + weight = ( + (w_ih, w_hh, b_ih, b_hh) if symbolic_helper._is_tensor(b_ih) else (w_ih, w_hh) + ) + has_biases = True if symbolic_helper._is_tensor(b_ih) else False + _, h_outs, c_outs = _generic_rnn( + g, + "LSTM", + input, + hidden, + weight, + has_biases, + num_layers=1, + dropout=0, + train=0, + bidirectional=False, + batch_first=False, + ) + return symbolic_helper._squeeze_helper( + g, h_outs, [0] + ), symbolic_helper._squeeze_helper(g, c_outs, [0]) + + +@_onnx_symbolic("aten::gru", decorate=[_apply_params("GRU"), _export("gru")]) +@_onnx_symbolic( + "aten::rnn_tanh", decorate=[_apply_params("RNN_TANH"), _export("rnn_tanh")] +) +@_onnx_symbolic( + "aten::rnn_relu", decorate=[_apply_params("RNN_RELU"), _export("rnn_relu")] +) +def _one_hidden_rnn(kind: str): + @symbolic_helper.parse_args("v", "v", "v", "i", "i", "f", "i", "i", "i") + @_beartype.beartype + def _rnn_full( + g, + input, + hidden, + weight_v, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, + ): + weight = symbolic_helper._unpack_list(weight_v) + return _generic_rnn( + g, + kind, + input, + hidden, + weight, + has_biases, + 
num_layers, + dropout, + train, + bidirectional, + batch_first, + ) + + @symbolic_helper.parse_args("v", "v", "v", "v", "i", "i", "f", "i", "i") + def _rnn_packed( + g, + input, + batch_sizes, + hidden, + weight_v, + has_biases, + num_layers, + dropout, + train, + bidirectional, + ): + weight = symbolic_helper._unpack_list(weight_v) + return _generic_rnn( + g, + kind, + input, + hidden, + weight, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_sizes=batch_sizes, + ) + + def symbolic(g, *args): + if symbolic_helper._is_tensor_list(args[3]): + return _rnn_packed(g, *args) + else: + return _rnn_full(g, *args) + + return symbolic + + +@_onnx_symbolic("aten::_dim_arange") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def _dim_arange(g: jit_utils.GraphContext, like, dim): + like_shape = g.op("Shape", like) + stop = g.op( + "Gather", like_shape, g.op("Constant", value_t=torch.tensor(dim)), axis_i=0 + ) + if symbolic_helper.is_caffe2_aten_fallback(): + return g.op("_caffe2::Range", stop) + else: + # aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory) + return arange(g, stop, 4, None, None, None) + + +@_onnx_symbolic("aten::detach") +@_beartype.beartype +def detach(g: jit_utils.GraphContext, input): + # Erase aten::detach nodes because ONNX is inference only + return input + + +@_onnx_symbolic("aten::contiguous") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def contiguous(g: jit_utils.GraphContext, input, memory_format): + if memory_format > 2: # allower values are any, preserve and contiguous_format + raise errors.SymbolicValueError( + "onnx memory_format support is not implemented", input + ) + return input + + +@_onnx_symbolic("aten::_pack_padded_sequence") +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def _pack_padded_sequence(g: jit_utils.GraphContext, input, lengths, batch_first): + # Currently there is no PackPadded operator in ONNX. We rely on an + # optimization pass to remove this later. It is an error if all + # PackPadded operators cannot be optimized out. + if batch_first: + input = g.op("Transpose", input, perm_i=[1, 0, 2]) + if not lengths.type().isSubtypeOf(torch._C.TensorType.get()): + raise errors.SymbolicValueError( + "'lengths' must be a Tensor for ONNX export", input + ) + # We know it's a TensorType so this check is now safe. + # It's really only necessary because those operators expand to something that + # only works with int32 types in Caffe2... 
+ if ( + _type_utils.JitScalarType.from_value( + lengths, _type_utils.JitScalarType.UNDEFINED + ) + != _type_utils.JitScalarType.INT + ): + lengths = g.op("Cast", lengths, to_i=_C_onnx.TensorProtoDataType.INT32) + return g.op("prim::PackPadded", input, lengths, outputs=2) + + +@_onnx_symbolic("aten::_pad_packed_sequence") +@symbolic_helper.parse_args("v", "v", "i", "t", "v") +@_beartype.beartype +def _pad_packed_sequence( + g: jit_utils.GraphContext, + data, + batch_sizes, + batch_first, + padding_value, + total_length, +): + # Ignore total_length as it is not supported in _symbolic_pad_packed_sequence + # It is only useful/used when training using data_parallel model, so + # It shouldn't be relevant for ONNX anyway + data, lengths = g.op("prim::PadPacked", data, batch_sizes, outputs=2) + if batch_first: + data = g.op("Transpose", data, perm_i=[1, 0, 2]) + return data, lengths + + +@_onnx_symbolic("aten::randint") +@_beartype.beartype +def randint(g: jit_utils.GraphContext, low, high, shapes, dtype, *options): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + low_i = symbolic_helper._get_const(low, "i", "low") + high_i = symbolic_helper._get_const(high, "i", "high") + if dtype is None: + scalar_type = _type_utils.JitScalarType.INT64 + else: + scalar_type = _type_utils.JitScalarType(dtype) + if low_i is None: + raise symbolic_helper._onnx_unsupported("randint", low) + if high_i is None: + raise symbolic_helper._onnx_unsupported("randint", high) + + shape = symbolic_helper._maybe_get_const(shapes, "is") + if symbolic_helper._is_value(shape): + shape_const = g.op( + "ConstantOfShape", + shapes, + value_t=torch.tensor([0], dtype=torch.float), + ) + randn = g.op( + "RandomUniformLike", + shape_const, + low_f=low_i, + high_f=high_i, + ) + else: + randn = g.op( + "RandomUniform", + shape_i=shape, + low_f=low_i, + high_f=high_i, + ) + + # cast to integer type + int_dtype = _type_utils.JitScalarType.INT64 + randint = g.op("Cast", randn, to_i=int_dtype.onnx_type()) + if int_dtype != scalar_type: + randint = g.op("Cast", randint, to_i=scalar_type.onnx_type()) + return randint + + +@_onnx_symbolic("aten::randint_like") +@_beartype.beartype +def randint_like(g: jit_utils.GraphContext, self, low, high, dtype, *options): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + low_i = symbolic_helper._get_const(low, "i", "low") + high_i = symbolic_helper._get_const(high, "i", "high") + if dtype is None: + scalar_type = _type_utils.JitScalarType.INT64 + else: + scalar_type = _type_utils.JitScalarType(dtype) + if low_i is None: + raise symbolic_helper._onnx_unsupported("randint", low) + if high_i is None: + raise symbolic_helper._onnx_unsupported("randint", high) + + randn = g.op( + "RandomUniformLike", + self, + low_f=low_i, + high_f=high_i, + ) + + # cast to integer type + int_dtype = _type_utils.JitScalarType.INT64 + randint = g.op("Cast", randn, to_i=int_dtype.onnx_type()) + if int_dtype != scalar_type: + randint = g.op("Cast", randint, to_i=scalar_type.onnx_type()) + return randint + + +@_onnx_symbolic("aten::randn") +@_beartype.beartype +def randn(g: jit_utils.GraphContext, shapes, dtype, *options): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + shape = symbolic_helper._maybe_get_const(shapes, "is") + if symbolic_helper._is_value(shape): + shape_const = g.op( + "ConstantOfShape", + shapes, + value_t=torch.tensor([0], dtype=torch.float), + ) + return 
g.op( + "RandomNormalLike", + shape_const, + dtype_i=scalar_type.onnx_type(), + ) + return g.op( + "RandomNormal", + shape_i=shape, + dtype_i=scalar_type.onnx_type(), + ) + + +@_onnx_symbolic("aten::rand") +@_beartype.beartype +def rand(g: jit_utils.GraphContext, shapes, dtype, *options): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + shape = symbolic_helper._maybe_get_const(shapes, "is") + if symbolic_helper._is_value(shape): + shape_const = g.op( + "ConstantOfShape", + shapes, + value_t=torch.tensor([0], dtype=torch.float), + ) + return g.op( + "RandomUniformLike", + shape_const, + dtype_i=scalar_type.onnx_type(), + ) + return g.op( + "RandomUniform", + shape_i=shape, + dtype_i=scalar_type.onnx_type(), + ) + + +@_onnx_symbolic("aten::randn_like") +@_beartype.beartype +def randn_like( + g: jit_utils.GraphContext, + self, + dtype, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.FLOAT + ) + else: + scalar_type = _type_utils.JitScalarType(dtype) + return g.op("RandomNormalLike", self, dtype_i=scalar_type.onnx_type()) + + +@_onnx_symbolic("aten::rand_like") +@_beartype.beartype +def rand_like( + g: jit_utils.GraphContext, + self, + dtype, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + dtype = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.FLOAT + ) + return g.op( + "RandomUniformLike", self, dtype_i=_type_utils.JitScalarType(dtype).onnx_type() + ) + + +@_onnx_symbolic("aten::rrelu") +@symbolic_helper.parse_args("v", "f", "f", "i", "none") +@_beartype.beartype +def rrelu(g: jit_utils.GraphContext, input, lower, upper, training, generator): + if not training: + slope = (upper + lower) / 2.0 + return g.op("LeakyRelu", input, alpha_f=slope) + p = g.op("RandomUniformLike", input, high_f=upper, low_f=lower) + return g.op("PRelu", input, p) + + +@_onnx_symbolic("aten::bernoulli") +@_beartype.beartype +def bernoulli(g: jit_utils.GraphContext, input, p=None, generator=None, out=None): + if out is not None and not symbolic_helper._is_none(out): + symbolic_helper._unimplemented( + "Bernoulli", "out parameter is not supported for bernoulli", input + ) + if generator is not None and not symbolic_helper._is_none(generator): + symbolic_helper._unimplemented( + "Bernoulli", "generator is not supported for bernoulli", input + ) + + dtype = _type_utils.JitScalarType.from_value( + input, _type_utils.JitScalarType.UNDEFINED + ) + if dtype == _type_utils.JitScalarType.UNDEFINED: + return symbolic_helper._unimplemented( + "Bernoulli", "input dtype not accessible", input + ) + + rands = g.op( + "RandomUniformLike", + input, + high_f=1.0, + low_f=0.0, + dtype_i=dtype.onnx_type(), + ) + prob = p if p is not None and not symbolic_helper._is_none(p) else input + output = g.op("Less", rands, prob) + return g.op("Cast", output, to_i=dtype.onnx_type()) + + +@_onnx_symbolic("aten::log_sigmoid") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def log_sigmoid(g: jit_utils.GraphContext, input): + p = g.op("Sigmoid", input) + return g.op("Log", p) + + +@_onnx_symbolic("aten::erf") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def erf(g: 
jit_utils.GraphContext, input): + return g.op("Erf", input) + + +@_onnx_symbolic("aten::flatten") +@symbolic_helper.quantized_args(True, False, False) +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def flatten(g: jit_utils.GraphContext, input, start_dim, end_dim): + dim = symbolic_helper._get_tensor_rank(input) + if dim is None: + return symbolic_helper._unimplemented( + "dim", + "ONNX and PyTorch use different strategies to split the input. " + "Input rank must be known at export time.", + input, + ) + + if dim == 0: + return symbolic_helper._reshape_helper(g, input, [1]) + if dim == 1: + return g.op("Identity", input) + # TODO: remove this as onnx opset 11 spec allows negative axes + if end_dim < 0: + end_dim = dim + end_dim + # use ONNX's Flatten operator for cases where the output shape is 2D + if start_dim == 1 and end_dim == dim - 1: + return g.op("Flatten", input, axis_i=start_dim) + if start_dim == 0 and end_dim == dim - 2: + return g.op("Flatten", input, axis_i=end_dim + 1) + + return symbolic_helper._flatten_helper(g, input, start_dim, end_dim, dim) + + +@_onnx_symbolic("aten::nonzero") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def nonzero(g: jit_utils.GraphContext, input): + """Emitted from `torch.nonzero(x, as_tuple=False)`""" + return t(g, g.op("NonZero", input)) + + +@_onnx_symbolic("aten::nonzero_numpy") +# Emitted from `torch.nonzero(x, as_tuple=True)` +@_beartype.beartype +def nonzero_numpy(g: jit_utils.GraphContext, input, _outputs=None): + return unbind(g, nonzero(g, input), 1, _outputs=_outputs) + + +@_onnx_symbolic("aten::isnan") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def isnan(g: jit_utils.GraphContext, input): + output = g.op("IsNaN", input) + return output + + +@_onnx_symbolic("aten::any") +@_beartype.beartype +def _any(g: jit_utils.GraphContext, *args): + # aten::any(Tensor self) + if len(args) == 1: + input = args[0] + dim, keepdim = None, 0 + # aten::any(Tensor self, int[]? dim, bool keepdim) + else: + input, dim, keepdim = args + # Can be int list or single int + dim = symbolic_helper._parse_arg(dim, "t") + dim = [int(d) for d in dim.view(-1)] + keepdim = symbolic_helper._parse_arg(keepdim, "i") + input = g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT64) + input_sum = symbolic_helper._reducesum_helper( + g, input, axes_i=dim, keepdims_i=keepdim + ) + return gt(g, input_sum, g.op("Constant", value_t=torch.tensor(0, dtype=torch.long))) + + +@_onnx_symbolic("aten::all") +@_beartype.beartype +def _all(g: jit_utils.GraphContext, *args): + input = g.op("Not", args[0]) + # aten::all(Tensor self) + if len(args) == 1: + return g.op("Not", _any(g, input)) + # aten::all(Tensor self, int[]? 
dim, bool keepdim) + else: + return g.op("Not", _any(g, input, args[1], args[2])) + + +@_onnx_symbolic("aten::narrow") +@symbolic_helper.parse_args("v", "i", "i", "i") +@_beartype.beartype +def narrow(g: jit_utils.GraphContext, input, dim, start, length): + return symbolic_helper._slice_helper( + g, input, axes=[dim], starts=[start], ends=[start + length] + ) + + +@_onnx_symbolic("aten::argmax") +@symbolic_helper.parse_args("v", "v", "b") +@_beartype.beartype +def argmax( + g: jit_utils.GraphContext, + input: torch._C.Value, + dim: torch._C.Value, + keepdim: bool, +): + return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMax") + + +@_onnx_symbolic("aten::argmin") +@symbolic_helper.parse_args("v", "v", "b") +@_beartype.beartype +def argmin( + g: jit_utils.GraphContext, + input: torch._C.Value, + dim: torch._C.Value, + keepdim: bool, +): + return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMin") + + +@_onnx_symbolic("aten::scatter") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def scatter(g: jit_utils.GraphContext, self, dim, index, src): + src_type = _type_utils.JitScalarType.from_value( + src, _type_utils.JitScalarType.UNDEFINED + ) + src = symbolic_helper._maybe_get_scalar(src) + if symbolic_helper._is_value(src): + return g.op("Scatter", self, index, src, axis_i=dim) + else: + # Check if scalar "src" has same type as self (PyTorch allows different + # type for scalar src (but not when src is tensor)). If not, insert Cast node. + self_scalar_type = _type_utils.JitScalarType.from_value(self) + if self_scalar_type != src_type: + src = g.op("Cast", src, to_i=self_scalar_type.onnx_type()) + return g.op("Scatter", self, index, expand_as(g, src, index), axis_i=dim) + + +@_onnx_symbolic("aten::scatter_add") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def scatter_add(g: jit_utils.GraphContext, self, dim, index, src): + scalar_type = symbolic_helper._try_get_scalar_type(self) + if scalar_type is None: + return symbolic_helper._unimplemented( + "scatter_add", "input dtype not accessible", self + ) + sizes = symbolic_helper._get_tensor_sizes(self, allow_nonstatic=False) + if sizes: + to_add = g.op("Constant", value_t=torch.zeros(sizes, dtype=scalar_type.dtype())) + else: + to_add = zeros_like(g, self, scalar_type) + to_add = symbolic_helper._scatter_helper(g, to_add, dim, index, src) + return add(g, self, to_add) + + +@_onnx_symbolic("aten::log2") +@_beartype.beartype +def log2(g: jit_utils.GraphContext, self): + _ln2 = 0.693147180559945309 + return g.op("Div", log(g, self), g.op("Constant", value_t=torch.tensor(_ln2))) + + +@_onnx_symbolic("aten::is_floating_point") +@_beartype.beartype +def is_floating_point(g: jit_utils.GraphContext, self): + if symbolic_helper._is_fp(self): + return g.op("Constant", value_t=torch.BoolTensor([1])) + return g.op("Constant", value_t=torch.BoolTensor([0])) + + +@_onnx_symbolic("aten::__is_") +@_beartype.beartype +def __is_(g: jit_utils.GraphContext, self, other): + if symbolic_helper._is_none(other): + if symbolic_helper._is_none(self): + return g.op("Constant", value_t=torch.BoolTensor([1])) + return g.op("Constant", value_t=torch.BoolTensor([0])) + return eq(g, self, other) + + +@_onnx_symbolic("aten::__isnot_") +@wrap_logical_op_with_negation +@_beartype.beartype +def __isnot_(g: jit_utils.GraphContext, self, other): + return __is_(g, self, other) + + +@_onnx_symbolic("aten::one_hot") +@_beartype.beartype +def one_hot(g: jit_utils.GraphContext, self, num_classes): + 
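+ # ONNX OneHot takes (indices, depth, values) with values = [off_value, on_value];
+ # depth (num_classes) is cast to INT64 below because onnxruntime only accepts a
+ # limited set of type combinations for OneHot.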
values = g.op("Constant", value_t=torch.LongTensor([0, 1])) + # onnxruntime supports limited type combinations for OneHot. + if _type_utils.JitScalarType.from_value( + num_classes, _type_utils.JitScalarType.UNDEFINED + ) in { + _type_utils.JitScalarType.UINT8, + _type_utils.JitScalarType.INT8, + _type_utils.JitScalarType.INT, + _type_utils.JitScalarType.INT16, + }: + num_classes = g.op("Cast", num_classes, to_i=_C_onnx.TensorProtoDataType.INT64) + return g.op("OneHot", self, num_classes, values, axis_i=-1) + + +@_onnx_symbolic("aten::gather") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def gather(g: jit_utils.GraphContext, self, dim, index, sparse_grad=False): + if symbolic_helper._maybe_get_const(sparse_grad, "i"): + return symbolic_helper._unimplemented("gather", "sparse_grad == True", self) + # NOTE: This workaround is needed since GatherElement is only supported + # since opset 11, and Gather in ONNX is not the same as torch.gather. + scalar_type = _type_utils.JitScalarType.from_value(self) + values = g.op("Constant", value_t=torch.LongTensor([0, 1])) + depth = size(g, self, g.op("Constant", value_t=torch.LongTensor([dim]))) + index = g.op( + "Cast", + g.op("OneHot", index, depth, values, axis_i=dim), + to_i=scalar_type.onnx_type(), + ) + mul = g.op("Mul", symbolic_helper._unsqueeze_helper(g, self, [dim + 1]), index) + return symbolic_helper._reducesum_helper(g, mul, axes_i=[dim], keepdims_i=0) + + +@symbolic_helper.parse_args("v", "is", "i", "i") +@_beartype.beartype +def _var_mean(g: jit_utils.GraphContext, input, dim, correction, keepdim): + if dim is None: + mean = g.op("ReduceMean", input, keepdims_i=0) + t_mean = mean + num_elements = numel(g, input) + else: + mean = g.op("ReduceMean", input, axes_i=dim, keepdims_i=keepdim) + t_mean = g.op("ReduceMean", input, axes_i=dim, keepdims_i=1) + redudced_dims = g.op("Shape", input) + # dim could contain one or multiple dimensions + redudced_dims = g.op( + "Gather", + redudced_dims, + g.op("Constant", value_t=torch.tensor(dim)), + axis_i=0, + ) + num_elements = g.op("ReduceProd", redudced_dims, keepdims_i=0) + sub_v = g.op("Sub", input, t_mean) + sqr_sub = g.op("Mul", sub_v, sub_v) + keepdim_mean = 0 if dim is None else keepdim + var = g.op("ReduceMean", sqr_sub, axes_i=dim, keepdims_i=keepdim_mean) + # Correct bias in calculating variance, by dividing it over (N - correction) instead on N + if correction is None: + correction = 1 + if correction != 0: + num_elements = g.op( + "Cast", num_elements, to_i=_C_onnx.TensorProtoDataType.FLOAT + ) + one = g.op("Constant", value_t=torch.tensor(correction, dtype=torch.float)) + mul = g.op("Mul", var, num_elements) + var = g.op("Div", mul, g.op("Sub", num_elements, one)) + return var, mean + + +@_onnx_symbolic("aten::std") +@_beartype.beartype +def std(g: jit_utils.GraphContext, input, *args): + var, _ = var_mean(g, input, *args) + return g.op("Sqrt", var) + + +@_onnx_symbolic("aten::var") +@_beartype.beartype +def var(g: jit_utils.GraphContext, input, *args): + var, _ = var_mean(g, input, *args) + return var + + +@_onnx_symbolic("aten::var_mean") +@_beartype.beartype +def var_mean(g: jit_utils.GraphContext, input, *args): + # var_mean (and all variance-related functions) has multiple signatures, so need to manually figure + # out the correct arguments: + # aten::var_mean(Tensor self, bool unbiased) + # aten::var_mean(Tensor self, int[1] dim, bool unbiased, bool keepdim=False) + # aten::var_mean(Tensor self, int[1]? dim=None, *, int? 
correction=None, bool keepdim=False) + if len(args) == 1: + return _var_mean(g, input, None, args[0], None) + else: + return _var_mean(g, input, *args) + + +@_onnx_symbolic("aten::std_mean") +@_beartype.beartype +def std_mean(g: jit_utils.GraphContext, input, *args): + var, mean = var_mean(g, input, *args) + return g.op("Sqrt", var), mean + + +@_onnx_symbolic("aten::logsumexp") +@symbolic_helper.parse_args("v", "is", "i") +@_beartype.beartype +def logsumexp(g: jit_utils.GraphContext, input, dim, keepdim): + return g.op("ReduceLogSumExp", input, axes_i=dim, keepdims_i=keepdim) + + +@_onnx_symbolic("aten::arange") +@_beartype.beartype +def arange(g: jit_utils.GraphContext, *args): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("arange", *args) + + @_beartype.beartype + def _get_arange_dtype(dtype): + dtype = symbolic_helper._maybe_get_const(dtype, "i") + return dtype + + @_beartype.beartype + def _float_step_convert(range_tensor): + if symbolic_helper._is_fp(range_tensor): + range_tensor = g.op( + "Cast", + g.op("Ceil", range_tensor), + to_i=_type_utils.JitScalarType.INT64.onnx_type(), + ) + return range_tensor + + if len(args) == 2 or len(args) == 5: + if len(args) == 2: + # aten::arange(Scalar end, Tensor out) + dtype = None + else: + # aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory) + dtype = _get_arange_dtype(args[1]) + dtype, end, start, step = symbolic_helper._arange_cast_helper( + g, end=args[0], dtype=dtype + ) + end = symbolic_helper._unsqueeze_helper(g, end, [0]) + range_tensor = _float_step_convert(end) + arange_tensor = symbolic_helper._squeeze_helper( + g, nonzero(g, ones(g, range_tensor, dtype, None, None)), [1] + ) + return g.op( + "Cast", arange_tensor, to_i=_type_utils.JitScalarType(dtype).onnx_type() + ) + elif len(args) == 4 or len(args) == 7: + if len(args) == 4: + # aten::arange(Scalar start, Scalar end, Scalar step, Tensor out) + dtype = None + else: + # aten::arange(Scalar start, Scalar end, Scalar step, ScalarType dtype, Layout, Device, bool pin_memory) + dtype = _get_arange_dtype(args[3]) + dtype, end, start, step = symbolic_helper._arange_cast_helper( + g, start=args[0], end=args[1], step=args[2], dtype=dtype + ) + step = symbolic_helper._unsqueeze_helper(g, step, [0]) + end = symbolic_helper._unsqueeze_helper(g, end, [0]) + start = symbolic_helper._unsqueeze_helper(g, start, [0]) + range_tensor = _float_step_convert(g.op("Div", g.op("Sub", end, start), step)) + arange_tensor = symbolic_helper._squeeze_helper( + g, nonzero(g, ones(g, range_tensor, None, None, None)), [1] + ) + arange_tensor = g.op("Add", g.op("Mul", arange_tensor, step), start) + return g.op( + "Cast", arange_tensor, to_i=_type_utils.JitScalarType(dtype).onnx_type() + ) + elif len(args) == 6: + # aten::arange(Scalar start, Scalar end, ScalarType dtype, Layout, Device, bool pin_memory) + dtype = _get_arange_dtype(args[2]) + dtype, end, start, step = symbolic_helper._arange_cast_helper( + g, start=args[0], end=args[1], dtype=dtype + ) + end = symbolic_helper._unsqueeze_helper(g, end, [0]) + start = symbolic_helper._unsqueeze_helper(g, start, [0]) + range_tensor = _float_step_convert(g.op("Sub", end, start)) + arange_tensor = g.op( + "Add", + symbolic_helper._squeeze_helper( + g, nonzero(g, ones(g, range_tensor, dtype, *(args[3:]))), [1] + ), + start, + ) + return g.op( + "Cast", arange_tensor, to_i=_type_utils.JitScalarType(dtype).onnx_type() + ) + + return symbolic_helper._unimplemented("aten::arange", f"with {len(args)} arguments") + + 
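Since opset 9 has no native Range operator, the arange symbolic above synthesizes the sequence from Ones + NonZero: the nonzero indices of a ones tensor of length ceil((end - start) / step) are exactly 0 .. n-1, which are then scaled by step and shifted by start. A minimal eager-mode sketch of that trick (plain PyTorch, purely illustrative; arange_via_nonzero is not part of the exporter):

import math

import torch


def arange_via_nonzero(start: float, end: float, step: float) -> torch.Tensor:
    # Length of the range, mirroring the Ceil + Cast-to-INT64 emitted by the symbolic.
    n = int(math.ceil((end - start) / step))
    # NonZero over a ones tensor yields the index sequence 0, 1, ..., n-1.
    idx = torch.nonzero(torch.ones(n)).squeeze(1)
    return idx * step + start


assert torch.equal(arange_via_nonzero(2.0, 11.0, 3.0), torch.arange(2.0, 11.0, 3.0))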
+@_onnx_symbolic("aten::linspace") +@_beartype.beartype +def linspace( + g: jit_utils.GraphContext, start, end, steps, dtype, layout, device, pin_memory +): + range_tensor = symbolic_helper._arange_helper(g, steps, None) + step = div( + g, + sub(g, end, start), + sub(g, steps, g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))), + ) + return add(g, mul(g, range_tensor, step), start) + + +@_onnx_symbolic("aten::lift") +@_beartype.beartype +def lift(g: jit_utils.GraphContext, self): + # at::lift() is a no-op from the perspective of tracing for onnx + return self + + +@_onnx_symbolic("aten::masked_fill") +@_beartype.beartype +def masked_fill(g: jit_utils.GraphContext, self, mask, value): + mask = g.op("Cast", mask, to_i=_C_onnx.TensorProtoDataType.BOOL) + value = symbolic_helper._maybe_get_scalar(value) + return g.op("Where", mask, symbolic_helper._if_scalar_type_as(value, self), self) + + +@_onnx_symbolic("aten::masked_fill_") +@_beartype.beartype +def masked_fill_(g: jit_utils.GraphContext, self, mask, value): + return masked_fill(g, self, mask, value) + + +@_onnx_symbolic("aten::index") +@_beartype.beartype +def index(g: jit_utils.GraphContext, self, index): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("index", self, index, overload_name="Tensor") + + if symbolic_helper._is_packed_list(index): + indices = symbolic_helper._unpack_list(index) + else: + indices = [index] + + @_beartype.beartype + def try_mask_to_index(index): + if not symbolic_helper._is_none(index) and ( + _type_utils.JitScalarType.from_value( + index, _type_utils.JitScalarType.UNDEFINED + ) + == _type_utils.JitScalarType.UINT8 + or symbolic_helper._is_bool(index) + ): + if g.opset < 9: + raise errors.SymbolicValueError( + "Exporting masked indices are only supported after ONNX opset 9.", + self, + ) + warnings.warn( + "Exporting aten::index operator with indices of type Byte. " + "Only 1-D indices are supported. In any other case, " + "this will produce an incorrect ONNX graph." + ) + index = symbolic_helper._squeeze_helper(g, nonzero(g, index), [1]) + return index + + indices = [try_mask_to_index(idx) for idx in indices] + if len(indices) == 1: + return symbolic_helper._select_helper( + g, self, 0, indices[0], apply_reshape=False + ) + else: + # Multiple tensors as indices. Each tensor could either be + # 1. prim::Constant() + # representing ":" in python indexing. E.g. tensor[:, :] + # 2. prim::Constant[value=...] or tensor output + # representing advanced indexing. E.g. tensor[[0, 1], [2, 0]]. + # For more info on advanced indexing, + # check https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing + + # Consider a general case of + # t: [x_1, y_1, y_2, ..., x_m, ..., y_n] + # where t is a tensor of rank m+n, {x_i} are axes where tensor index is provided, and {y_i} are axes for ":". + # Same results can be achieved through transposing t into + # t: [x_1, x_2, ..., x_m, y_1, y_2, ..., y_n] + # and use gatherND. However ONNX does not have gatherND, to use 1d gather we'll need to flatten t + # and process the tensor indices. + # t: [x_1 * x_2 * ... * x_m, y_1 * y_2 * ... * y_n] + # tensor index = \sum_{i=1}^m (ind_i * \prod_{j=i+1}^m (x_j)) + # After gather, reshape and transpose back. 
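+ # Worked example: t of shape (4, 10, 5) indexed as t[i0, :, i2].
+ # The transpose below moves the indexed axes to the front, giving shape (4, 5, 10);
+ # Flatten(axis=2) folds them into (20, 10); and the gather position for a pair
+ # (i0, i2) is i0 * 5 + i2, i.e. ind_1 * x_2 + ind_2, which is exactly what the
+ # cum_adv_index / multiplier accumulation below computes.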
+ adv_idx_indices = [ + i for i, idx in enumerate(indices) if not symbolic_helper._is_none(idx) + ] + + if len(adv_idx_indices) == 0: + return self + elif len(adv_idx_indices) == 1: + return index_select( + g, self, adv_idx_indices[0], indices[adv_idx_indices[0]] + ) + else: + rank = symbolic_helper._get_tensor_rank(self) + if rank is None: + return symbolic_helper._unimplemented( + "aten::index", + "operator of advanced indexing on tensor of unknown rank. " + "Try turning on shape inference during export: " + "torch.onnx._export(..., onnx_shape_inference=True).", + self, + ) + # TODO: If indexing is supported natively in ONNX in future opsets, + # update the warning to recommend exporting with higher opset version. + warnings.warn( + "Exporting aten::index operator of advanced indexing in opset " + f"{GLOBALS.export_onnx_opset_version}" + " is achieved by combination of multiple ONNX operators, " + "including Reshape, Transpose, Concat, and Gather. " + "If indices include negative values, the exported graph will produce incorrect results." + ) + adv_idx_count = len(adv_idx_indices) + shape_tensor = _shape_as_tensor(g, self) + dim_tensor_list = [ + g.op( + "Gather", + shape_tensor, + g.op("Constant", value_t=torch.LongTensor([dim])), + axis_i=0, + ) + for dim in range(rank) + ] + + self = g.op( + "Transpose", + self, + perm_i=adv_idx_indices + + [i for i in range(rank) if i not in adv_idx_indices], + ) + self = g.op("Flatten", self, axis_i=adv_idx_count) + + # Note that tensor indices will be broadcasted while accumulating. Thus we get the final subarray shape as well. + cum_adv_index = indices[adv_idx_indices[-1]] + multiplier = dim_tensor_list[adv_idx_indices[-1]] + for i in range(adv_idx_count - 2, -1, -1): + adv_index = g.op("Mul", indices[adv_idx_indices[i]], multiplier) + cum_adv_index = g.op("Add", cum_adv_index, adv_index) + multiplier = g.op( + "Mul", multiplier, dim_tensor_list[adv_idx_indices[i]] + ) + + # perform gather + self = index_select(g, self, 0, cum_adv_index) + + cum_adv_index_shape_tensor = _shape_as_tensor(g, cum_adv_index) + # check if all advanced indices are consecutive. + # Refer to https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#combining-advanced-and-basic-indexing + # to understand how the subarray position is decided. + if adv_idx_indices == list( + range(adv_idx_indices[0], adv_idx_indices[-1] + 1) + ): + # unfold regular index axes + folded_adv_idx_shape_list = [ + g.op("Constant", value_t=torch.LongTensor([-1])) + ] + [ + dim_tensor_list[i] for i in range(rank) if i not in adv_idx_indices + ] + folded_adv_idx_shape = g.op( + "Concat", *folded_adv_idx_shape_list, axis_i=0 + ) + self = symbolic_helper._reshape_helper(g, self, folded_adv_idx_shape) + + # Transpose folded advanced indexed axis to its original location. 
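+ # After the reshape above, the folded advanced-index axis sits at position 0;
+ # this permutation moves it back to position adv_idx_indices[0] while keeping
+ # the remaining axes in their original relative order.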
+ adv_idx_permute = ( + list(range(1, adv_idx_indices[0] + 1)) + + [0] + + list(range(adv_idx_indices[0] + 1, rank - adv_idx_count + 1)) + ) + self = g.op("Transpose", self, perm_i=adv_idx_permute) + + # unfold advanced index axes + final_shape_list = ( + [dim_tensor_list[i] for i in range(adv_idx_indices[0])] + + [cum_adv_index_shape_tensor] + + [ + dim_tensor_list[i] + for i in range(adv_idx_indices[0], rank) + if i not in adv_idx_indices + ] + ) + final_shape = g.op("Concat", *final_shape_list, axis_i=0) + else: + final_shape = g.op( + "Concat", + cum_adv_index_shape_tensor, + *[ + dim_tensor_list[i] + for i in range(rank) + if i not in adv_idx_indices + ], + axis_i=0, + ) + + return symbolic_helper._reshape_helper(g, self, final_shape) + + +@_onnx_symbolic("aten::linalg_norm") +@symbolic_helper.parse_args("v", "v", "is", "b", "v") +@_beartype.beartype +def linalg_norm( + g: jit_utils.GraphContext, + self: torch._C.Value, + ord: torch._C.Value, + dim: Optional[Sequence[int]], + keepdim: bool, + dtype: torch._C.Value, +): + # Conditions based on https://pytorch.org/docs/stable/generated/torch.linalg.norm.html + ord_value = None + if dim is None: + if symbolic_helper._is_none(ord): + self = symbolic_helper._reshape_helper(g, self, [-1]) + ord = g.op("Constant", value_t=torch.LongTensor([2])) + self_dim = symbolic_helper._get_tensor_rank(self) + if self_dim is None: + return symbolic_helper._unimplemented( + "dim", "Input rank must be known at export time.", self + ) + if self_dim == 1: + ord_value = symbolic_helper._parse_arg(ord, "f") + else: + dim = [0, 1] + else: + if len(dim) == 1: + if symbolic_helper._is_none(ord): + ord = g.op("Constant", value_t=torch.LongTensor([2])) + ord_value = symbolic_helper._parse_arg(ord, "f") + if ord_value: + return linalg_vector_norm(g, self, ord_value, dim, keepdim, dtype) + return linalg_matrix_norm(g, self, ord, dim, keepdim, dtype) + + +@_onnx_symbolic("aten::linalg_vector_norm") +@symbolic_helper.parse_args("v", "f", "is", "b", "v") +@_beartype.beartype +def linalg_vector_norm( + g: jit_utils.GraphContext, + self: torch._C.Value, + ord: float, + dim: Optional[Sequence[int]], + keepdim: bool, + dtype: torch._C.Value, +): + # Conditions based on https://pytorch.org/docs/stable/generated/torch.linalg.vector_norm.html + if symbolic_helper._is_none(dim): + self = symbolic_helper._reshape_helper(g, self, [-1]) + keepdim = False + + if ord == math.inf: + result = g.op("ReduceMax", g.op("Abs", self), axes_i=dim, keepdims_i=keepdim) + elif ord == -math.inf: + result = g.op("ReduceMin", g.op("Abs", self), axes_i=dim, keepdims_i=keepdim) + elif ord == 0: + return symbolic_helper._onnx_opset_unsupported_detailed( + "linalg_vector_norm", 9, 11, "ord=0 not supported", self + ) + elif ord == 1: + result = _reduce_op_symbolic("ReduceL1")(g, self, dim=dim, keepdim=keepdim) + elif ord == 2: + result = _reduce_op_symbolic("ReduceL2")(g, self, dim=dim, keepdim=keepdim) + else: + ord_op = g.op("Constant", value_t=torch.tensor(ord, dtype=torch.float32)) + result = symbolic_helper._reducesum_helper( + g, g.op("Pow", g.op("Abs", self), ord_op), axes_i=dim, keepdims_i=keepdim + ) + result = g.op( + "Pow", + result, + g.op( + "Div", + g.op("Constant", value_t=torch.tensor(1, dtype=torch.float32)), + ord_op, + ), + ) + + if not symbolic_helper._is_none(dtype): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + result = g.op("Cast", result, to_i=_type_utils.JitScalarType(dtype).onnx_type()) # type: ignore[arg-type] + return result + + 
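For a generic ord (not 0, 1, 2, or +/-inf), the symbolic above lowers the vector norm to Abs -> Pow(ord) -> ReduceSum -> Pow(1/ord). A small eager-mode sketch of that decomposition, checked against torch.linalg.vector_norm (illustrative only; vector_norm_decomposed is not an exporter API):

import torch


def vector_norm_decomposed(x: torch.Tensor, ord: float, dim=None, keepdim: bool = False) -> torch.Tensor:
    # Mirror the symbolic: with dim=None the input is flattened and keepdim is dropped.
    if dim is None:
        x = x.reshape(-1)
        dim = 0
        keepdim = False
    summed = (x.abs() ** ord).sum(dim=dim, keepdim=keepdim)
    return summed ** (1.0 / ord)


x = torch.randn(2, 3)
assert torch.allclose(
    vector_norm_decomposed(x, 3.0, dim=1),
    torch.linalg.vector_norm(x, ord=3.0, dim=1),
)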
+@_onnx_symbolic("aten::linalg_matrix_norm") +@symbolic_helper.parse_args("v", "v", "is", "b", "v") +@_beartype.beartype +def linalg_matrix_norm( + g: jit_utils.GraphContext, + self: torch._C.Value, + ord: torch._C.Value, + dim: List[int], + keepdim: bool, + dtype: torch._C.Value, +): + # Conditions based on https://pytorch.org/docs/stable/generated/torch.linalg.matrix_norm.html + ord_value = symbolic_helper._parse_arg(ord, "s") + if ord_value == "fro": + return frobenius_norm(g, self, dim, keepdim) + elif ord_value == "nuc": + return symbolic_helper._unimplemented("linalg.matrix_norm", "ord==nuc", self) + else: + ord_value = symbolic_helper._parse_arg(ord, "f") + if ord_value is None: + return frobenius_norm(g, self, dim, keepdim) + if ord_value == 2 or ord_value == -2: + # ord = 2/-2 unimplemented due to lack of operators + # used to calculate singular values + return symbolic_helper._unimplemented("linalg.matrix_norm", "ord==2", self) + # Wrap the dim vector to handle negative dim values + self_dim = symbolic_helper._get_tensor_rank(self) + if self_dim is None: + return symbolic_helper._unimplemented( + "linalg.matrix_norm", "Input rank must be known at export time.", self + ) + # Common implementation for cases with + # ord = 1/-1 and ord = inf/-inf + if dim[0] < 0: + dim[0] += self_dim + if dim[1] < 0: + dim[1] += self_dim + + if ord_value == math.inf or ord_value == -math.inf: + dim[0], dim[1] = dim[1], dim[0] + if dim[1] > dim[0] and not keepdim: + dim[1] -= 1 + sum = symbolic_helper._reducesum_helper( + g, g.op("Abs", self), axes_i=[dim[0]], keepdims_i=keepdim + ) + if ord_value > 0: + result, indices = max( + g, + sum, + dim_or_y=g.op("Constant", value_t=torch.LongTensor([dim[1]])), + keepdim=keepdim, + ) + else: + result, indices = min( + g, + sum, + dim_or_y=g.op("Constant", value_t=torch.LongTensor([dim[1]])), + keepdim=keepdim, + ) + return result + + +@_onnx_symbolic("aten::linalg_cross") +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def linalg_cross(g: jit_utils.GraphContext, input, other, dim=-1): + return cross(g, input, other, dim) + + +@_onnx_symbolic("aten::frobenius_norm") +@symbolic_helper.parse_args("v", "is", "b") +@_beartype.beartype +def frobenius_norm(g: jit_utils.GraphContext, self, dim=None, keepdim=False): + sqr = g.op("Mul", self, self) + sumsqr = symbolic_helper._reducesum_helper(g, sqr, axes_i=dim, keepdims_i=keepdim) + return g.op("Sqrt", sumsqr) + + +@_onnx_symbolic("aten::multinomial") +@symbolic_helper.parse_args("v", "i", "b", "v") +@_beartype.beartype +def multinomial( + g: jit_utils.GraphContext, input, num_samples, replacement=False, generator=None +): + if generator is not None and not symbolic_helper._is_none(generator): + symbolic_helper._unimplemented( + "Multinomial", "generator is not supported for multinomial", input + ) + if not replacement and num_samples > 1: + symbolic_helper._unimplemented( + "Multinomial", + "replacement=False when num_samples > 1 is not supported for multinomial", + input, + ) + + log_input = log(g, input) + return g.op( + "Multinomial", + log_input, + dtype_i=_C_onnx.TensorProtoDataType.INT64, + sample_size_i=num_samples, + ) + + +@_onnx_symbolic("aten::baddbmm") +@_beartype.beartype +def baddbmm(g: jit_utils.GraphContext, self, batch1, batch2, beta, alpha): + scalar_type = _type_utils.JitScalarType.from_value(self) + batch_mul = matmul(g, batch1, batch2) + mul_a = mul( + g, + batch_mul, + g.op("Cast", alpha, to_i=scalar_type.onnx_type()), + ) + mul_b = mul( + g, + self, + g.op("Cast", beta, 
to_i=scalar_type.onnx_type()), + ) + return add(g, mul_a, mul_b) + + +@_onnx_symbolic("aten::meshgrid") +@symbolic_helper.parse_args("v", "s") +@_beartype.beartype +def meshgrid(g: jit_utils.GraphContext, tensor_list, indexing: Optional[str] = None): + if indexing is None: + indexing = "ij" + elif indexing not in {"ij", "xy"}: + raise errors.SymbolicValueError( + f"Unsupported indexing: {indexing}", tensor_list + ) + unpacked_tensor_list = symbolic_helper._unpack_list(tensor_list) + if indexing == "xy": + unpacked_tensor_list[:2] = unpacked_tensor_list[1::-1] + tensors = [ + symbolic_helper._reshape_helper( + g, t, g.op("Constant", value_t=torch.LongTensor([-1])) + ) + for t in unpacked_tensor_list + ] + tensors_shape = [g.op("Shape", t) for t in tensors] + out_shape = g.op("Concat", *tensors_shape, axis_i=0) + out = [] + for i, t in enumerate(tensors): + shape_i = [g.op("Constant", value_t=torch.ones(1, dtype=torch.int64))] * len( + tensors + ) + shape_i[i] = tensors_shape[i] + t_reshaped = _reshape_from_tensor(g, t, g.op("Concat", *shape_i, axis_i=0)) + out.append(g.op("Expand", t_reshaped, out_shape)) + if indexing == "xy": + out[0], out[1] = out[1], out[0] + return g.op("prim::ListConstruct", *out) + + +@_onnx_symbolic("aten::remainder") +@_beartype.beartype +def remainder(g: jit_utils.GraphContext, input, other): + div = _floor_divide(g, input, other) + quo = g.op("Mul", div, other) + return g.op("Sub", input, quo) + + +@_onnx_symbolic("aten::gelu") +@symbolic_helper.parse_args("v", "s") +@_beartype.beartype +def gelu(g: jit_utils.GraphContext, self: torch._C.Value, approximate: str = "none"): + if approximate == "tanh": + kBeta = math.sqrt(2 / math.pi) + kKappa = 0.044715 + + beta = torch.tensor(kBeta, dtype=torch.double) + kappa = torch.tensor(kKappa, dtype=torch.double) + one = torch.tensor(1.0, dtype=torch.double) + half = torch.tensor(0.5, dtype=torch.double) + + self_cube = mul(g, self, mul(g, self, self)) + inner = mul(g, beta, add(g, self, mul(g, kappa, self_cube))) + return mul(g, half, mul(g, self, add(g, one, g.op("Tanh", inner)))) + else: + _sqrt2 = 1.4142135623730951 + erf = g.op("Erf", g.op("Div", self, torch.tensor(_sqrt2, dtype=torch.double))) + erf_plusone = add( + g, erf, g.op("Constant", value_t=torch.tensor(1, dtype=torch.double)) + ) + return mul( + g, + mul(g, self, erf_plusone), + g.op("Constant", value_t=torch.tensor(0.5, dtype=torch.double)), + ) + + +@_onnx_symbolic("aten::group_norm") +@symbolic_helper.quantized_args(True, False, False, False) +@symbolic_helper.parse_args("v", "i", "v", "v", "f", "i") +@_beartype.beartype +def group_norm( + g: jit_utils.GraphContext, input, num_groups, weight, bias, eps, cudnn_enabled +): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "group_norm", + input, + weight, + bias, + num_groups_i=num_groups, + eps_f=eps, + cudnn_enabled_i=cudnn_enabled, + ) + + channel_size = symbolic_helper._get_tensor_dim_size(input, 1) + if channel_size is not None: + assert channel_size % num_groups == 0 + input_rank = symbolic_helper._get_tensor_rank(input) + if input_rank is None: + return symbolic_helper._unimplemented("group_norm", "unknown input rank", input) + # 0 in the shape list keeps dimension value unchanged. + shape = [0, num_groups, -1] + input_reshaped = symbolic_helper._reshape_helper( + g, input, g.op("Constant", value_t=torch.LongTensor(shape)) + ) + + # C is always divisible by num_groups + # Due to shape difference. 
we need to apply weight and bias after + # instance norm computation and reshape + weight_ = g.op( + "Constant", + value_t=torch.tensor( + [1.0] * num_groups, + dtype=_type_utils.JitScalarType.from_value(input).dtype(), + ), + ) + bias_ = g.op( + "Constant", + value_t=torch.tensor( + [0.0] * num_groups, + dtype=_type_utils.JitScalarType.from_value(input).dtype(), + ), + ) + + norm_reshaped = g.op( + "InstanceNormalization", input_reshaped, weight_, bias_, epsilon_f=eps + ) + norm = symbolic_helper._reshape_helper(g, norm_reshaped, g.op("Shape", input)) + + if weight is None or weight.node().mustBeNone(): + weight_value = torch.tensor( + [1.0], dtype=_type_utils.JitScalarType.from_value(input).dtype() + ) + weight = g.op("Constant", value_t=weight_value) + if bias is None or bias.node().mustBeNone(): + bias_value = torch.tensor( + [0.0], dtype=_type_utils.JitScalarType.from_value(input).dtype() + ) + bias = g.op("Constant", value_t=bias_value) + + # Norm has shape [N, C, *] so we reshape weight and bias to [C, *] + axes = list(range(1, input_rank - 1)) + return add( + g, + mul(g, norm, symbolic_helper._unsqueeze_helper(g, weight, axes)), + symbolic_helper._unsqueeze_helper(g, bias, axes), + ) + + +@_onnx_symbolic("aten::_weight_norm") +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def _weight_norm(g: jit_utils.GraphContext, weight_v, weight_g, dim): + rank = symbolic_helper._get_tensor_rank(weight_v) + if rank is not None: + # W = g * ((v) / ||v||) + # Compute norm_except_dim for l2 norm. dim = None means over all dims + # torch's weight_norm module sets dim = -1 if it's None. + # This conflicts the logic for negative axes to access dims backwards + # TODO: Might need a fix in torch group_norm module + axes = list(range(rank)) + if dim is not None: + if dim < -1: + dim += rank + if dim != -1: + axes.remove(dim) + norm_v = norm(g, weight_v, 2, axes, 1) + div = g.op("Div", weight_v, norm_v) + return g.op("Mul", div, weight_g) + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("_weight_norm", weight_v, weight_g, dim_i=dim) + + raise errors.SymbolicValueError( + "Unsupported: ONNX export of _weight_norm for tensor of unknown rank.", + weight_v, + ) + + +@_onnx_symbolic("aten::dim") +@_beartype.beartype +def dim(g: jit_utils.GraphContext, self): + """Implement the dim functionality available for a pytorch tensor in ONNX""" + # ONNX does not support dim directly in this opset so we can use 2 ops to get the info + shape = g.op("Shape", self) + return g.op("Size", shape) + + +@_onnx_symbolic("aten::__contains_") +@_beartype.beartype +def __contains_(g: jit_utils.GraphContext, self, element): + unpacked_list = symbolic_helper._unpack_list(self) + if all( + symbolic_helper._is_constant(x) for x in unpacked_list + ) and symbolic_helper._is_constant(element): + return g.op( + "Constant", + value_t=torch.tensor( + symbolic_helper._node_get(element.node(), "value") + in (symbolic_helper._node_get(x.node(), "value") for x in unpacked_list) + ), + ) + + raise errors.SymbolicValueError( + "Unsupported: ONNX export of __contains__ for non-constant list or element.", + self, + ) + + +@_onnx_symbolic("aten::__getitem_") +@_beartype.beartype +def __getitem_(g: jit_utils.GraphContext, self, i): + return select(g, self, g.op("Constant", value_t=torch.tensor([0])), i) + + +@_onnx_symbolic("aten::item") +@_beartype.beartype +def item(g: jit_utils.GraphContext, self): + return self + + +@_onnx_symbolic("aten::take") +@_beartype.beartype +def take(g: jit_utils.GraphContext, self, index): 
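+    # aten::take treats `self` as if it were flattened, so the export below
+    # flattens it to 1-D, gathers the linear indices, and reshapes the result
+    # to match the shape of `index`.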
+ self_flattened = symbolic_helper._reshape_helper( + g, self, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)) + ) + out = index_select(g, self_flattened, 0, index) + out = reshape_as(g, out, index) + return out + + +@_beartype.beartype +def _kl_div_log_target_impl(g: jit_utils.GraphContext, input, target): + diff_ = sub(g, target, input) + exp_ = exp(g, target) + output = mul(g, exp_, diff_) + return output + + +@_beartype.beartype +def _kl_div_non_log_target_impl(g: jit_utils.GraphContext, input, target): + log_ = log(g, target) + diff_ = sub(g, log_, input) + output_pos = mul(g, target, diff_) + zeros_ = zeros_like(g, output_pos) + mask_ = gt(g, target, g.op("Constant", value_t=torch.tensor(0))) + output = where(g, mask_, output_pos, zeros_) + return output + + +@_onnx_symbolic("aten::kl_div") +@symbolic_helper.parse_args("v", "v", "i", "b") +@_beartype.beartype +def kl_div(g: jit_utils.GraphContext, input, target, reduction, log_target): + if log_target: + output = _kl_div_log_target_impl(g, input, target) + else: + output = _kl_div_non_log_target_impl(g, input, target) + + if reduction == 0: + return output + elif reduction == 1: + return g.op("ReduceMean", output, keepdims_i=0) + elif reduction == 2: + return symbolic_helper._reducesum_helper(g, output, keepdims_i=0) + else: + return symbolic_helper._onnx_unsupported( + "kl_div with reduction other than none, mean, or sum.", input + ) + + +@_onnx_symbolic("aten::mse_loss") +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def mse_loss(g: jit_utils.GraphContext, input, target, reduction): + output = mul(g, sub(g, input, target), sub(g, input, target)) + if reduction == 0: + return output + elif reduction == 1: + return g.op("ReduceMean", output, keepdims_i=0) + elif reduction == 2: + return symbolic_helper._reducesum_helper(g, output, keepdims_i=0) + else: + return symbolic_helper._onnx_unsupported( + "mse_loss with reduction other than none, mean, or sum.", input + ) + + +@_onnx_symbolic("aten::as_strided") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "v", "is", "i") +@_beartype.beartype +def as_strided(g: jit_utils.GraphContext, self, sizes, strides, offset=None): + sizes = symbolic_helper._maybe_get_const(sizes, "is") + rank = len(strides) + self_1d = symbolic_helper._reshape_helper( + g, self, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)) + ) + ind: Optional[torch.Tensor] + if not symbolic_helper._is_value(sizes): + ind = torch.tensor([0], dtype=torch.long) + for i, (size, stride) in enumerate(zip(sizes, strides)): + r_size = [1] * rank + r_size[i] = -1 + ind = ind + torch.arange(size).view(r_size) * stride + if offset: + ind = ind + offset + return g.op("Gather", self_1d, g.op("Constant", value_t=ind)) + else: + ind = None + for i, stride in enumerate(strides): + r_size = [1] * rank + r_size[i] = -1 + size = select( + g, + sizes, + g.op("Constant", value_t=torch.tensor([0])), + g.op("Constant", value_t=torch.tensor(i)), + ) + tmp_ind = symbolic_helper._reshape_helper( + g, + arange(g, size, 4, None, None, None), + g.op("Constant", value_t=torch.tensor(r_size)), + ) + tmp_ind = g.op( + "Mul", tmp_ind, g.op("Constant", value_t=torch.tensor([stride])) + ) + if ind is None: + ind = tmp_ind + else: + ind = g.op("Add", ind, tmp_ind) + if offset: + ind = g.op("Add", ind, g.op("Constant", torch.tensor([offset]))) + return g.op("Gather", self_1d, ind) + + +@_onnx_symbolic("aten::__derive_index") +@_beartype.beartype +def __derive_index(g: jit_utils.GraphContext, 
index, start, step): + return g.op("Add", start, g.op("Mul", index, step)) + + +@_onnx_symbolic("aten::__range_length") +# Source code for aten op can be found here: pytorch/torch/csrc/jit/runtime/register_prim_ops.cpp +# if (step > 0 && lo < hi) { +# push(stack, 1 + (hi - 1 - lo) / step); +# } else if (step < 0 && lo > hi) { +# push(stack, 1 + (lo - 1 - hi) / (0 - step)); +# } else { +# push(stack, 0); +# } +@_beartype.beartype +def __range_length(g: jit_utils.GraphContext, lo, hi, step): + sub = g.op("Sub", hi, lo) + div = g.op("Ceil", true_divide(g, sub, step)) + return g.op("Cast", div, to_i=_C_onnx.TensorProtoDataType.INT64) + + +@_onnx_symbolic("aten::linear") +@_beartype.beartype +def linear(g: jit_utils.GraphContext, input, weight, bias): + rank = symbolic_helper._get_tensor_rank(input) + weight = t(g, weight) + if rank == 2 and not bias.node().mustBeNone(): + alpha = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) + beta = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) + output = addmm(g, bias, input, weight, alpha, beta) + else: + output = matmul(g, input, weight) + if not bias.node().mustBeNone(): + output = add(g, bias, output) + + return output + + +@_onnx_symbolic("aten::hann_window") +@symbolic_helper.parse_args("v", "b", "i", "v", "v", "v", "v") +@_beartype.beartype +def hann_window( + g: jit_utils.GraphContext, + window_length, + periodic=True, + dtype: Optional[int] = None, + layout=None, + device=None, + pin_memory=None, + requires_grad=False, +): + if dtype is None: + dtype_ = torch.get_default_dtype() + if not dtype_ or not dtype_.is_floating_point: + dtype_ = torch.float + scalar_type = _type_utils.JitScalarType.from_dtype(dtype_) + else: + scalar_type = _type_utils.JitScalarType(dtype) + + n_array = arange(g, window_length, 4, None, None, None) + output = g.op("Cast", n_array, to_i=_C_onnx.TensorProtoDataType.FLOAT) + output = mul( + g, g.op("Constant", value_t=torch.tensor(math.pi, dtype=torch.float)), output + ) + + if periodic is False: + window_length = sub( + g, window_length, g.op("Constant", value_t=torch.tensor(1, dtype=torch.int)) + ) + output = div(g, output, window_length) + output = g.op( + "Cast", + square(g, sin(g, output)), + to_i=scalar_type.onnx_type(), + ) + + return output + + +@_onnx_symbolic("aten::mv") +@_beartype.beartype +def mv(g: jit_utils.GraphContext, self, vec): + return matmul(g, self, vec) + + +@_onnx_symbolic("aten::dot") +@_beartype.beartype +def dot(g: jit_utils.GraphContext, self, other): + return matmul(g, self, other) + + +@_onnx_symbolic("aten::movedim") +@symbolic_helper.parse_args("v", "t", "t") +@_beartype.beartype +def movedim(g: jit_utils.GraphContext, self, source, destination): + # This is a pythonic implementation mostly taken from aten/src/ATen/native/TensorShape.cpp::movedim + source = source.view(-1) + destination = destination.view(-1) + + assert source.size() == destination.size() + + if (source == destination).all(): + return self + + self_rank = symbolic_helper._get_tensor_rank(self) + assert self_rank is not None + + perm = list(range(self_rank)) + + src_dims = perm.copy() + dst_dims = perm.copy() + + for src, dst in zip(source.tolist(), destination.tolist()): + perm[dst] = src + src_dims[src] = -1 + dst_dims[dst] = -1 + + src_dims = [dim for dim in src_dims if dim != -1] + dst_dims = [dim for dim in dst_dims if dim != -1] + + for src, dst in zip(src_dims, dst_dims): + perm[dst] = src + + return g.op("Transpose", self, perm_i=perm) + + +@_onnx_symbolic("aten::fill") 
+@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def fill(g: jit_utils.GraphContext, self, value): + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.FLOAT + ) + return full_like(g, self, value, scalar_type) + + +@_onnx_symbolic("aten::index_add") +@_beartype.beartype +def index_add(g: jit_utils.GraphContext, self, dim, index, other, alpha=None): + warnings.warn( + "Warning: ONNX export does not support duplicated values in 'index' field, " + + "this will cause the ONNX model to be incorrect." + ) + + # ONNX does not support "alpha" argument, unlike aten index_add + # See: https://github.com/pytorch/pytorch/pull/65993#issuecomment-953151102 for more context + if alpha and symbolic_helper._scalar(symbolic_helper._maybe_get_scalar(alpha)) != 1: + return symbolic_helper._unimplemented("index_add", "alpha != 1", self) + + dim = symbolic_helper._maybe_get_const(dim, "i") + if dim is None: + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting 'index_add_()' function with " + "unknown 'dim' value.", + self, + ) + + self_dim_rank = symbolic_helper._get_tensor_rank(self) + other_dim_rank = symbolic_helper._get_tensor_rank(other) + + if self_dim_rank is None or other_dim_rank is None: + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting 'index_add_()' function while " + "the rank of self tensor or tensor to be added is unknown.", + self, + ) + + if other_dim_rank != self_dim_rank: + delta = self_dim_rank - other_dim_rank + for i in range(delta): + other = symbolic_helper._unsqueeze_helper( + g, other, [symbolic_helper._get_tensor_rank(other)] + ) + + other_dim_size = symbolic_helper._get_tensor_dim_size(other, dim) + self_dim_size = symbolic_helper._get_tensor_dim_size(self, dim) + + if (other_dim_size is not None) and (self_dim_size is not None): + if other_dim_size > self_dim_size: + raise errors.SymbolicValueError( + "ONNX export does not support exporting 'index_add_()' function with " + "duplicated values in 'index' parameter yet.", + self, + ) + + # Construct a new shape. It's almost as same as self except the size of the 'dim' + # dimension is 1, so that we can expand other dimensions as expected. 
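+    # The Slice below keeps every axis of `self` intact (ends=sys.maxsize)
+    # except `dim`, which is cut down to length 1; `other` is then broadcast
+    # against this sliced view before the scatter_add.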
+ new_shape_axes = list(range(self_dim_rank)) + new_shape_starts = [0 for i in range(self_dim_rank)] + new_shape_ends = [sys.maxsize if (i != dim) else 1 for i in range(self_dim_rank)] + + new_shape = symbolic_helper._slice_helper( + g, self, axes=new_shape_axes, starts=new_shape_starts, ends=new_shape_ends + ) + other = expand_as(g, other, new_shape) + + for i in range(dim): + index = symbolic_helper._unsqueeze_helper(g, index, [0]) + + for i in range(self_dim_rank - dim - 1): + index = symbolic_helper._unsqueeze_helper( + g, index, [symbolic_helper._get_tensor_rank(index)] + ) + + return scatter_add(g, self, dim, expand_as(g, index, other), other) + + +@_onnx_symbolic("aten::roll") +@symbolic_helper.parse_args("v", "is", "is") +@_beartype.beartype +def roll(g: jit_utils.GraphContext, self, shifts, dims): + assert len(shifts) == len(dims) + + result = self + for i in range(len(shifts)): + shapes = [] + shape = symbolic_helper._slice_helper( + g, result, axes=[dims[i]], starts=[-shifts[i]], ends=[sys.maxsize] + ) + shapes.append(shape) + shape = symbolic_helper._slice_helper( + g, result, axes=[dims[i]], starts=[0], ends=[-shifts[i]] + ) + shapes.append(shape) + result = g.op("Concat", *shapes, axis_i=dims[i]) + + return result + + +@_onnx_symbolic("aten::cross") +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def cross(g: jit_utils.GraphContext, input, other, dim=None): + dim = symbolic_helper._get_dim_for_cross(input, dim) + # If we have two tensors such that + # A = [a, b, c], B = [d, e, f], we permute the tensor such that we have + # After first roll, + # A' = [b, c, a], B' = [f, d, e], so that we calculate (b*f, c*d, a*e) + roll_x_1 = roll(g, input, [2], [dim]) + roll_y_1 = roll(g, other, [1], [dim]) + # After second roll, + # A' = [c, a, b], B' = [e, f, d], so that we calculate (c*e, a*f, b*d) + roll_x_2 = roll(g, input, [1], [dim]) + roll_y_2 = roll(g, other, [2], [dim]) + # cross product is calculated as + # result = [(b*f - c*e), (c*d - a*f), (a*e - b*d)] + return sub(g, mul(g, roll_x_1, roll_y_1), mul(g, roll_x_2, roll_y_2)) + + +@_onnx_symbolic("aten::cdist") +@_beartype.beartype +def cdist( + g: jit_utils.GraphContext, + x1, + x2, + p=2.0, + compute_mode="use_mm_for_euclid_dist_if_necessary", +): + # X1.shape = (B * P * D), X2.shape = (B * R * D) + # In order to respect numpy style broadcasting as demonstrated in + # https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md + # we unsqueeze both input tensors + # Currently we ignore the 'compute_mode' variable as we use default to + # using matrix multiplication to calculate the euclidean distance + rank = symbolic_helper._get_tensor_rank(x1) + assert rank is not None + broadcasted_x1 = symbolic_helper._unsqueeze_helper(g, x1, [rank - 1]) + broadcasted_x2 = symbolic_helper._unsqueeze_helper(g, x2, [rank - 2]) + return pairwise_distance( + g, broadcasted_x1, broadcasted_x2, p, eps=1e-06, keepdim=False + ) + + +@_onnx_symbolic("aten::lerp") +@_beartype.beartype +def lerp(g: jit_utils.GraphContext, self, end, weight): + # Conditional for better numeric. 
This has been discussed in + # https://github.com/pytorch/pytorch/pull/18871 + diff = g.op("Sub", end, self) + return where( + g, + g.op("Less", weight, g.op("Constant", value_t=torch.tensor(0.5))), + g.op("Add", self, g.op("Mul", weight, diff)), + g.op( + "Sub", + end, + g.op( + "Mul", + diff, + g.op("Sub", g.op("Constant", value_t=torch.tensor(1.0)), weight), + ), + ), + ) + + +@_onnx_symbolic("aten::broadcast_tensors") +@_beartype.beartype +def broadcast_tensors(g: jit_utils.GraphContext, self): + all_tensors = symbolic_helper._unpack_list(self) + t_with_final_shape = zeros_like(g, all_tensors[0]) + + # Add operator supports multidirectional broadcasting. So we leverage this function + # to infer the final shape generated by the broadcast. + for t in all_tensors: + t_with_final_shape = add(g, t_with_final_shape, t) + + t_list = [expand_as(g, t, t_with_final_shape) for t in all_tensors] + return g.op("prim::ListConstruct", *t_list) + + +@_onnx_symbolic("aten::is_pinned") +def is_pinned(g: jit_utils.GraphContext, self, device=None): + # Unused by ONNX. + return None + + +@_onnx_symbolic("prim::ConstantSplit") +@_beartype.beartype +def prim_constant_split(g: jit_utils.GraphContext, self, split_size, dim): + size = symbolic_helper._get_tensor_dim_size(self, dim) + if size is None: + return symbolic_helper._unimplemented( + "prim::ConstantSplit", "unknown dimension size", self + ) + splits = [split_size] * (size // split_size) + leftover = size % split_size + if leftover: + splits.append(leftover) + return g.op("Split", self, split_i=splits, axis_i=dim, outputs=len(splits)) + + +# TODO: It would be better to export this as a chunk directly, as this is +# less sensitive to changes in input size. +# TODO: Once we have proper scoping, stop reimplementing chunk, delete this +# method, and use the desugared version +@_onnx_symbolic("prim::ConstantChunk") +@_beartype.beartype +def prim_constant_chunk(g: jit_utils.GraphContext, self, chunks, dim): + dim_size = symbolic_helper._get_tensor_dim_size(self, dim) + if dim_size is None: + return symbolic_helper._unimplemented( + "prim::ConstantChunk", "unknown dimension size", self + ) + split_size = (dim_size + chunks - 1) // chunks + return prim_constant_split(g, self, split_size, dim) + + +@_onnx_symbolic("prim::shape") +@_beartype.beartype +def prim_shape(g: jit_utils.GraphContext, self): + return g.op("Shape", self) + + +@_onnx_symbolic("prim::max") +@_beartype.beartype +def prim_max(g: jit_utils.GraphContext, self, other): + return _op_with_optional_float_cast(g, "Max", self, other, opset_before=12) + + +@_onnx_symbolic("prim::min") +@_beartype.beartype +def prim_min(g: jit_utils.GraphContext, self, other=None): + if not other: + if symbolic_helper._is_packed_list(self): + self = stack(g, self, g.op("Constant", value_t=torch.tensor([0]))) + return min(g, self) + return min(g, self, other) + + +@_onnx_symbolic("prim::data") +@_beartype.beartype +def prim_data(g: jit_utils.GraphContext, self): + return self + + +@_onnx_symbolic("prim::layout") +def prim_layout(g: jit_utils.GraphContext, self): + # Always return 'torch.strided'. Other layout types are not supported by JIT 'TensorType'. + # Layout class defined in 'c10/core/Layout.h'. 
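+    # The constant 0 is the integer value of torch.strided in that enum.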
+ return g.op("Constant", value_t=torch.tensor(0)) + + +@_onnx_symbolic("prim::ListConstruct") +@_beartype.beartype +def prim_list_construct(g: jit_utils.GraphContext, *inputs, **kwargs): + return None + + +@_onnx_symbolic("prim::ListUnpack") +@_beartype.beartype +def prim_list_unpack( + g: jit_utils.GraphContext, *inputs, **kwargs +) -> Optional[List[_C.Value]]: + if len(inputs) == 1 and inputs[0].node().kind() == "prim::ListConstruct": + # Cancel the previous node if it is ListConstruct by returning its inputs + # TODO(justinchuby): Use a public method in the helper module + return symbolic_helper._unpack_list(inputs[0]) + + return None + + +@_onnx_symbolic("prim::TupleConstruct") +@_beartype.beartype +def prim_tuple_construct(g: jit_utils.GraphContext, *inputs, **kwargs): + return None + + +@_onnx_symbolic("prim::Uninitialized") +@_beartype.beartype +def prim_uninitialized(g: jit_utils.GraphContext, *inputs, **kwargs): + return None + + +# exists to refine the type of the Value +# if x is an optional Tensor, unchecked_cast will cast +# x to Tensor, so the rest of the graph knows that x is a Tensor +# this doesn't do anything in runtime and is a noop in ONNX +@_onnx_symbolic("prim::unchecked_cast") +@_beartype.beartype +def prim_unchecked_cast(g: jit_utils.GraphContext, self): + return self + + +@_onnx_symbolic("prim::dtype") +@_beartype.beartype +def prim_dtype(g: jit_utils.GraphContext, self): + scalar_type = symbolic_helper._try_get_scalar_type(self) + if scalar_type is None: + scalar_type = _type_utils.JitScalarType.FLOAT + # This node records a torch dtype as int + return g.op("Constant", value_t=torch.tensor(scalar_type)) + + +@_onnx_symbolic("prim::tolist") +@_beartype.beartype +def prim_tolist(g: jit_utils.GraphContext, input, dim_val, elem_ty_val): + """tolist is currently supported only for 1D input tensors. + + dim_val and elem_ty_val represent dimension and type annotations + that need to match dimension and type of the input tensor. 
+ """ + dim = symbolic_helper._maybe_get_const(dim_val, "i") + if dim > 1: + return symbolic_helper._unimplemented("prim::tolist", "dim_val > 1", input) + return input + + +# ----------------------------------------------------------------------------- +# Symbolic functions that need extra context +# ----------------------------------------------------------------------------- +@_onnx_symbolic("prim::device") +@_beartype.beartype +def prim_device(g: jit_utils.GraphContext, *inputs, **kwargs) -> None: + output_type = g.original_node.output().type() + if isinstance(output_type, _C.DeviceObjType): + return None + + return symbolic_helper._unimplemented( + "prim::device", + f"output type should be 'DeviceObjType', not '{output_type.kind()}'", + g.original_node.output(), + ) + + +@_onnx_symbolic("prim::Loop") +@_beartype.beartype +def prim_loop(g: jit_utils.GraphContext, *inputs, **attrs) -> List[_C.Value]: + node = g.original_node + env = g.env + params_dict = g.params_dict + + operator_export_type = GLOBALS.operator_export_type + opset_version = GLOBALS.export_onnx_opset_version + + old_blocks = tuple(node.blocks()) + new_op_outputs, new_block_contexts, new_node = jit_utils.add_op_with_blocks( + g, "Loop", *inputs, outputs=node.outputsSize(), n_blocks=len(old_blocks) + ) + + for old_block, new_block_context in zip(old_blocks, new_block_contexts): + # Copy input metadata to subblock + # + # prim::Loop(iter, cond, input_1, ..., input_n) + # block0(iter, input_1, ..., input_n) + # + # For `Loop` node, copy metadata for `iter`, `input_1`, ..., `input_n`. + for i, b_in in enumerate(old_block.inputs()): + if i == 0 and i < len(inputs): + b_in.setType(inputs[i].type()) + # For optional block inputs, they may switch between None not-None inside + # the loop body, so if the loop input is not optional, the block input may + # still need to be optional. + if ( + i > 0 + and (i + 1) < len(inputs) + and not isinstance(b_in.type(), _C.OptionalType) + ): + b_in.setType(inputs[i + 1].type()) + torch._C._jit_pass_onnx_block( + old_block, + new_block_context.block, + operator_export_type, + env, + False, + ) + fixed_outputs = torch._C._jit_pass_fixup_onnx_controlflow_node( + new_node, opset_version + ) + # Run shape type inference for Loop after subblock is converted. + if GLOBALS.onnx_shape_inference: + torch._C._jit_pass_onnx_node_shape_type_inference( + new_node, params_dict, opset_version + ) + return fixed_outputs + + +@_onnx_symbolic("prim::If") +@_beartype.beartype +def prim_if(g: jit_utils.GraphContext, *inputs, **attrs) -> List[_C.Value]: + n = g.original_node + block = g.block + env = g.env + params_dict = g.params_dict + + operator_export_type = GLOBALS.operator_export_type + opset_version = GLOBALS.export_onnx_opset_version + + static_if = inputs[0].node().kind() == "onnx::Constant" + if static_if: + # Fold static if + # + # The torch IR + # graph(%embedding_matrix.1 : Float(10, 15, strides=[15, 1], requires_grad=0, device=cpu), + # %input.1 : Long(6, strides=[1], requires_grad=0, device=cpu), ... 
+ # %65 : Bool(requires_grad=0, device=cpu) = prim::Constant[value={0}]() + # %21 : Long(device=cpu) = aten::eq(%20, %64) + # %22 : Long(device=cpu) = prim::If(%21) + # block0(): + # %23 : Long(device=cpu) = aten::is_floating_point(%input.1) + # -> (%23) + # block1(): + # -> (%65) + # %input.53 : Tensor, %weight : Tensor = prim::If(%22) + # block0(): + # -> (%embedding_matrix.1, %input.1) + # block1(): + # -> (%input.1, %embedding_matrix.1) + # %26 : int[] = aten::size(%input.53) + # + # The converted ONNX graph + # %10 : Bool(device=cpu) = onnx::Constant[value={0}]() + # %14 : Bool(device=cpu) = onnx::Equal(%13, %8) + # %15 : Bool(requires_grad=0, device=cpu) = onnx::Constant[value={0}]() + # %16 : Long(1, strides=[1], device=cpu) = onnx::Shape(%input.1) + input_flag = symbolic_helper._node_get(inputs[0].node(), "value").tolist() + const_value = ( + all(input_flag) if isinstance(input_flag, list) else bool(input_flag) + ) + block_idx = 0 if const_value else 1 + current_b = list(n.blocks())[block_idx] + env = torch._C._jit_pass_onnx_block( + current_b, + block, + operator_export_type, + env, + True, + ) + if_output_list = list(n.outputs()) + current_b_list = list(current_b.outputs()) + + final_b_list = [] + for idx in range(len(if_output_list)): + if current_b_list[idx] not in env: + raise errors.SymbolicValueError( + f"The sub block ATen output {current_b_list[idx]} is not in env.", + current_b_list[idx], + ) # type:ignore[operator] + onnx_b = env[current_b_list[idx]] + final_b_list.append(onnx_b) + return final_b_list + else: + old_blocks = tuple(n.blocks()) + new_op_outputs, new_block_contexts, new_node = jit_utils.add_op_with_blocks( + g, "If", *inputs, outputs=n.outputsSize(), n_blocks=len(old_blocks) + ) + + for old_block, new_block_context in zip(old_blocks, new_block_contexts): + torch._C._jit_pass_onnx_block( + old_block, + new_block_context.block, + operator_export_type, + env, + False, + ) + fixed_outputs = torch._C._jit_pass_fixup_onnx_controlflow_node( + new_node, opset_version + ) + # Run shape type inference for If after subblock is converted. + if GLOBALS.onnx_shape_inference: + torch._C._jit_pass_onnx_node_shape_type_inference( + new_node, params_dict, opset_version + ) + return fixed_outputs + + +@_onnx_symbolic("prim::Constant") +@_beartype.beartype +def prim_constant(g: jit_utils.GraphContext, *inputs, **attrs): + node = g.original_node + + if node.mustBeNone(): + return None + # This must go before checking for string values, because some device constants + # have string values, but we want to keep them as unconverted Device types so + # that eq() can work on them. + if isinstance(node.output().type(), _C.DeviceObjType): + return None + if node.kindOf("value") == "t": + return g.op("Constant", value_t=symbolic_helper._node_get(node, "value")) + if node.kindOf("value") == "s": + return g.op("Constant", value_s=symbolic_helper._node_get(node, "value")) + if node.output().type().isSubtypeOf( + _C.ListType.ofInts() + ) or node.output().type().isSubtypeOf(_C.ListType.ofFloats()): + return g.op( + "Constant", value_t=torch.tensor(symbolic_helper._node_get(node, "value")) + ) + if node.output().type().isSubtypeOf(_C.ListType.ofStrings()): + str_constants = [ + g.op("Constant", value_s=s) + for s in symbolic_helper._node_get(node, "value") + ] + return g.op("prim::ListConstruct", *str_constants) + + raise errors.SymbolicValueError( + f"Unsupported prim::Constant kind: '{node.kindOf('value')}'. 
" + f"Please send a bug report at {_constants.PYTORCH_GITHUB_ISSUES_URL}.", + node.output(), + ) + + +@_onnx_symbolic("prim::type") +@_beartype.beartype +def prim_type(g: jit_utils.GraphContext, device_value: _C.Value, *args, **kwargs): + if device_value.node().kind() == "prim::device": + device = jit_utils.get_device_from_value(device_value.node().input()) + if device is not None: + return g.op("Constant", value_s=str(device)) + + return symbolic_helper._unimplemented( + "prim::type", + "Device type cannot be statically determined.", + device_value, + ) + + +@_onnx_symbolic("onnx::Placeholder") +@_beartype.beartype +def onnx_placeholder(g: jit_utils.GraphContext, *inputs, **attrs): + node = g.original_node + block = g.block + env = g.env + + return torch._C._jit_onnx_convert_pattern_from_subblock(block, node, env) + + +@_onnx_symbolic("aten::resolve_conj") +@_onnx_symbolic("aten::resolve_neg") +@_beartype.beartype +def noop_complex_operators(g: jit_utils.GraphContext, input: _C.Value): + # ONNX does not have operators to *directly* manipulate real/imaginary components + # However, a few torch APIs (e.g. .tolist()) use complex operations when input is real, + # which results in failures due to missing operators for complex numbers + + # `aten::resolve_conj` and `aten::resolve_neg` can safely be implemented as no-op + return input + + +@_onnx_symbolic("aten::_conj") +@_onnx_symbolic("aten::conj_physical") +@_beartype.beartype +def unsupported_complex_operators(g: jit_utils.GraphContext, input: _C.Value): + # ONNX does not have operators to *directly* manipulate real/imaginary components + # However, a few torch APIs (e.g. .tolist()) use complex operations when input is real, + # which results in failures due to missing operators for complex numbers + + # While `aten::_conj` and `aten::conj_physical` raise exception when input is complex + if symbolic_helper.is_complex_value(input): + # FIXME(justinchuby): report correct name for symbolic being executed + return symbolic_helper._onnx_unsupported( + "aten::_conj, aten::conj_physical", + input, + ) + + # they can safely be implemented as no-op for real numbers only + return noop_complex_operators(g, input) + + +@_onnx_symbolic("aten::logit") +@_beartype.beartype +def logit(g: jit_utils.GraphContext, self: torch._C.Value, eps: torch._C.Value): + one = g.op("Constant", value_t=torch.tensor(1.0)) + + if not symbolic_helper._is_none(eps): + eps = g.op( + "Cast", eps, to_i=_type_utils.JitScalarType.from_value(self).onnx_type() + ) + one_sub_eps = g.op("Sub", one, eps) + self_less_equal_one_sub_eps = g.op("Greater", one_sub_eps, self) + temporary_self = g.op("Where", self_less_equal_one_sub_eps, self, one_sub_eps) + + temporary_self_less_eps = g.op("Less", temporary_self, eps) + z = g.op("Where", temporary_self_less_eps, eps, temporary_self) + else: + z = self + + sub = g.op("Sub", one, z) + div = g.op("Div", z, sub) + return g.op("Log", div)